content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import h5py
import os
import numpy as np | [
11748,
289,
20,
9078,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941
] | 3.076923 | 13 |
import json
from notebook.base.handlers import APIHandler
from notebook.utils import url_path_join
import tornado
import os
import logging
import shutil
_extension_home_dir = os.path.join(os.environ["HOME"],'.jl-bookmarks')
if not os.path.exists(_extension_home_dir):
## Create <USER_HOME/.jl-bookmarks dir if it doesn't exists.
try:
os.mkdir(_extension_home_dir)
except OSError:
print(f'Could not create extension home dir at: {_extension_home_dir}')
_log_file_path = os.path.join(os.environ["HOME"],'.jl-bookmarks', 'JL-Bookmarks.log')
_settings_file_path = os.path.join(os.environ["HOME"],'.jl-bookmarks', 'settings.json')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logFileHandler = logging.FileHandler(_log_file_path, mode='a', encoding='utf-8', delay=False)
logFileHandler.setFormatter(formatter)
logger.addHandler(logFileHandler)
logger.propagate=False
_bookmarks = None
| [
11748,
33918,
198,
198,
6738,
20922,
13,
8692,
13,
4993,
8116,
1330,
7824,
25060,
198,
6738,
20922,
13,
26791,
1330,
19016,
62,
6978,
62,
22179,
198,
11748,
33718,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
4423,
346,
198,
198,
6... | 2.739837 | 369 |
#!/usr/bin/env python
import cgt
for (name,val) in cgt.__dict__.iteritems():
if not name.startswith("_"):
if not val.__doc__:
print "API function %s requires docstring!"%name
for (name,val) in cgt.core.__dict__.iteritems():
if isinstance(val, type) and issubclass(val, cgt.core.Op):
if val.get_native_compile_info == cgt.core.Op.get_native_compile_info:
print "Op %s is missing 'get_native_compile_info'!"%name
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
269,
13655,
198,
1640,
357,
3672,
11,
2100,
8,
287,
269,
13655,
13,
834,
11600,
834,
13,
2676,
23814,
33529,
198,
220,
220,
220,
611,
407,
1438,
13,
9688,
2032,
342,
7203,
62,... | 2.292079 | 202 |
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import password_validation
from django import forms
# from django.contrib.auth import get_user_model
from django.utils.translation import gettext, gettext_lazy as _
# from django.contrib.auth.forms import UserCreationForm, UsernameField, AuthenticationForm
# User = get_user_model()
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
220,
9206,
62,
12102,
341,
198,
6738,
42625,
14208,
133... | 3.491071 | 112 |
from constants import *
from descriptors import *
| [
6738,
38491,
1330,
1635,
198,
6738,
12145,
669,
1330,
1635,
198
] | 4.545455 | 11 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com), Xiangtai(lxtpku@pku.edu.cn)
# Select Seg Model for img segmentation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.models.nets.asp_ocnet import AspOCNet
from lib.models.nets.base_ocnet import BaseOCNet
from lib.models.nets.deeplabv3 import DeepLabV3
from lib.models.nets.denseassp import DenseASPP
from lib.models.nets.fast_asp_ocnet import FastAspOCNet
from lib.models.nets.fast_base_ocnet import FastBaseOCNet
from lib.models.nets.pspnet import PSPNet
from lib.models.nets.pyramid_ocnet import PyramidOCNet
from lib.utils.tools.logger import Logger as Log
SEG_MODEL_DICT = {
'deeplabv3': DeepLabV3,
'pspnet': PSPNet,
'denseaspp': DenseASPP,
'asp_ocnet': AspOCNet,
'base_ocnet': BaseOCNet,
'pyramid_ocnet': PyramidOCNet,
'fast_base_ocnet': FastBaseOCNet,
'fast_asp_ocnet': FastAspOCNet,
}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
2094,
3281,
921,
7,
5832,
504,
31753,
31,
14816,
13,
785,
828,
45641,
83,
1872,
7,
75,
742,
79,
23063,
31... | 2.58377 | 382 |
import pandas as pd
import tempfile
import matplotlib.pyplot as plt
from collections import defaultdict
import pickle
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, precision_score, recall_score
from nltk.corpus import stopwords
from sklearn.preprocessing import label_binarize
import os
plt.style.use('ggplot')
f = open('gestructureerde verslaglegging.xlsx', 'r')
STOPWORDS_DUTCH = stopwords.words('dutch')
def visFreqAnnotations():
'''bar vis of the 10 most occuring code annotations'''
N = 10
ind = range(N)
width = 0.35
plt.bar(ind, ordered_annotations[:N])
plt.xticks(ind, ordered_annotations[:N].index, rotation=17)
def showFreq(corpus, clf, top=10):
'''
Most occuring words
'''
vect = clf.named_steps.get('vectorizer')
X = vect.fit_transform(corpus) #vectorize words
vect = clf.named_steps.get('vectorizer')
zip(vect.get_feature_names(),np.asarray(X.sum(axis=0)).ravel())
return sorted(zip(vect.get_feature_names(),np.asarray(X.sum(axis=0)).ravel()),\
key=lambda x: x[1], reverse=True)[:top]
def print_top10(vectorizer, clf, class_labels):
"""Prints features with the highest coefficient values, per class"""
feature_names = vectorizer.get_feature_names()
for i, class_label in enumerate(class_labels):
top10 = np.argsort(clf.coef_[i])[-10:]
print("\n%s: %s" % (enkele_labels[class_label],
" ".join(feature_names[j] for j in top10)))
df = pd.read_excel(f).dropna()
#freq different possible code combinations
ordered_annotations = df['CodeAnnotaties'].value_counts()
#freq different possible described combinations
description_annotations = df['OmsAnnotaties'].value_counts()
#amount of unique code_annotations
unique_annotations = len(set(ordered_annotations))
#beschrijvingen die bij annotatie massa horen
decription_massa = df[df['OmsAnnotaties']=='Massa']
verzameling_categorie = defaultdict(list)
for annotatie, text in df[['CodeAnnotaties', 'EindverslagHuisartsTekst']].values:
verzameling_categorie[annotatie].append(text)
enkele_labels = list(set([key.split(';')[0] for key in verzameling_categorie.keys()]))
#aantal verslagen per enkele label
print ordered_annotations.ix[enkele_labels]
X_train, Y_train, X_test, Y_test = reorderData(verzameling_categorie, enkele_labels)
clf, labels, predicted = trainAndPredict(X_train, Y_train, X_test, Y_test)
# categoriesFiles(['1MAS1', '1MAS2', '2CAL2', '3ARC1'])
#printing top 10 words that define a class, based on frequency.
print_top10(clf.named_steps.get('vectorizer'), clf.named_steps.get('clf'), clf.classes_)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
20218,
7753,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
357... | 2.701987 | 1,057 |
from twisted.internet import defer
from ooni.tests import is_internet_connected, bases
from ooni import geoip
| [
198,
6738,
19074,
13,
37675,
1330,
29135,
198,
198,
6738,
267,
14651,
13,
41989,
1330,
318,
62,
37675,
62,
15236,
11,
12536,
198,
6738,
267,
14651,
1330,
40087,
541,
628
] | 3.766667 | 30 |
# -*- coding: utf-8 -*-
import requests
import settings
import json
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
198,
11748,
6460,
198,
11748,
33918,
628,
628
] | 2.958333 | 24 |
count = int(input("[>] Enter count of tops: "))
graph = [(float("inf"), -1, -1)]
status = True
while True:
status = input("[>] Enter the element of graph (lenght, top 1, top 2): ")
if status:
status = tuple(map(int, status.split()))
graph.append(status)
else:
break
unified = {1}
skeleton = []
while len(unified) < count:
r = get_min(graph, unified)
if r[0] == float("inf"):
break
skeleton.append(r)
unified.add(r[1])
unified.add(r[2])
print(skeleton)
| [
198,
198,
9127,
796,
493,
7,
15414,
7203,
58,
37981,
6062,
954,
286,
21246,
25,
366,
4008,
198,
198,
34960,
796,
47527,
22468,
7203,
10745,
12340,
532,
16,
11,
532,
16,
15437,
198,
198,
13376,
796,
6407,
198,
198,
4514,
6407,
25,
19... | 2.245763 | 236 |
import numpy as np
import os
import sys
import tensorflow as tf
from collections import defaultdict
import cv2
# This is needed since the notebook is stored in the object_detection folder.
#sys.path.append("..")
#from object_detection.utils import ops as utils_ops
#from object_detection.utils import label_map_util
# What model to download.
TOP_DIR = os.path.dirname(os.path.realpath(__file__))
#MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
#MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09'
MODEL_FILE = MODEL_NAME + '.tar.gz'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = TOP_DIR + '/' + MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
17268,
1330,
4277,
11600,
198,
11748,
269,
85,
17,
198,
198,
2,
770,
318,
2622,
1201,
262,
20922,
318,
8574,
287,... | 2.770186 | 322 |
#!/usr/bin/env python
# Copyright (C) 2013-2014 Bastian Kleineidam
"""
Script to get arcamax comics and save the info in a JSON file for further processing.
"""
from __future__ import print_function
import codecs
import re
import sys
import os
import requests
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from dosagelib.util import getPageContent, asciify, unescape
from dosagelib.scraper import get_scraperclasses
from scriptutil import contains_case_insensitive, capfirst, save_result, load_result, truncate_name
json_file = __file__.replace(".py", ".json")
url_matcher = re.compile(r'<li><b><a href="(/thefunnies/[^"]+)">([^<]+)</a>')
# names of comics to exclude
exclude_comics = [
"HagartheHorrible", # better source available
]
def handle_url(url, session, res):
"""Parse one search result page."""
print("Parsing", url, file=sys.stderr)
try:
data, baseUrl = getPageContent(url, session)
except IOError as msg:
print("ERROR:", msg, file=sys.stderr)
return
for match in url_matcher.finditer(data):
shortname = match.group(1)
name = unescape(match.group(2))
name = asciify(name.replace('&', 'And').replace('@', 'At'))
name = capfirst(name)
if name in exclude_comics:
continue
if contains_case_insensitive(res, name):
# we cannot handle two comics that only differ in case
print("INFO: skipping possible duplicate", repr(name), file=sys.stderr)
continue
res[name] = shortname
if not res:
print("ERROR:", "did not match any comics", file=sys.stderr)
def get_results():
"""Parse all search result pages."""
# store info in a dictionary {name -> shortname}
res = {}
session = requests.Session()
handle_url('http://www.arcamax.com/comics', session, res)
save_result(res, json_file)
def has_comic(name):
"""Check if comic name already exists."""
names = [
("Creators/%s" % name).lower(),
("DrunkDuck/%s" % name).lower(),
("GoComics/%s" % name).lower(),
("KeenSpot/%s" % name).lower(),
("ComicGenesis/%s" % name).lower(),
("SmackJeeves/%s" % name).lower(),
]
for scraperclass in get_scraperclasses():
lname = scraperclass.getName().lower()
if lname in names or lname == name.lower():
return True
return False
def print_results(args):
"""Print all comics that have at least the given number of minimum comic strips."""
min_comics, filename = args
with codecs.open(filename, 'a', 'utf-8') as fp:
for name, shortname in sorted(load_result(json_file).items()):
if name in exclude_comics:
continue
if has_comic(name):
prefix = u'#'
else:
prefix = u''
fp.write(u"%sadd(%r, %r)\n" % (prefix, str(truncate_name(name)),
str(shortname)))
if __name__ == '__main__':
if len(sys.argv) > 1:
print_results(sys.argv[1:])
else:
get_results()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
34,
8,
2211,
12,
4967,
17520,
666,
15983,
500,
312,
321,
198,
37811,
198,
7391,
284,
651,
10389,
321,
897,
12770,
290,
3613,
262,
7508,
287,
257,
19449,
2393,
329,
225... | 2.368582 | 1,305 |
import itertools
from learntools.core import *
import learntools.python.solns.word_search as word_search_module
word_search = word_search_module.word_search
import learntools.python.solns.multi_word_search as mws_module
multi_word_search = mws_module.multi_word_search
import learntools.python.solns.diamond as d_module
diamond = d_module.diamond
import learntools.python.solns.roulette_analysis as rou_module
roulette_gt = rou_module.conditional_roulette_probs
LightningLen = MultipartProblem(
ZA,ZB,ZC,ZD,ZE,
)
qvars = bind_exercises(globals(), [
LightningLen,
ZipValidator,
WordSearch,
MultiWordSearch,
DiamondArt,
RouletteAnalyzer
],
tutorial_id=112,
start=0,
)
__all__ = list(qvars)
| [
11748,
340,
861,
10141,
198,
198,
6738,
26338,
10141,
13,
7295,
1330,
1635,
198,
198,
11748,
26338,
10141,
13,
29412,
13,
34453,
5907,
13,
4775,
62,
12947,
355,
1573,
62,
12947,
62,
21412,
198,
4775,
62,
12947,
796,
1573,
62,
12947,
6... | 2.611888 | 286 |
#!/usr/bin/env python
"""A simple developer-centric test script.
"""
import argparse
import os
import time
from dm_api.dm_api import DmApi, DmApiRv
# Get configuration from the environment.
# All the expected variables must be defined...
API_URL: str = os.environ['SQUONK_API_URL']
KEYCLOAK_URL: str = os.environ['SQUONK_API_KEYCLOAK_URL']
KEYCLOAK_REALM: str = os.environ['SQUONK_API_KEYCLOAK_REALM']
KEYCLOAK_CLIENT_ID: str = os.environ['SQUONK_API_KEYCLOAK_CLIENT_ID']
KEYCLOAK_USER: str = os.environ['SQUONK_API_KEYCLOAK_USER']
KEYCLOAK_USER_PASSWORD: str = os.environ['SQUONK_API_KEYCLOAK_USER_PASSWORD']
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
32,
2829,
8517,
12,
28577,
1332,
4226,
13,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
198,
6738,
288,
76,
62,
15042,
13,
36020,
62,
15042,
133... | 2.386861 | 274 |
from threading import Thread
from multiprocessing import Process, Queue
from time import time
from numpy import array_split
# Process einfach durch Thread erstetzen, wenn multithreading statt processing gewünscht ist
if __name__ == "__main__":
main(16, 80000000)
| [
6738,
4704,
278,
1330,
14122,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
11,
4670,
518,
198,
6738,
640,
1330,
640,
198,
6738,
299,
32152,
1330,
7177,
62,
35312,
628,
198,
2,
10854,
304,
10745,
620,
288,
2575,
14122,
1931,
301,
316,... | 3.238095 | 84 |
from autoencoders.VAE import VAE, VAEBallTrack
from keras.preprocessing.image import ImageDataGenerator
DATASET_SIZE = 11862
INPUT_SHAPE = (40, 40, 1)
datagen = ImageDataGenerator(
rescale=1./255,
validation_split=0.2,
horizontal_flip=True,
vertical_flip=True
)
# Allow horizontal flip as a mirror image of a game is a valid game state
train_datagen = datagen.flow_from_directory('images_trans/',
target_size=(
INPUT_SHAPE[0], INPUT_SHAPE[1]),
color_mode='grayscale',
class_mode='input',
shuffle=True,
subset='training',
batch_size=32)
val_datagen = datagen.flow_from_directory('images_trans/',
target_size=(
INPUT_SHAPE[0], INPUT_SHAPE[1]),
color_mode='grayscale',
class_mode='input',
shuffle=True,
subset='validation',
batch_size=32)
vae = VAEBallTrack(
layers=5,
input_shape=INPUT_SHAPE,
latent_size=16,
kernel_size=5,
name="VAEP")
vae.summary()
history = vae.train(train_datagen, val_datagen, epochs=30, batch_size=32)
| [
6738,
1960,
6571,
19815,
364,
13,
11731,
36,
1330,
13753,
36,
11,
13753,
36,
23410,
24802,
198,
6738,
41927,
292,
13,
3866,
36948,
13,
9060,
1330,
7412,
6601,
8645,
1352,
198,
198,
35,
1404,
1921,
2767,
62,
33489,
796,
1367,
4521,
17,... | 1.617043 | 974 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import os
import struct
import sys
import threading
import time
import yaml
import zmq
sys.path.append(os.path.join(os.path.dirname('__file__'), '..'))
from common.pilot_gloval_variable import MPVariable
if __name__ == '__main__':
# Load config
ymlfile = open('../../vehicle/vehicle_config.yml')
cfg = yaml.load(ymlfile)
ymlfile.close()
pub_device = PubDevice(cfg)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
pub_device.end()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
2878,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
11... | 2.342857 | 245 |
import numpy as np
from mpi4py import MPI
from tqdm import tqdm
from ..prob_calculators import get_p_cos1_given_xeff_q_a1, get_p_a1_given_xeff_q
comm = MPI.COMM_WORLD
pe = comm.Get_rank() # identity of this process (process element, sometimes called rank)
nprocs = comm.Get_size() # number of processes
root = nprocs - 1 # special process responsible for administrative work
| [
11748,
299,
32152,
355,
45941,
198,
6738,
285,
14415,
19,
9078,
1330,
4904,
40,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
11485,
1676,
65,
62,
9948,
3129,
2024,
1330,
651,
62,
79,
62,
6966,
16,
62,
35569,
62,
... | 2.879699 | 133 |
"""
# https://code.google.com/p/promisedata/source/browse/#svn%2Ftrunk%2Feffort%2Falbrecht
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
@attribute KLOC numeric
@attribute SCRN numeric
@attribute FORM numeric
@attribute FILE numeric
@attribute ESCRN numeric
@attribute EFORM numeric
@attribute EFILE numeric
@attribute MM numeric
""" | [
37811,
198,
2,
3740,
1378,
8189,
13,
13297,
13,
785,
14,
79,
14,
16963,
1417,
1045,
14,
10459,
14,
25367,
325,
31113,
21370,
77,
4,
17,
37,
2213,
2954,
4,
17,
14304,
487,
419,
4,
17,
41129,
4679,
21474,
198,
198,
23615,
13639,
25,... | 3.27907 | 129 |
import numpy as np
from Cython.Build import cythonize
from setuptools import Extension, find_packages, setup
with open("opteryx/version.py", "r") as v:
vers = v.read()
exec(vers) # nosec
with open("README.md", "r") as rm:
long_description = rm.read()
try:
with open("requirements.txt") as f:
required = f.read().splitlines()
except FileNotFoundError:
# this sometimes fails - so put them here, but this needs to be maintained manually
required = ["cython", "numpy", "orjson", "cityhash", "sqloxide", "pyarrow"]
extensions = [
Extension(
name="cjoin",
sources=["opteryx/third_party/pyarrow_ops/cjoin.pyx"],
include_dirs=[np.get_include()],
),
# Extension(
# name="cythonize",
# sources=["opteryx/third_party/accumulation_tree/accumulation_tree.pyx"],
# )
# "mabel/data/internals/group_by.py",
]
setup(
name="opteryx",
version=__version__,
description="Serverless SQL Engine",
long_description=long_description,
long_description_content_type="text/markdown",
maintainer="Joocer",
author="joocer",
author_email="justin.joyce@joocer.com",
packages=find_packages(include=["opteryx", "opteryx.*"]),
url="https://github.com/mabel-dev/opteryx/",
install_requires=required,
ext_modules=cythonize(extensions),
)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
327,
7535,
13,
15580,
1330,
3075,
400,
261,
1096,
198,
6738,
900,
37623,
10141,
1330,
27995,
11,
1064,
62,
43789,
11,
9058,
198,
198,
4480,
1280,
7203,
404,
11471,
87,
14,
9641,
13,
9078,
1600... | 2.466425 | 551 |
# -*- coding: utf-8 -*-
import argparse
import JackFramework.SysBasic.define as sysdefine
# Parse the train model's para
class ArgsParser(object):
"""docstring for ArgsParser"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
11748,
3619,
21055,
6433,
13,
44387,
26416,
13,
13086,
355,
25064,
13086,
628,
198,
2,
2547,
325,
262,
4512,
2746,
338,
31215,
198,
4871,
943,
1... | 2.893204 | 103 |
import os
import pickle
from tensorflow.keras.models import load_model
import tensorflow as tf
### chage the base path.
#prefix='/phanns'
prefix=''
path = os.path.abspath(__file__)
root_dir = os.path.dirname(path)
#fasta_dir = os.path.join(root_dir, 'fasta')
model_dir = os.path.join(root_dir, 'deca_model')
fasta_dir = os.path.join(root_dir, 'fasta')
#model_dir = os.path.join(root_dir, 'model_under_val')
#model_under_val
#models=pickle.load(open( os.path.join(model_dir,"deca_model.p"), "rb" ))
#models=pickle.load(open( os.path.join(model_dir,"single.p"), "rb" ))
#graph = tf.get_default_graph()
#global graph
#graph = tf.compat.v1.get_default_graph()
| [
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
27530,
1330,
3440,
62,
19849,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
21017,
442,
496,
262,
2779,
3108,
13,
198,
2,
40290,
11639,
14,
7... | 2.461255 | 271 |
from hvac_cli import cmd
import pytest
from tests.modified_environ import modified_environ
| [
6738,
289,
85,
330,
62,
44506,
1330,
23991,
198,
11748,
12972,
9288,
198,
6738,
5254,
13,
41771,
62,
268,
2268,
1330,
9518,
62,
268,
2268,
628,
628
] | 3.481481 | 27 |
import numpy as np
import torch
from torch.utils.serialization import load_lua
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import MLModel, datatypes
import _layers
from _layers import _get_layer_converter_fn
from _utils import _gen_layer_name
from _utils import _convert_multiarray_output_to_image
_DEPROCESS_LAYER_NAME = 'deprocess_image'
def _infer_torch_output_shapes(torch_model, input_shapes):
"""
Forward torch model to infer output shape
"""
try:
return _forward_torch_random_input(
torch_model,
input_shapes,
is_batch=False
)
except:
# try batch mode
return _forward_torch_random_input(
torch_model,
input_shapes,
is_batch=True
)
def convert(model,
input_shapes,
input_names=['input'],
output_names=['output'],
mode=None,
image_input_names=[],
preprocessing_args={},
image_output_names=[],
deprocessing_args={},
class_labels=None,
predicted_feature_name='classLabel',
unknown_layer_converter_fn=None):
"""
Convert Torch7 model to CoreML.
Parameters
----------
model: Torch7 model (loaded with PyTorch) | str
A trained Torch7 model loaded in python using PyTorch or path to file
with model (*.t7).
input_shapes: list of tuples
Shapes of the input tensors.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model:
'classifier', a NeuralNetworkClassifier spec will be constructed.
'regressor', a NeuralNetworkRegressor spec will be constructed.
preprocessing_args: dict
'is_bgr', 'red_bias', 'green_bias', 'blue_bias', 'gray_bias',
'image_scale' keys with the same meaning as
https://apple.github.io/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
deprocessing_args: dict
Same as 'preprocessing_args' but for deprocessing.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains
the classification labels (one per line).
As a list of strings it represents a list of categories that map
the index of the output of a neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
unknown_layer_converter_fn: function with signature:
(builder, name, layer, input_names, output_names)
builder: object - instance of NeuralNetworkBuilder class
name: str - generated layer name
layer: object - pytorch object for corresponding layer
input_names: list of strings
output_names: list of strings
Returns: list of strings for layer output names
Callback function to handle unknown for torch2coreml layers
Returns
-------
model: A coreml model.
"""
_gen_layer_name.called = 0
_get_layer_converter_fn.unknown_converter_fn = unknown_layer_converter_fn
if isinstance(model, basestring):
torch_model = load_lua(model)
elif isinstance(model, torch.legacy.nn.Sequential):
torch_model = model
else:
raise TypeError(
"Model must be file path to .t7 file or pytorch loaded model \
with torch.legacy.nn.Sequential module as root"
)
torch_model.evaluate()
if not isinstance(input_shapes, list):
raise TypeError("Input shapes should be a list of tuples.")
for shape in input_shapes:
if not isinstance(shape, tuple):
raise TypeError("Input shape should be a tuple.")
if len(input_names) != len(input_shapes):
raise ValueError(
"Input names count must be equal to input shapes count"
)
output_shapes = _infer_torch_output_shapes(
torch_model,
input_shapes
)
if len(output_shapes) != len(output_names):
raise ValueError(
"Model has {} outputs, but you set output_names for {}."
.format(len(output_shapes), len(output_names))
)
# create input/output features
input_features = []
for i in range(len(input_names)):
input_features.append(
(input_names[i], datatypes.Array(*input_shapes[i]))
)
output_features = []
for i in range(len(output_names)):
output_features.append(
(output_names[i], datatypes.Array(*output_shapes[i]))
)
builder = NeuralNetworkBuilder(input_features, output_features, mode)
# build model
layer_name = _gen_layer_name(torch_model)
_output_names = output_names[:]
if len(image_output_names) > 0:
for i in range(len(_output_names)):
if _output_names[i] in image_output_names:
_output_names[i] = _gen_layer_name(_DEPROCESS_LAYER_NAME)
model_output_names = _layers._convert_layer(
builder, layer_name, torch_model, input_names, _output_names
)
# set preprocessing parameters
if len(image_input_names) > 0:
builder.set_pre_processing_parameters(
image_input_names=image_input_names,
is_bgr=preprocessing_args.get('is_bgr', False),
red_bias=preprocessing_args.get('red_bias', 0.0),
green_bias=preprocessing_args.get('green_bias', 0.0),
blue_bias=preprocessing_args.get('blue_bias', 0.0),
gray_bias=preprocessing_args.get('gray_bias', 0.0),
image_scale=preprocessing_args.get('image_scale', 1.0)
)
# set deprocessing parameters
if len(image_output_names) > 0:
for i in range(len(output_names)):
output_name = output_names[i]
if output_name in image_output_names:
output_shape = output_shapes[i]
if len(output_shape) == 2 or output_shape[0] == 1:
is_grayscale = True
elif output_shape[0] == 3:
is_grayscale = False
else:
raise ValueError('Output must be RGB image or Grayscale')
_set_deprocessing(
is_grayscale,
builder,
deprocessing_args,
model_output_names[i],
output_name
)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError(
"synset variable of unknown type. Type found: {}. \
Expected either string or list of strings."
.format(type(class_labels),))
builder.set_class_labels(
class_labels=labels,
predicted_feature_name=predicted_feature_name
)
return MLModel(builder.spec)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
28034,
13,
26791,
13,
46911,
1634,
1330,
3440,
62,
40211,
198,
198,
6738,
4755,
76,
2528,
10141,
13,
27530,
13,
710,
1523,
62,
27349,
1330,
47986,
26245,
32875,
198,
6738... | 2.269327 | 3,208 |
# Configuración necesaria para elasticsearch
INDEX_ELASTIC = "parse-speech"
HOST_ELASTIC = "http://localhost:8000"
| [
2,
17056,
333,
32009,
18840,
497,
728,
10312,
31215,
27468,
12947,
198,
198,
12115,
6369,
62,
3698,
11262,
2149,
796,
366,
29572,
12,
45862,
1,
198,
39,
10892,
62,
3698,
11262,
2149,
796,
366,
4023,
1378,
36750,
25,
33942,
1,
198
] | 2.829268 | 41 |
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import stat
import subprocess
import re
import netaddr
from orderedattrdict import AttrDict
from enum import Enum
from filelock import Timeout, FileLock
from socket import gethostbyname
from time import sleep
from random import random
import lib.logger as logger
from lib.ssh import SSH
from lib.switch_exception import SwitchException
from lib.genesis import get_switch_lock_path
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
SWITCH_LOCK_PATH = get_switch_lock_path()
| [
2,
15069,
13130,
19764,
11421,
13,
198,
2,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846... | 3.609272 | 302 |
import numpy as np
# import pandas as pd
from PetastormDataLoaders.PetastormDataModule import PetastormDataModule
from PetastormDataLoaders.PetastormDataLoader import TransformersDataLoader
# from petastorm.pytorch import DataLoader
from petastorm.transform import TransformSpec
from transformers import AutoTokenizer
# class TextParseDataModule(PetastormDataModule):
# def __init__(self, train_path, val_path, batch_size=16, num_minibatchs=8, num_workers=4, text_column="", target_column="target"):
# # Tokenised columns replace text based ones
# tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", fast=True)
# tokenize = lambda rows: pd.concat((rows[[target_column]], pd.DataFrame([tokenizer(rows[text_column].tolist(), padding=True, truncation=True, return_tensors="np").data])))
# edit_fields = [("token_type_ids", np.array, (), False), ("attention_mask", np.array, (), False), ("input_ids", np.array, (), False)]
# transform_spec = TransformSpec(tokenize, selected_fields=["token_type_ids", "attention_mask", "input_ids", target_column], edit_fields=edit_fields)
# super().__init__(train_path, val_path, batch_size, num_minibatchs, num_workers, transform_spec)
# def train_dataloader(self):
# return DataLoader(self.train_dataset)
# def val_dataloader(self):
# return DataLoader(self.val_dataset)
| [
11748,
299,
32152,
355,
45941,
198,
2,
1330,
19798,
292,
355,
279,
67,
198,
198,
6738,
4767,
459,
579,
6601,
8912,
364,
13,
25803,
459,
579,
6601,
26796,
1330,
4767,
459,
579,
6601,
26796,
198,
6738,
4767,
459,
579,
6601,
8912,
364,
... | 2.765873 | 504 |
#!/usr/bin/env python
from fragpy.min import optf
from fragpy.opt_param import param
''' RUN THIS PYHON SCRIPT
The following example corresponds to example2.inp
to be run as a python scrip rather than with
fragpy/fragpy.
'''
coord = [
['N1', -0.42041893 , -0.13177839 , -1.10917804,1 ],
['H1', -1.40858582 , 0.11956038 , -1.24870378,1 ],
['H1', -0.28384558 , -1.11091815 , -1.39903445,1 ],
['C1', -0.00216871 , 0.08483180 , 0.25616783,1 ],
['H1', -0.39265828 , 1.06387798 , 0.60162420,1 ],
['H1', -0.39940372 , -0.67976915 , 0.96544436,1 ],
['O1', 2.12202140 , -0.05541813 , 1.41852289,1 ],
['C1', 1.50690877 , 0.11681055 , 0.35450000,1 ],
['N1', 2.17229633 , 0.41300059 , -0.80613194,1 ],
['H1', 1.61743925 , 0.39099617 , -1.67914197,1 ],
['C1', 3.60579100 , 0.26015800 , -0.88365334,1 ],
['H1', 3.98655986 , 0.85892529 , -1.73685540,1 ],
['H1', 3.89241346 , -0.79855733 , -1.07385837,1 ],
['O2', 5.08781813 , 0.04132250 , 1.01281932,1 ],
['C2', 4.27464242 , 0.73395899 , 0.39867596,1 ],
['N2', 3.98348175 , 2.01878737 , 0.79938714,1 ],
['H2', 3.43282493 , 2.63099933 , 0.18309903,1 ],
['C2', 4.76122593 , 2.61623801 , 1.86407242,1 ],
['H2', 4.35676781 , 3.62599345 , 2.06786396,1 ],
['H2', 5.82595876 , 2.73751594 , 1.56692752,1 ],
['O2', 5.66511388 , 1.59445391 , 3.86333589,1 ],
['C2', 4.68630506 , 1.77805166 , 3.13675472,1 ],
['N2', 3.43637104 , 1.31311371 , 3.46034543,1 ],
['H2', 2.72977240 , 1.24393506 , 2.70650062,1 ],
['C2', 3.29169884 , 0.42278580 , 4.59426461,1 ],
['H2', 2.23254922 , 0.11078791 , 4.65912992,1 ],
['H2', 3.54711516 , 0.95336540 , 5.53314597,1 ],
['O3', 4.85766663 , -1.23688681 , 5.39578269,2 ],
['C3', 4.19277714 , -0.79728605 , 4.45029532,2 ],
['N3', 4.21805383 , -1.38043523 , 3.21570919,2 ],
['H3', 3.70456393 , -0.95089266 , 2.42777437,2 ],
['C3', 5.15543907 , -2.44929239 , 2.95191561,2 ],
['H3', 5.16119982 , -2.64413492 , 1.86303211,2 ],
['H3', 4.84740166 , -3.38764828 , 3.46142052,2 ],
['O3', 7.28708591 , -2.83640913 , 4.04203818,2 ],
['C3', 6.55593624 , -2.05991806 , 3.41540997,2 ],
['N3', 6.95745139 , -0.80243917 , 3.06803373,2 ],
['H3', 6.31824603 , -0.18640966 , 2.53819112,2 ],
['C3', 8.24828658 , -0.30447368 , 3.48116391,2 ],
['H3', 8.32134351 , 0.75784161 , 3.18076515,2 ],
['H3', 9.07228944 , -0.85568855 , 2.97773832,2 ],
['O4', 9.44612832 , -0.85375707 , 5.51446570,2 ],
['C4', 8.40838410 , -0.42709013 , 4.99176297,2 ],
['N4', 7.33672802 , -0.01428212 , 5.72685718,2 ],
['H4', 6.48290593 , 0.31651436 , 5.24560356,2 ],
['C4', 7.33144696 , -0.17643313 , 7.16220648,2 ],
['H4', 6.32158938 , 0.07872157 , 7.53530362,2 ],
['H4', 8.05456357 , 0.51512083 , 7.64713692,2 ],
['O4', 8.47655332 , -1.89355090 , 8.43762959,2 ],
['C4', 7.67674093 , -1.61515820 , 7.53963818,2 ],
['N4', 7.02017667 , -2.57690835 , 6.82415828,2 ],
['H4', 6.38612912 , -2.30502067 , 6.05316391,2 ],
['C4', 7.33424940 , -3.97086346 , 7.03263129,2 ],
['H4', 6.80823386 , -4.56441292 , 6.26085641,2 ],
['H4', 6.98075897 , -4.31935140 , 8.02740912,2 ],
['O5', 9.47233934 , -4.87302407 , 7.74516912,3 ],
['C5', 8.83632739 , -4.19656638 , 6.92497489,3 ],
['N5', 9.44330054 , -3.61764129 , 5.84936900,3 ],
['H5', 8.88890744 , -3.04173114 , 5.19149249,3 ],
['C5', 10.87616636 , -3.70638315 , 5.69832987,3 ],
['H5', 11.17810443 , -3.04909108 , 4.86045458,3 ],
['H5', 11.19174821 , -4.74211723 , 5.44257272,3 ],
['O5', 12.56528232 , -3.86197910 , 7.43608910,3 ],
['C5', 11.58286851 , -3.26232838 , 6.97630903,3 ],
['N5', 11.07414665 , -2.13994682 , 7.55710745,3 ],
['H5', 10.25510367 , -1.67359971 , 7.12811530,3 ],
['C5', 11.66860726 , -1.56637431 , 8.73858481,3 ],
['H5', 11.11338877 , -0.64121894 , 8.98583164,3 ],
['H5', 12.72446136 , -1.27272837 , 8.54641430,3 ],
['O6', 12.39269953 , -2.34061445 , 10.90565938,3 ],
['C6', 11.63485786 , -2.50219253 , 9.94436411,3 ],
['N6', 10.69325774 , -3.49181741 , 9.91408395,3 ],
['H6', 10.10356817 , -3.62548672 , 9.07112240,3 ],
['C6', 10.64471524 , -4.49099994 , 10.94918996,3 ],
['H6', 9.63087548 , -4.54552572 , 11.40367710,3 ],
['H6', 11.35087710 , -4.17692745 , 11.74614054,3 ],
['O6', 10.66826545 , -6.91016100 , 10.98212537,3 ],
['C6', 11.04013470 , -5.86793144 , 10.43256953,3 ],
['N6', 11.87179677 , -5.87934694 , 9.34419994,3 ],
['H6', 12.08001412 , -4.99459148 , 8.83777328,3 ],
['C6', 12.18268597 , -7.12175759 , 8.68978017,3 ],
['H6', 11.69473019 , -7.93252900 , 9.27362748,3 ],
['H6', 13.27873890 , -7.32711376 , 8.69085212,3 ],
['C6', 11.65589770 , -7.13960974 , 7.26837917,3 ],
['H6', 10.86303080 , -6.34878391 , 7.03486361,3 ],
['O6', 12.02822794 , -7.92704503 , 6.43786974,3 ]]
param.coarse=True
param.coarse_level=1
optf(coord)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
7956,
9078,
13,
1084,
1330,
2172,
69,
198,
6738,
7956,
9078,
13,
8738,
62,
17143,
1330,
5772,
198,
198,
7061,
6,
220,
32494,
12680,
350,
56,
39,
1340,
6374,
46023,
220,
198,
46... | 1.749565 | 2,871 |
#!/usr/bin/env python
################################################################################
# _ ____ ___ _____ _ _ _ _ #
# / \ / ___|_ _| |_ _|__ ___ | | | _(_) |_ #
# / _ \| | | | | |/ _ \ / _ \| | |/ / | __| #
# / ___ \ |___ | | | | (_) | (_) | | <| | |_ #
# ____ /_/ \_\____|___|___|_|\___/ \___/|_|_|\_\_|\__| #
# / ___|___ __| | ___ / ___| __ _ _ __ ___ _ __ | | ___ ___ #
# | | / _ \ / _` |/ _ \ \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __| #
# | |__| (_) | (_| | __/ ___) | (_| | | | | | | |_) | | __/\__ \ #
# \____\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|\___||___/ #
# |_| #
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Simple application to define 2 EPGs with a contract between them and statically
connecting the EPGs to specific interfaces using a specific VLANs.
It logs in to the APIC and will create the tenant, application profile,
EPGs, and Contract if they do not exist already. It then connects it to the
specified interface using the VLAN encapsulation specified.
Before running, examine the code and change the interface information if desired
as well as the VLAN.
"""
from acitoolkit import (Credentials, Session, Tenant, AppProfile, EPG, BridgeDomain, Context,
Interface, L2Interface, Contract, FilterEntry)
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = ('Simple application to define 2 EPGs with a contract between them and statically '
'connecting the EPGs to specific interfaces using a specific VLANs.')
creds = Credentials('apic', description)
args = creds.get()
# Login to the APIC
session = Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
# Create the Tenant, App Profile, and EPGs
tenant = Tenant('acitoolkit-attach-with-contract')
app = AppProfile('myapp', tenant)
first_epg = EPG('firstepg', app)
second_epg = EPG('secondepg', app)
# Create the Contract to permit only ARP and ICMP
contract = Contract('mycontract', tenant)
icmp_entry = FilterEntry('icmpentry',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='unspecified',
dToPort='unspecified',
etherT='ip',
prot='icmp',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract)
arp_entry = FilterEntry('arpentry',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='unspecified',
dToPort='unspecified',
etherT='arp',
prot='unspecified',
sFromPort='unspecified',
sToPort='unspecified',
tcpRules='unspecified',
parent=contract)
tcp_entry = FilterEntry('tcpentry',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='5000',
dToPort='5010',
etherT='ip',
prot='tcp',
sFromPort='5000',
sToPort='5010',
tcpRules='unspecified',
parent=contract)
udp_entry = FilterEntry('udpentry',
applyToFrag='no',
arpOpc='unspecified',
dFromPort='5000',
dToPort='5010',
etherT='ip',
prot='udp',
sFromPort='5000',
sToPort='5010',
tcpRules='unspecified',
parent=contract)
# Provide and consume the Contract
first_epg.provide(contract)
second_epg.consume(contract)
# Create the networking stuff and put both EPGs in the same BridgeDomain
vrf = Context('vrf-1', tenant)
bd = BridgeDomain('bd-1', tenant)
bd.add_context(vrf)
first_epg.add_bd(bd)
second_epg.add_bd(bd)
# Create the physical interface objects representing the physical ethernet ports
first_intf = Interface('eth', '1', '101', '1', '17')
second_intf = Interface('eth', '1', '102', '1', '17')
# Create a VLAN interface and attach to each physical interface
first_vlan_intf = L2Interface('vlan5-on-eth1-101-1-17', 'vlan', '5')
first_vlan_intf.attach(first_intf)
second_vlan_intf = L2Interface('vlan5-on-eth1-102-1-17', 'vlan', '5')
second_vlan_intf.attach(second_intf)
# Attach the EPGs to the VLAN interfaces
first_epg.attach(first_vlan_intf)
second_epg.attach(second_vlan_intf)
# Push the tenant configuration to the APIC
resp = session.push_to_apic(tenant.get_url(),
tenant.get_json())
if not resp.ok:
print('%% Error: Could not push the tenant configuration to APIC')
# Push the interface attachments to the APIC
resp = first_intf.push_to_apic(session)
if not resp.ok:
print('%% Error: Could not push interface configuration to APIC')
resp = second_intf.push_to_apic(session)
if not resp.ok:
print('%% Error: Could not push interface configuration to APIC')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
29113,
14468,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
4808,
220,
220,
220,
220,
1427,
46444,
220,
220,
220,
29343,
220,
220,
... | 1.949464 | 3,641 |
#!/usr/bin/env python
from distutils.core import setup
setup(name='pynode',
version='0.1.0',
description='A python module for building a python task worker run by node.js, providing two-way communication based on socket.io.',
author='Christopher S. Case',
author_email='chris.case@g33xnexus.com',
url='https://github.com/Morgul/nodepy',
packages=['pynode'],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
79,
2047,
1098,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
3256,
198,
220,
... | 2.6 | 155 |
from django.conf.urls import patterns, url
from scout.properties import views
urlpatterns = patterns(
'',
url(r'^$', views.property_list_view, name='list'),
url(r'^add/$', views.property_add_view, name='add'),
url(r'^edit/(?P<pk>\d+)/$', views.property_edit_view, name='edit'),
) | [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
7572,
11,
19016,
198,
198,
6738,
24490,
13,
48310,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
7572,
7,
198,
220,
220,
220,
705,
3256,
198,
220,
220,
220,
19016,
7,
81,
6,
61,
3,
... | 2.525424 | 118 |
from shape import shape
#.keys = the list of keys that are being pressed (since the last processed frame)
#.move - moves the birdy safely (and returns True if succeeded)
#.sets the next action for the bird (to jump or to fall, + indicators for how much to jump/fall)
#.gravity makes birdy fall/jump based on .jump and .fall indicators left move move_set
shapes = [
[
['\\','/'],
['/','\\']
]
]
| [
198,
6738,
5485,
1330,
5485,
198,
2,
13,
13083,
796,
262,
1351,
286,
8251,
326,
389,
852,
12070,
357,
20777,
262,
938,
13686,
5739,
8,
198,
2,
13,
21084,
532,
6100,
262,
6512,
88,
11512,
357,
392,
5860,
6407,
611,
14131,
8,
198,
2... | 3.058394 | 137 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rlkit.policies.base import Policy
from rlkit.torch.core import PyTorchModule, eval_np
class ImageStatePolicy(PyTorchModule, Policy):
"""Switches between image or state inputs"""
class ImageStateQ(PyTorchModule):
"""Switches between image or state inputs"""
| [
2,
15069,
33448,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.625532 | 235 |
#!/usr/bin/env python2
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
"""
Classify an image using individual model files
Use this script as an example to build your own tool
"""
from caffe.proto import caffe_pb2
import caffe
import argparse
import os
import time
import pydevd
from google.protobuf import text_format
import numpy as np
import PIL.Image
import scipy.misc
os.environ['GLOG_minloglevel'] = '2' # Suppress most caffe output
def get_net(caffemodel, deploy_file, use_gpu=True):
"""
Returns an instance of caffe.Net
Arguments:
caffemodel -- path to a .caffemodel file
deploy_file -- path to a .prototxt file
Keyword arguments:
use_gpu -- if True, use the GPU for inference
"""
if use_gpu:
caffe.set_mode_gpu()
# load a new model
return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_transformer(deploy_file, mean_file=None):
"""
Returns an instance of caffe.io.Transformer
Arguments:
deploy_file -- path to a .prototxt file
Keyword arguments:
mean_file -- path to a .binaryproto file (optional)
"""
network = caffe_pb2.NetParameter()
with open(deploy_file) as infile:
text_format.Merge(infile.read(), network)
if network.input_shape:
dims = network.input_shape[0].dim
else:
dims = network.input_dim[:4]
t = caffe.io.Transformer(
inputs={'data': dims}
)
# transpose to (channels, height, width)
t.set_transpose('data', (2, 0, 1))
# color images
if dims[1] == 3:
# channel swap
t.set_channel_swap('data', (2, 1, 0))
if mean_file:
# set mean pixel
with open(mean_file, 'rb') as infile:
blob = caffe_pb2.BlobProto()
blob.MergeFromString(infile.read())
if blob.HasField('shape'):
blob_dims = blob.shape
assert len(
blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
elif blob.HasField('num') and blob.HasField('channels') and \
blob.HasField('height') and blob.HasField('width'):
blob_dims = (blob.num, blob.channels, blob.height, blob.width)
else:
raise ValueError(
'blob does not provide shape or 4d dimensions')
pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
t.set_mean('data', pixel)
return t
def load_image(path, height, width, mode='RGB'):
"""
Load an image from disk
Returns an np.ndarray (channels x width x height)
Arguments:
path -- path to an image on disk
width -- resize dimension
height -- resize dimension
Keyword arguments:
mode -- the PIL mode that the image should be converted to
(RGB for color or L for grayscale)
"""
image = PIL.Image.open(path)
image = image.convert(mode)
image = np.array(image)
# squash
image = scipy.misc.imresize(image, (height, width), 'bilinear')
return image
def forward_pass(images, net, transformer, batch_size=None):
"""
Returns scores for each image as an np.ndarray (nImages x nClasses)
Arguments:
images -- a list of np.ndarrays
net -- a caffe.Net
transformer -- a caffe.io.Transformer
Keyword arguments:
batch_size -- how many images can be processed at once
(a high value may result in out-of-memory errors)
"""
if batch_size is None:
batch_size = 16
caffe_images = []
for image in images:
if image.ndim == 2:
caffe_images.append(image[:, :, np.newaxis])
else:
caffe_images.append(image)
dims = transformer.inputs['data'][1:]
scores = None
for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
new_shape = (len(chunk),) + tuple(dims)
if net.blobs['data'].data.shape != new_shape:
net.blobs['data'].reshape(*new_shape)
for index, image in enumerate(chunk):
image_data = transformer.preprocess('data', image)
net.blobs['data'].data[index] = image_data
start = time.time()
output = net.forward()[net.outputs[-1]]
end = time.time()
if scores is None:
scores = np.copy(output)
else:
scores = np.vstack((scores, output))
print('Processed %s/%s images in %f seconds ...' %
(len(scores), len(caffe_images), (end - start)))
return scores
def read_labels(labels_file):
"""
Returns a list of strings
Arguments:
labels_file -- path to a .txt file
"""
if not labels_file:
print('WARNING: No labels file provided. Results will be difficult to interpret.')
return None
labels = []
with open(labels_file) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels), 'No labels found'
return labels
def classify(caffemodel, deploy_file, image_files,
mean_file=None, labels_file=None, batch_size=None, use_gpu=True):
"""
Classify some images against a Caffe model and print the results
Arguments:
caffemodel -- path to a .caffemodel
deploy_file -- path to a .prototxt
image_files -- list of paths to images
Keyword arguments:
mean_file -- path to a .binaryproto
labels_file path to a .txt file
use_gpu -- if True, run inference on the GPU
"""
# Load the model and images
net = get_net(caffemodel, deploy_file, use_gpu)
transformer = get_transformer(deploy_file, mean_file)
_, channels, height, width = transformer.inputs['data']
if channels == 3:
mode = 'RGB'
elif channels == 1:
mode = 'L'
else:
raise ValueError('Invalid number for channels: %s' % channels)
images = [load_image(image_file, height, width, mode)
for image_file in image_files]
images = 33*images
labels = read_labels(labels_file)
# Classify the image
scores = forward_pass(images, net, transformer, batch_size=batch_size)
# Process the results
indices = (-scores).argsort()[:, :5] # take top 5 results
classifications = []
for image_index, index_list in enumerate(indices):
result = []
for i in index_list:
# 'i' is a category in labels and also an index into scores
if labels is None:
label = 'Class #%s' % i
else:
label = labels[i]
result.append((label, round(100.0*scores[image_index, i], 4)))
classifications.append(result)
if __name__ == '__main__':
# pydevd.settrace()
script_start_time = time.time()
parser = argparse.ArgumentParser(
description='Classification example - DIGITS')
# Positional arguments
parser.add_argument('caffemodel', help='Path to a .caffemodel')
parser.add_argument('deploy_file', help='Path to the deploy file')
parser.add_argument('image_file',
nargs='+',
help='Path[s] to an image')
# Optional arguments
parser.add_argument('-m', '--mean',
help='Path to a mean file (*.npy)')
parser.add_argument('-l', '--labels',
help='Path to a labels file')
parser.add_argument('--batch-size',
type=int)
parser.add_argument('--nogpu',
action='store_true',
help="Don't use the GPU")
args = vars(parser.parse_args())
classify(args['caffemodel'], args['deploy_file'], args['image_file'],
args['mean'], args['labels'], args['batch_size'], not args['nogpu'])
print('Script took %f seconds.' % (time.time() - script_start_time,))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
15069,
357,
66,
8,
1853,
12,
5304,
11,
15127,
23929,
44680,
6234,
13,
220,
1439,
2489,
10395,
13,
198,
198,
37811,
198,
9487,
1958,
281,
2939,
1262,
1981,
2746,
3696,
198,
198... | 2.333726 | 3,395 |
import os
from datetime import datetime
import logging
import requests
import json
import pandas as pd
from pycountry import countries
from git import Repo
from config_corona import ConfigCorona
# Set LOGGER
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Adding a stream handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
LOGGER.addHandler(ch)
LOGGER.info("Added StreamHandler")
class AddDailyFields():
"""
Takes the daily running total columns and
adds in daily new columns and PREV day
"""
def create_daily_new_col(self):
"""
Here we create a daily new column
"""
df = self.data
# Prep dataset 1-day delta to get day-over-day change
df["previous_days_date"] = df["date"] + pd.np.timedelta64(1, "D")
previous_df = df.copy()
previous_df = previous_df[[
self.key_col,
"previous_days_date",
self.col_confirmed,
]]
previous_df = previous_df.rename(
columns={self.col_confirmed: self.col_confirmed_prev_day}
)
# Join dataset to itself with a one day offset to get daily diff
df = df.merge(
previous_df,
how="left",
left_on=[self.key_col, "date"],
right_on=[self.key_col, "previous_days_date"],
)
df = df.drop(["previous_days_date_x", "previous_days_date_y"], axis=1)
df[self.col_confirmed_prev_day] = df[self.col_confirmed_prev_day].fillna(0)
df[self.col_daily_new] = (
df[self.col_confirmed] - df[self.col_confirmed_prev_day]
)
self.data = df
def push_output_to_github():
"""
Automatically push changes in output_data/ to GitHub
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
repo = Repo(repo_dir)
file_list = [
'coronavirus/output_data/HOPKINS_CLEANED.csv',
'coronavirus/output_data/JHU_aggregated_country_and_day.csv',
'coronavirus/output_data/NYT_US_state_data.csv',
'coronavirus/output_data/US_causes_of_death.csv',
]
# Add and commit
today = datetime.today().strftime('%Y-%m-%d')
commit_message = f'Adding output data for {today}'
repo.index.add(file_list)
repo.index.commit(commit_message)
# Push
origin = repo.remote('origin')
origin.push()
def send_slack(text):
"""
Sends simple Slack message
"""
config = ConfigCorona()
try:
url = config.slack_url
data = {'text':text}
response = requests.post(url,
data=json.dumps(data),
headers={'Content-Type': 'application/json'})
if response:
LOGGER.info("======= Slack Success =======")
else:
LOGGER.warning("SLACK ERROR")
except Exception as e:
LOGGER.warning(f"Slack message is failing {e}")
def pull_median_country_age():
"""
Pull median age by country data from Wikipedia
"""
age_wiki = pd.read_html('https://en.wikipedia.org/wiki/List_of_countries_by_median_age')
df = age_wiki[0]
df.columns = df.columns.str.lower()
df.columns = ['country','rank','median_years','male_years','female_years']
# Add country codes
df['country_code_2'] = df.apply(pandas_add_cc_2, axis=1)
df['country_code_3'] = df.apply(pandas_add_cc_3, axis=1)
df.loc[df['country'] == 'Virgin Islands', 'country_code_2'] = 'VG'
df.loc[df['country'] == 'Virgin Islands', 'country_code_3'] = 'VGB'
df.loc[df['country'] == 'Curacao', 'country_code_2'] = 'CW'
df.loc[df['country'] == 'Curacao', 'country_code_3'] = 'CUW'
df.loc[df['country'] == 'Sint Maarten', 'country_code_2'] = 'SX'
df.loc[df['country'] == 'Sint Maarten', 'country_code_3'] = 'SXM'
df.loc[df['country'] == 'Kosovo', 'country_code_2'] = 'XK'
df.loc[df['country'] == 'Kosovo', 'country_code_3'] = 'XKX'
df.loc[df['country'] == 'Niger', 'country_code_2'] = 'NE'
df.loc[df['country'] == 'Niger', 'country_code_3'] = 'NER'
df = df.drop_duplicates('country_code_3')
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'ref_median_age_country.csv')
df.to_csv(filename, index=False)
def pull_US_state_population_data():
"""
Scrapes a Wikipedia page to pull US state population data
"""
wiki = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states_by_population')
df = wiki[0]
df.columns = df.columns.str.lower().str.replace(' ','_')
df = df[['state','population_estimate,_july_1,_2019[2]']]
df.columns = ['state','us_state_pop_2019_estimate']
df.loc[df['state'] == 'District of Columbia', 'state'] = 'Washington DC'
df.loc[df['state'] == 'U.S. Virgin Islands', 'state'] = 'Virgin Islands'
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'ref_us_state_population.csv')
df.to_csv(filename, index=False)
return df
def create_FIPS_ref_data():
"""
Load up a FIPS ref table from GitHub
"""
FIPS = pd.read_json('https://raw.githubusercontent.com/josh-byster/fips_lat_long/master/fips_map.json')
FIPS = FIPS.transpose().reset_index()
FIPS = FIPS.rename(columns={'index':'fips'})
FIPS['fips'] = FIPS['fips'].astype(str)
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'FIPS_ref_data.csv')
FIPS.to_csv(filename, index=False)
def load_FIPS_data():
""" Load FIPS ref table """
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'FIPS_ref_data.csv')
df = pd.read_csv(filename)
df['fips'] = df['fips'].astype(str)
return df
def load_ref_US_state():
""" Load US state code ref table """
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'ref_table_us_states.csv')
df = pd.read_csv(filename)
df.columns = df.columns.str.lower().str.replace(' ','_')
return df
def load_ref_US_county_info():
""" Load US county info ref table
Unique to the county, state, latitude, and longitude
"""
directory = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
filename = os.path.join(directory, 'ref_data', 'county_to_zip_ref.csv')
df = pd.read_csv(filename)
df.columns = df.columns.str.lower().str.replace(' ','_')
return df
def pandas_add_cc_2(row):
"""Pandas function for pulling 2-letter country code"""
country = row['country']
#print(country)
try:
alpha_2 = countries.search_fuzzy(country)[0].alpha_2
return alpha_2
except LookupError:
print("no dice")
pass
def pandas_add_cc_3(row):
"""Pandas function for pulling 3-letter country code"""
country = row['country']
#print(country)
try:
alpha_3 = countries.search_fuzzy(country)[0].alpha_3
return alpha_3
except LookupError:
print("no dice")
pass
| [
11748,
28686,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
12972,
19315,
1330,
2678,
198,
6738,
17606,
1330,
1432,
78,
198,
6738,
4566,
... | 2.295026 | 3,237 |
from cc3d import CompuCellSetup
from diffusion_steady_state_ext_potential_3DSteppables import diffusion_steady_state_ext_potential_3DSteppable
CompuCellSetup.register_steppable(steppable=diffusion_steady_state_ext_potential_3DSteppable(frequency=1))
CompuCellSetup.run()
| [
6738,
36624,
18,
67,
1330,
3082,
84,
28780,
40786,
198,
198,
6738,
44258,
62,
28044,
88,
62,
5219,
62,
2302,
62,
13059,
1843,
62,
18,
5258,
660,
381,
2977,
1330,
44258,
62,
28044,
88,
62,
5219,
62,
2302,
62,
13059,
1843,
62,
18,
5... | 2.824742 | 97 |
"""
Автор: Моисеенко Павел, группа № 1, подгруппа № 2.
Задание: разработайте скрипт с функцией, которая для ряда Фибо-
наччи, где количество элементов, n = 22, возвращает подмножество
значений или единственное значение (по вариантам). Для нахождения
элемента требуется использовать слайсы. Формирование отчета по
выполнению задания и размещение его в портфолио, персональном
репозитории.
"""
main()
| [
37811,
198,
220,
220,
220,
12466,
238,
38857,
20375,
15166,
21169,
25,
12466,
250,
15166,
18849,
21727,
16843,
16843,
22177,
31583,
15166,
12466,
253,
16142,
38857,
16843,
30143,
11,
12466,
111,
21169,
35072,
140,
123,
140,
123,
16142,
2343... | 0.957965 | 452 |
if __name__ == '__main__':
S = Solution()
a = S.longestValidParentheses(")(((((()())()()))()(()))(")
print(a) | [
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
311,
796,
28186,
3419,
198,
220,
220,
220,
257,
796,
311,
13,
6511,
395,
47139,
24546,
39815,
7,
4943,
19510,
19510,
7,
3419,
28955,
3419,
... | 2.155172 | 58 |
import xarray as xr
import json
import time
import pandas as pd
import numpy as np
import warnings
from six import StringIO
from datetime import datetime
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
def get_asos(time_window, lat_range=None,
             lon_range=None, station=None):
    """
    Returns all of the station observations from the Iowa Mesonet from either
    a given latitude and longitude window or a given station code.

    Parameters
    ----------
    time_window: tuple
        A 2 member list or tuple containing the start and end times. The
        times must be python datetimes.
    lat_range: tuple
        The latitude window to grab all of the ASOS observations from.
    lon_range: tuple
        The longitude window to grab all of the ASOS observations from.
    station: str
        The station ID to grab the ASOS observations from.

    Returns
    -------
    asos_ds: dict of xarray datasets
        A dictionary of ACT datasets whose keys are the ASOS station IDs.

    Raises
    ------
    ValueError
        If neither a lat/lon window nor a station ID is given.

    Examples
    --------
    If you want to obtain timeseries of ASOS observations for Chicago O'Hare
    Airport, simply do::

        $ time_window = [datetime(2020, 2, 4, 2, 0), datetime(2020, 2, 10, 10, 0)]
        $ station = "KORD"
        $ my_asoses = act.discovery.get_asos(time_window, station="ORD")
    """
    # First query the database for all of the JSON info for every station
    # Only add stations whose lat/lon are within the Grid's boundaries
    regions = """AF AL_ AI_ AQ_ AG_ AR_ AK AL AM_
         AO_ AS_ AR AW_ AU_ AT_
         AZ_ BA_ BE_ BB_ BG_ BO_ BR_ BF_
         BT_ BS_ BI_ BM_ BB_ BY_ BZ_ BJ_ BW_ AZ CA CA_AB
         CA_BC CD_ CK_ CF_ CG_ CL_ CM_ CO CO_ CN_ CR_ CT
         CU_ CV_ CY_ CZ_ DE DK_ DJ_ DM_ DO_
         DZ EE_ ET_ FK_ FM_ FJ_ FI_ FR_ GF_ PF_
         GA_ GM_ GE_ DE_ GH_ GI_ KY_ GB_ GR_ GL_ GD_
         GU_ GT_ GN_ GW_ GY_ HT_ HN_ HK_ HU_ IS_ IN_
         ID_ IR_ IQ_ IE_ IL_ IT_ CI_ JM_ JP_
         JO_ KZ_ KE_ KI_ KW_ LA_ LV_ LB_ LS_ LR_ LY_
         LT_ LU_ MK_ MG_ MW_ MY_ MV_ ML_ CA_MB
         MH_ MR_ MU_ YT_ MX_ MD_ MC_ MA_ MZ_ MM_ NA_ NP_
         AN_ NL_ CA_NB NC_ CA_NF NF_ NI_
         NE_ NG_ MP_ KP_ CA_NT NO_ CA_NS CA_NU OM_
         CA_ON PK_ PA_ PG_ PY_ PE_ PH_ PN_ PL_
         PT_ CA_PE PR_ QA_ CA_QC RO_ RU_RW_ SH_ KN_
         LC_ VC_ WS_ ST_ CA_SK SA_ SN_ RS_ SC_
         SL_ SG_ SK_ SI_ SB_ SO_ ZA_ KR_ ES_ LK_ SD_ SR_
         SZ_ SE_ CH_ SY_ TW_ TJ_ TZ_ TH_
         TG_ TO_ TT_ TU TN_ TR_ TM_ UG_ UA_ AE_ UN_ UY_
         UZ_ VU_ VE_ VN_ VI_ YE_ CA_YT ZM_ ZW_
         EC_ EG_ FL GA GQ_ HI HR_ IA ID IL IO_ IN KS
         KH_ KY KM_ LA MA MD ME
         MI MN MO MS MT NC ND NE NH NJ NM NV NY OH OK
         OR PA RI SC SV_ SD TD_ TN TX UT VA VT VG_
         WA WI WV WY"""

    networks = ["AWOS"]
    metadata_list = {}
    if lat_range is not None and lon_range is not None:
        # Bounding-box mode: keep every station inside the lat/lon window.
        lon_min, lon_max = lon_range
        lat_min, lat_max = lat_range
        for region in regions.split():
            networks.append("%s_ASOS" % (region,))
        site_list = []
        for network in networks:
            # Get metadata for every station in this network.
            uri = ("https://mesonet.agron.iastate.edu/"
                   "geojson/network/%s.geojson") % (network,)
            data = urlopen(uri)
            jdict = json.load(data)
            for site in jdict["features"]:
                lat = site["geometry"]["coordinates"][1]
                lon = site["geometry"]["coordinates"][0]
                if lat_min <= lat <= lat_max and lon_min <= lon <= lon_max:
                    station_metadata_dict = {}
                    station_metadata_dict["site_latitude"] = lat
                    # FIX: this previously stored the latitude a second time.
                    station_metadata_dict["site_longitude"] = lon
                    for my_keys in site["properties"]:
                        station_metadata_dict[my_keys] = site["properties"][my_keys]
                    metadata_list[site["properties"]["sid"]] = station_metadata_dict
                    site_list.append(site["properties"]["sid"])
    elif station is not None:
        # Single-station mode: scan all networks for the matching station ID.
        site_list = [station]
        for region in regions.split():
            networks.append("%s_ASOS" % (region,))
        for network in networks:
            # Get metadata
            uri = ("https://mesonet.agron.iastate.edu/"
                   "geojson/network/%s.geojson") % (network,)
            data = urlopen(uri)
            jdict = json.load(data)
            for site in jdict["features"]:
                lat = site["geometry"]["coordinates"][1]
                lon = site["geometry"]["coordinates"][0]
                if site["properties"]["sid"] == station:
                    station_metadata_dict = {}
                    station_metadata_dict["site_latitude"] = lat
                    station_metadata_dict["site_longitude"] = lon
                    for my_keys in site["properties"]:
                        if my_keys == "elevation":
                            station_metadata_dict["elevation"] = \
                                '%f meter' % site["properties"][my_keys]
                        else:
                            station_metadata_dict[my_keys] = \
                                site["properties"][my_keys]
                    metadata_list[station] = station_metadata_dict
    else:
        raise ValueError("Either both lat_range and lon_range or station must " +
                         "be specified!")

    # Build the CGI request URL covering the requested time window.
    start_time = time_window[0]
    end_time = time_window[1]
    SERVICE = "http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
    service = SERVICE + "data=all&tz=Etc/UTC&format=comma&latlon=yes&"
    service += start_time.strftime("year1=%Y&month1=%m&day1=%d&hour1=%H&minute1=%M&")
    service += end_time.strftime("year2=%Y&month2=%m&day2=%d&hour2=%H&minute2=%M")
    station_obs = {}
    for stations in site_list:
        uri = "%s&station=%s" % (service, stations)
        print("Downloading: %s" % (stations,))
        data = _download_data(uri)
        buf = StringIO()
        buf.write(data)
        buf.seek(0)
        # First 5 rows are comment lines from the service; "M" marks missing.
        my_df = pd.read_csv(buf, skiprows=5, na_values="M")
        if len(my_df['lat'].values) == 0:
            warnings.warn(
                "No data available at station %s between time %s and %s" %
                (stations, start_time.strftime('%Y-%m-%d %H:%M:%S'),
                 end_time.strftime('%Y-%m-%d %H:%M:%S')))
        else:
            # Index by observation time and convert to an xarray dataset.
            my_df["time"] = my_df["valid"].apply(to_datetime)
            my_df = my_df.set_index("time")
            my_df = my_df.drop("valid", axis=1)
            my_df = my_df.drop("station", axis=1)
            my_df = my_df.to_xarray()
            my_df.attrs = metadata_list[stations]
            my_df["lon"].attrs["units"] = "degree"
            my_df["lon"].attrs["long_name"] = "Longitude"
            my_df["lat"].attrs["units"] = "degree"
            my_df["lat"].attrs["long_name"] = "Latitude"
            my_df["tmpf"].attrs["units"] = "degrees Fahrenheit"
            my_df["tmpf"].attrs["long_name"] = "Temperature in degrees Fahrenheit"
            # FIX: Fahrenheit -> Celsius is (F - 32) * 5/9, not (5/9 * F) - 32.
            my_df["temp"] = (my_df["tmpf"] - 32.0) * (5. / 9.)
            my_df["temp"].attrs["units"] = "degrees Celsius"
            my_df["temp"].attrs["long_name"] = "Temperature in degrees Celsius"
            my_df["dwpf"].attrs["units"] = "degrees Fahrenheit"
            my_df["dwpf"].attrs["long_name"] = "Dewpoint temperature in degrees Fahrenheit"
            # FIX: dewpoint conversion previously read "tmpf" and used the
            # same incorrect formula as above.
            my_df["dwpc"] = (my_df["dwpf"] - 32.0) * (5. / 9.)
            my_df["dwpc"].attrs["units"] = "degrees Celsius"
            my_df["dwpc"].attrs["long_name"] = "Dewpoint temperature in degrees Celsius"
            my_df["relh"].attrs["units"] = "percent"
            my_df["relh"].attrs["long_name"] = "Relative humidity"
            my_df["drct"].attrs["units"] = "degrees"
            # FIX: drct is the wind direction, not a speed.
            my_df["drct"].attrs["long_name"] = "Wind direction in degrees"
            my_df["sknt"].attrs["units"] = "knots"
            my_df["sknt"].attrs["long_name"] = "Wind speed in knots"
            # knots -> m/s
            my_df["spdms"] = my_df["sknt"] * 0.514444
            my_df["spdms"].attrs["units"] = "m s-1"
            my_df["spdms"].attrs["long_name"] = "Wind speed in meters per second"
            # Meteorological convention: wind *from* drct degrees.
            my_df['u'] = -np.sin(np.deg2rad(my_df["drct"])) * my_df["spdms"]
            my_df['u'].attrs["units"] = "m s-1"
            my_df['u'].attrs["long_name"] = "Zonal component of surface wind"
            my_df['v'] = -np.cos(np.deg2rad(my_df["drct"])) * my_df["spdms"]
            my_df['v'].attrs["units"] = "m s-1"
            my_df['v'].attrs["long_name"] = "Meridional component of surface wind"
            my_df["mslp"].attrs["units"] = "mb"
            my_df["mslp"].attrs["long_name"] = "Mean Sea Level Pressure"
            my_df["alti"].attrs["units"] = "in Hg"
            my_df["alti"].attrs["long_name"] = "Atmospheric pressure in inches of Mercury"
            my_df["vsby"].attrs["units"] = "mi"
            my_df["vsby"].attrs["long_name"] = "Visibility"
            # miles -> km
            my_df["vsbykm"] = my_df["vsby"] * 1.60934
            my_df["vsbykm"].attrs["units"] = 'km'
            my_df["vsbykm"].attrs["long_name"] = "Visibility"
            # knots -> m/s
            my_df["gust"] = my_df["gust"] * 0.514444
            my_df["gust"].attrs["units"] = 'm s-1'
            my_df["gust"].attrs["long_name"] = "Wind gust speed"
            my_df["skyc1"].attrs["long_name"] = "Sky level 1 coverage"
            my_df["skyc2"].attrs["long_name"] = "Sky level 2 coverage"
            my_df["skyc3"].attrs["long_name"] = "Sky level 3 coverage"
            my_df["skyc4"].attrs["long_name"] = "Sky level 4 coverage"
            # feet -> meters
            my_df["skyl1"] = my_df["skyl1"] * 0.3048
            my_df["skyl2"] = my_df["skyl2"] * 0.3048
            my_df["skyl3"] = my_df["skyl3"] * 0.3048
            my_df["skyl4"] = my_df["skyl4"] * 0.3048
            my_df["skyl1"].attrs["long_name"] = "Sky level 1 altitude"
            my_df["skyl2"].attrs["long_name"] = "Sky level 2 altitude"
            my_df["skyl3"].attrs["long_name"] = "Sky level 3 altitude"
            my_df["skyl4"].attrs["long_name"] = "Sky level 4 altitude"
            # FIX: these previously clobbered the long_name attributes set
            # just above; "meter" is the units attribute.
            my_df["skyl1"].attrs["units"] = "meter"
            my_df["skyl2"].attrs["units"] = "meter"
            my_df["skyl3"].attrs["units"] = "meter"
            my_df["skyl4"].attrs["units"] = "meter"
            my_df["wxcodes"].attrs["long_name"] = "Weather code"
            # inches -> cm
            my_df["ice_accretion_1hr"] = my_df["ice_accretion_1hr"] * 2.54
            my_df["ice_accretion_1hr"].attrs["units"] = "cm"
            my_df["ice_accretion_1hr"].attrs["long_name"] = "1 hour ice accretion"
            my_df["ice_accretion_3hr"] = my_df["ice_accretion_3hr"] * 2.54
            my_df["ice_accretion_3hr"].attrs["units"] = "cm"
            my_df["ice_accretion_3hr"].attrs["long_name"] = "3 hour ice accretion"
            # FIX: previously converted the 3-hour field into the 6-hour slot.
            my_df["ice_accretion_6hr"] = my_df["ice_accretion_6hr"] * 2.54
            my_df["ice_accretion_6hr"].attrs["units"] = "cm"
            my_df["ice_accretion_6hr"].attrs["long_name"] = "6 hour ice accretion"
            my_df["peak_wind_gust"] = my_df["peak_wind_gust"] * 0.514444
            my_df["peak_wind_gust"].attrs["units"] = 'm s-1'
            my_df["peak_wind_gust"].attrs["long_name"] = "Peak wind gust speed"
            # FIX: attribute key was "drct"; it should be "units".
            my_df["peak_wind_drct"].attrs["units"] = 'degree'
            my_df["peak_wind_drct"].attrs["long_name"] = "Peak wind gust direction"
            my_df['u_peak'] = -np.sin(np.deg2rad(my_df["peak_wind_drct"])) * my_df["peak_wind_gust"]
            my_df['u_peak'].attrs["units"] = "m s-1"
            my_df['u_peak'].attrs["long_name"] = "Zonal component of surface wind"
            my_df['v_peak'] = -np.cos(np.deg2rad(my_df["peak_wind_drct"])) * my_df["peak_wind_gust"]
            my_df['v_peak'].attrs["units"] = "m s-1"
            my_df['v_peak'].attrs["long_name"] = "Meridional component of surface wind"
            my_df["metar"].attrs["long_name"] = "Raw METAR code"
            my_df.attrs['_datastream'] = stations
        buf.close()
        station_obs[stations] = my_df
    return station_obs
| [
11748,
2124,
18747,
355,
2124,
81,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
14601,
198,
198,
6738,
2237,
1330,
10903,
9399,
198,
6738,
4818,
8079,
1330,
48... | 1.954459 | 6,280 |
# Draw five pseudo-random integers in [1, 10] and report the extremes.
# (An earlier, commented-out list-based draft of this exercise was removed.)
from random import randint
numeros = (randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10))
print('Os valores sorteados foram: ', end= '')
for n in numeros:
    print(f'{n} ', end='')
# max()/min() over the tuple give the largest and smallest drawn values.
print(f'\n'
      f'O MAIOR VALOR FOI {max(numeros)}')
print(f'O Menor VALOR FOI {min(numeros)}')
| [
2,
198,
2,
422,
4738,
1330,
43720,
600,
198,
2,
5470,
418,
796,
17635,
198,
2,
542,
796,
657,
198,
2,
17266,
1504,
796,
352,
198,
2,
1450,
273,
796,
7388,
198,
2,
981,
542,
19841,
604,
25,
198,
2,
220,
220,
220,
220,
542,
1585... | 2.113372 | 344 |
#!/usr/bin/env python3
"""
Given:
summary tab delimited file
Render:
- simple SHAPE profile
- shape profile with error bars
- modified, untreated, denatured mutation rate profiles and stderr shading
- depths for each sample FIXME: get log-scaled depths working again
- (above profiles should only be rendered for RNAs
below some reasonable length to avoid out-of-memory errors.
In the future, render zoomed out median profiles)
- depth histograms
- reactivity, stderr histograms
Also do quality control checks:
- good depths over some fraction of nucs
- good mutation rates above background
- not too many high mutation rates in untreated sample
- Write any warnings to stdout, and display prominently in figures.
- In the future, also check for PCR artifacts (clusters of high
background positions explained by partial local self-complementarity).
"""
# --------------------------------------------------------------------- #
# This file is a part of ShapeMapper, and is licensed under the terms #
# of the MIT license. Copyright 2018 Steven Busan. #
# --------------------------------------------------------------------- #
import sys, os, argparse
from numpy import isnan, nan, sqrt
from numpy import nanpercentile as percentile
import numpy as np
from math import ceil
import matplotlib as mp
# Configure matplotlib for headless rendering (no display required).
# FIX: mp.use('Agg') was called twice; the duplicate call was removed.
mp.use('Agg')
mp.rcParams["font.sans-serif"].insert(0,"Arial")
mp.rcParams["font.family"] = "sans-serif"
mp.rcParams["pdf.fonttype"] = 42 # use TrueType fonts when exporting PDFs
                                 # (embeds most fonts - this is especially
                                 # useful when opening in Adobe Illustrator)
mp.rcParams['xtick.direction'] = 'out'
mp.rcParams['ytick.direction'] = 'out'
mp.rcParams['legend.fontsize'] = 14
mp.rcParams['grid.color'] = ".8"
mp.rcParams['grid.linestyle'] = '-'
mp.rcParams['grid.linewidth'] = 1
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

# Colors for the modified (rx), untreated (bg), and denatured (dc) samples.
rx_color = "red"
bg_color = "blue"
dc_color = "darkgoldenrod"
if __name__=="__main__":
parser = argparse.ArgumentParser()
h = "Tab-delimited file summarizing reactivity"
h += " profile, mutation rates, and sequencing depth."
parser.add_argument("--infile", help=h, required=True, type=str)
h = "PDF file to render reactivity profile, mutation rates, and depths."
h += " This will only be rendered if the sequence is less than maxlen nucleotides long."
parser.add_argument("--plot", help=h, type=str)
h = "Maximum sequence length for rendering profile figures to PDF (large sequences "
h += "can cause out-of-memory errors)."
parser.add_argument("--maxlen", help=h, type=int, default=10000)
h = "PDF file to render summary histograms."
parser.add_argument("--hist", help=h, type=str)
h = "Title for figures."
parser.add_argument("--title", help=h, type=str)
h = "Minimum required sequencing depth for all provided samples for"
h += " including a nucleotide position."
parser.add_argument("--mindepth", help=h, type=int, default=5000)
h = "Maximum allowed mutation rate in untreated sample (if present)"
h += " for including a nucleotide position."
parser.add_argument("--maxbg", help=h, type=float, default=0.05)
h = "Minimum fraction of nucleotides that must pass mindepth threshold."
parser.add_argument("--min-depth-pass", help=h, type=float, default=0.8)
h = "Maximum fraction of nucleotides that can have background mutation rates above maxbg."
parser.add_argument("--max-high-bg", help=h, type=float, default=0.05)
h = "Minimum fraction of nucleotides with positive mutation rates above background"
h += " (excluding low-depth positions)."
parser.add_argument("--min-positive", help=h, type=float, default=0.5)
h = "Mutation rate (or rate difference, if using untreated control) threshold"
h += " for calling highly mutated nucleotides."
parser.add_argument("--high-mut-thresh", help=h, type=float, default=0.006) # 0.006-0.007
h = "Minimum fraction of nucleotides that are highly mutated"
h += " (excluding low-depth positions)."
parser.add_argument("--min-high-mut", help=h, type=float, default=0.08)
h = "Amplicon primer pair sequences and locations (to exclude from mutation rate histogram plots)"
parser.add_argument("--primers", help=h, type=str)
p = parser.parse_args(sys.argv[1:])
primers = []
if p.primers is not None and p.primers != "":
primers = load_primers(p.primers)
d = load_tab(p.infile)
# create output directories if needed
if p.plot is not None:
o = os.path.split(p.plot)[0]
if len(o)>0:
os.makedirs(o, exist_ok=True)
if p.hist is not None:
o = os.path.split(p.hist)[0]
if len(o)>0:
os.makedirs(o, exist_ok=True)
if "Norm_profile" in d:
profile = d["Norm_profile"]
stderr = d["Norm_stderr"]
else:
profile = None
stderr = None
if p.title is not None:
title = p.title
else:
title = ""
# mask out amplicon primer pair site sequences before running QC checks
masked_sequence = d["Sequence"].copy()
masked = np.zeros(masked_sequence.size, dtype=bool)
for pair in primers:
for primer in [pair.fw, pair.rv]:
for i in range(primer.left-1, primer.right):
try:
masked[i] = True
except IndexError:
pass
for i in range(len(masked)):
if masked[i]:
masked_sequence[i] = masked_sequence[i].lower()
qc_pass = qc_stats(masked_sequence,
d["Modified_rate"],
d["Untreated_rate"],
d["Denatured_rate"],
d["Modified_effective_depth"],
d["Untreated_effective_depth"],
d["Denatured_effective_depth"],
p.mindepth, p.maxbg,
p.min_depth_pass,
p.max_high_bg,
p.min_positive,
p.high_mut_thresh,
p.min_high_mut)
if p.plot is not None and (d["Modified_rate"] is None or
len(d["Modified_rate"]) <= p.maxlen):
render_profiles(
d["Nucleotide"], d["Sequence"], profile, stderr,
d["Modified_rate"], d["Untreated_rate"], d["Denatured_rate"],
d["Modified_effective_depth"], d["Untreated_effective_depth"], d["Denatured_effective_depth"],
d["Modified_read_depth"], d["Untreated_read_depth"], d["Denatured_read_depth"],
p.plot, title, qc_pass)
if p.hist is not None:
write_histograms(
profile, stderr,
p.mindepth, p.maxbg,
d["Sequence"],
d["Modified_rate"], d["Untreated_rate"], d["Denatured_rate"],
d["Modified_effective_depth"], d["Untreated_effective_depth"], d["Denatured_effective_depth"],
p.hist, title, qc_pass)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
15056,
25,
198,
10638,
7400,
46728,
863,
2393,
198,
198,
45819,
25,
198,
532,
2829,
6006,
45721,
7034,
198,
532,
5485,
7034,
351,
4049,
9210,
198,
532,
9518,
11,
41539,... | 2.498771 | 2,849 |
from configparser import ConfigParser
from pathlib import Path

# Demonstrate common ConfigParser operations against ``myapp.ini``.
parser = ConfigParser()
parser.read('myapp.ini')

http_section = parser['http']
default_section = parser['default']

# getint() is equivalent to fetching the raw string and converting it.
assert int(http_section['port']) == http_section.getint('port')

# Build an output path from a directory configured in the [default] section.
report_path = Path(default_section['output'], 'my_output.html')
print(report_path, http_section.get('host'))

# Enumerate all sections.
print(parser.sections())

# Inspect keys/values of one section (views first, then materialized lists).
default_keys = default_section.keys()
default_values = default_section.values()
print(default_keys, list(default_keys))
print(default_values, list(default_values))
for option_name in default_section:
    print(option_name)

# Membership tests work like a dict.
print("member check:", 'port' in http_section)

# Bump a persisted counter, then write the configuration back to disk.
run_count = int(parser['save']['count']) + 1
parser['save']['count'] = str(run_count)
with open('myapp.ini', 'w', encoding='cp950') as config_file:
    parser.write(config_file)
| [
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
37581,
796,
17056,
46677,
3419,
198,
37581,
13,
961,
10786,
1820,
1324,
13,
5362,
11537,
198,
198,
2,
976,
11,
651,
493,
198,
30493,
493,
7,
37581,
1... | 2.882883 | 333 |
from rest_framework import serializers
from django.db.models import fields
from core.models import Subscriber
| [
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
7032,
198,
6738,
4755,
13,
27530,
1330,
3834,
1416,
24735,
628,
628,
220
] | 3.741935 | 31 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, VMRaid Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import vmraid
import unittest
from bs4 import BeautifulSoup
from vmraid.utils import set_request
from vmraid.website.render import render
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
12131,
11,
569,
13599,
1698,
21852,
290,
25767,
669,
198,
2,
4091,
5964,
13,
14116,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
8... | 3.267442 | 86 |
if __name__ == "__main__":
print(main(int(input())))
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3601,
7,
12417,
7,
600,
7,
15414,
3419,
22305,
198
] | 2.269231 | 26 |
from accounts.models import CustomUser
from accounts.api.serializers import UserSerializer
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticatedOrReadOnly
| [
6738,
5504,
13,
27530,
1330,
8562,
12982,
198,
6738,
5504,
13,
15042,
13,
46911,
11341,
1330,
11787,
32634,
7509,
198,
6738,
1334,
62,
30604,
1330,
5009,
1039,
198,
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
23575,
47649,
3299,
198,
... | 4.140625 | 64 |
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Dense, Embedding, Input, Dropout
from keras.layers.merge import concatenate
from keras.layers import Conv1D, BatchNormalization, Activation, GlobalMaxPooling1D
#from keras.utils import multi_gpu_model
from data_utils.evaluation import *
from sklearn.metrics import f1_score
#from keras.utils.vis_utils import plot_model
# ---- hyperparameters -------------------------------------------------------
embedding_dim = 256 # embedding layer dimension is fixed to 256
num_words = 40000   # vocabulary size used when the sequences were tokenized
maxlen = 999        # padded sequence length
label_type = 'accusation' # train one label type at a time
##################################################
# data and label (pre-padded sequences and one-hot labels saved as .npy)
train_fact_pad_seq = np.load('./variables/pad_sequences/train_pad_%d_%d.npy' % (maxlen, num_words))
valid_fact_pad_seq = np.load('./variables/pad_sequences/valid_pad_%d_%d.npy' % (maxlen, num_words))
test_fact_pad_seq = np.load('./variables/pad_sequences/test_pad_%d_%d.npy' % (maxlen, num_words))
train_labels = np.load('./variables/labels/train_one_hot_%s.npy' % (label_type))
valid_labels = np.load('./variables/labels/valid_one_hot_%s.npy' % (label_type))
test_labels = np.load('./variables/labels/test_one_hot_%s.npy' % (label_type))
# label list: the label categories and how many classes there are in total
set_labels = np.load('./variables/label_set/set_%s.npy' % label_type)
##################################################
# model parameter
num_classes = train_labels.shape[1]
num_filters = 512
num_hidden = 1000
# modified; the original value was 256
batch_size = 256
num_epochs = 2
dropout_rate = 0.2
##################################################
# simple textcnn
# NOTE(review): "input" shadows the builtin of the same name; harmless here
# but renaming would be safer.
input = Input(shape=[train_fact_pad_seq.shape[1]], dtype='float64')
embedding_layer = Embedding(input_dim=num_words + 1,
                            input_length=maxlen,
                            output_dim=embedding_dim,
                            mask_zero=0,
                            name='Embedding')
embed = embedding_layer(input)
# convolution window (kernel) sizes are 3, 4, 5 and 6
# filter_size = 3
cnn1 = Conv1D(num_filters, 3, strides=1, padding='same')(embed)
relu1 = Activation(activation='relu')(cnn1)
cnn1 = GlobalMaxPooling1D()(relu1)
# filter_size = 4
cnn2 = Conv1D(num_filters, 4, strides=1, padding='same')(embed)
relu2 = Activation(activation='relu')(cnn2)
cnn2 = GlobalMaxPooling1D()(relu2)
# filter_size = 5
cnn3 = Conv1D(num_filters, 5, strides=1, padding='same')(embed)
relu3 = Activation(activation='relu')(cnn3)
cnn3 = GlobalMaxPooling1D()(relu3)
# filter_size = 6
cnn4 = Conv1D(num_filters, 6, strides=1, padding='same')(embed)
relu4 = Activation(activation='relu')(cnn4)
cnn4 = GlobalMaxPooling1D()(relu4)
# concatenate the output vectors of the four convolution branches
cnn = concatenate([cnn1, cnn2, cnn3, cnn4], axis=-1)
bn = BatchNormalization()(cnn)
drop1 = Dropout(dropout_rate)(bn)
dense = Dense(num_hidden, activation="relu")(drop1)
drop2 = Dropout(dropout_rate)(dense)
# sigmoid + binary_crossentropy: multi-label output over num_classes tags
main_output = Dense(num_classes, activation='sigmoid')(drop2)
model = Model(inputs=input, outputs=main_output)
#model=multi_gpu_model(model,gpus=2)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
#plot_model(model, to_file="./pics/textcnn_filter345.png",show_shapes=True)
#########################################################################
# Train one epoch at a time so we can evaluate and checkpoint after each.
for epoch in range(num_epochs):
    model.fit(x=train_fact_pad_seq, y=train_labels,
              batch_size=batch_size, epochs=1,
              validation_data=(valid_fact_pad_seq, valid_labels), verbose=1)
    # calculate accuracy
    predictions_valid = model.predict(valid_fact_pad_seq[:]) # validate on the validation set
    # predictions_test = model.predict(test_fact_pad_seq[:]) # validate on the test set
    # use the validation set for evaluation
    predictions = predictions_valid
    sets = set_labels
    # label2tag/predict2* helpers come from data_utils.evaluation.
    y1 = label2tag(valid_labels[:], sets) # convert validation labels from one-hot back to tags
    y2 = predict2toptag(predictions, sets) # keep only the highest-probability tag
    y3 = predict2half(predictions, sets) # keep only tags with probability > 0.5
    y4 = predict2tag(predictions, sets) # combine y2/y3: use >0.5 tags when any exist, else the top tag
    # accuracy when taking only the top-confidence tag
    s1 = [str(y1[i]) == str(y2[i]) for i in range(len(y1))]
    print(sum(s1) / len(s1))
    # accuracy when taking only tags with confidence > 0.5
    s2 = [str(y1[i]) == str(y3[i]) for i in range(len(y1))]
    print(sum(s2) / len(s2))
    # accuracy when combining the two rules above
    s3 = [str(y1[i]) == str(y4[i]) for i in range(len(y1))]
    accuracy = int(np.round(sum(s3) / len(s3), 3) * 100)
    print(accuracy)
    # calculate f1 score
    predictions_one_hot = predict1hot(predictions)
    f1_micro = f1_score(valid_labels,predictions_one_hot,average='micro')
    print('f1_micro_accusation:', f1_micro)
    f1_marco = f1_score(valid_labels, predictions_one_hot, average='macro')
    print('f1_macro_accusation:', f1_marco)
    # average of the micro and macro f1 scores
    f1_average = int(np.round((f1_marco + f1_micro) / 2, 2) * 100)
    print('total:', f1_average)
    # save model (filename encodes the hyperparameters and scores)
    model.save('./model/textcnn_%s_token_%s_pad_%s_filter_%s_hidden_%s_epoch_%s_accu_%s_f1_%s.h5' % (
        label_type , num_words, maxlen, num_filters, num_hidden, epoch + 1, accuracy, f1_average))
    # save the label-vs-prediction comparison to an Excel sheet
    r = pd.DataFrame({'label': y1, 'predict': y4})
    r.to_excel('./results/textcnn_%s_token_%s_pad_%s_filter_%s_hidden_%s_epoch_%s_accu_%s_f1_%s.xlsx' % (
        label_type, num_words, maxlen, num_filters, num_hidden, epoch + 1, accuracy, f1_average),
               sheet_name='1', index=False)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
13302,
6048,
278,
11,
23412,
11,
14258,
448,
198,
6738,
41927... | 2.08618 | 2,518 |
# [h] center anchors
#
# RoboFont script: horizontally center the 'top' and 'bottom' anchors of the
# selected glyphs in the current font.

from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected

# Only anchors with these names are moved.
anchor_names = ['top', 'bottom']

f = CurrentFont()

if f is not None:
    glyph_names = get_glyphs(f)
    for glyph_name in glyph_names:
        glyph = f[glyph_name]
        anchors = glyph.anchors
        if len(anchors) > 0:
            glyph.prepareUndo('center anchors')
            for anchor in anchors:
                if anchor.name in anchor_names:
                    # NOTE(review): centers on half of bounds xMax; for glyphs
                    # whose outline does not start at x=0 this is not the
                    # visual center -- confirm intended behavior.
                    anchor.x = glyph.bounds[2] * 0.5
            glyph.changed()
            glyph.performUndo()
else:
    # FIX: was the Python-2-only statement `print no_font_open`, which is a
    # SyntaxError under Python 3; the call form works on both versions.
    print(no_font_open)
| [
2,
685,
71,
60,
3641,
43360,
198,
198,
6738,
289,
33637,
17,
13,
18170,
13,
10331,
26791,
1330,
651,
62,
10853,
746,
82,
198,
6738,
289,
33637,
17,
13,
18170,
13,
37348,
1095,
1330,
645,
62,
10331,
62,
9654,
11,
645,
62,
10853,
74... | 2.153333 | 300 |
# -*- coding: utf-8 -*-
import platform
import shutil
import tempfile
import pytest
PLATFORM = platform.system().lower()
@pytest.fixture(scope="function", autouse=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
3859,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
198,
11748,
12972,
9288,
198,
198,
6489,
1404,
21389,
796,
3859,
13,
10057,
22446,
21037,
3419,
628,
198,... | 2.883333 | 60 |
# coding: utf-8
# ### Convolutional LSTM implementation with peephole connections
__author__='Dawood Al Chanti'
# In[1]:
import tensorflow as tf
from tensorflow.contrib.slim import add_arg_scope
from tensorflow.contrib.slim import layers
from tensorflow.contrib.layers.python.layers import initializers
# In[ ]:
#tf.zeros_initializer: initilize with normal distribution instead of zero states
def init_state(inputs,
               state_shape,
               state_initializer= tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
               dtype=tf.float32):
    """Helper function to create an initial state given inputs.

    Args:
      inputs: input Tensor, at least 2D, the first dimension being batch_size;
        may be None, in which case `dtype` is used and the batch size is 0.
      state_shape: the shape of the state (per example, excluding batch dim).
      state_initializer: Initializer(shape, dtype) for state Tensor; defaults
        to a truncated normal (mean 0, stddev 0.01) rather than zeros.
      dtype: Optional dtype, needed when inputs is None.
    Returns:
      A tensors representing the initial state.
    """
    if inputs is not None:
        # Handle both the dynamic shape as well as the inferred shape.
        inferred_batch_size = inputs.get_shape().with_rank_at_least(1)[0]
        batch_size = tf.shape(inputs)[0]  # dynamic batch size at run time
        dtype = inputs.dtype              # state dtype follows the inputs
    else:
        inferred_batch_size = 0
        batch_size = 0
    # NOTE(review): tf.pack was renamed tf.stack in TensorFlow 1.0, so this
    # call only works on pre-1.0 TensorFlow -- confirm the targeted version.
    initial_state = state_initializer(tf.pack([batch_size] + state_shape),dtype=dtype)
    # Pin the static shape so downstream ops see [batch] + state_shape.
    initial_state.set_shape([inferred_batch_size] + state_shape)
    return initial_state
# In[ ]:
@add_arg_scope
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
44386,
34872,
2122,
282,
406,
2257,
44,
7822,
351,
613,
538,
13207,
8787,
198,
834,
9800,
834,
11639,
35,
707,
702,
978,
609,
17096,
6,
198,
198,
2,
554,
58,
16,
5974,
198,
198,
... | 2.758221 | 517 |
import csv

# Extract the "EPC" and "Local Name" columns from the module-frames export.
# This will need to be changed based on what the file is called locally.
filename = 'module frames 23june2021.csv'

with open(filename, 'r', newline='') as csv_file:
    reader = csv.reader(csv_file)
    fields = next(reader)   # header row
    rows = list(reader)     # remaining data rows

# For each wanted column, print the header name followed by every row's value.
# (Previously the 'EPC' and 'Local Name' branches duplicated identical code.)
for column_number, field in enumerate(fields):
    if field in ('EPC', 'Local Name'):
        print(field)
        for row in rows:
            print(row[column_number])
        print('\n')
| [
11748,
269,
21370,
201,
198,
201,
198,
2,
31227,
284,
7925,
262,
7032,
366,
36,
5662,
1,
290,
366,
14565,
6530,
1,
422,
262,
269,
21370,
2393,
366,
21412,
13431,
2242,
73,
1726,
1238,
2481,
13,
40664,
1,
201,
198,
201,
198,
34345,
... | 2.42029 | 276 |
"""
Set up a simple Whoosh index and provide a few simple methods to update and
search it. To try at a REPL or from another module.
Here's some copypasta for, say, an iPython session:
article_root = "/Users/nikhilanand/Dropbox/wiki.nikhil.io.articles"
search_index = create_search_index(article_root)
update_index_incrementally(
article_root,
search_index,
absolute_paths_to_articles_in(article_root),
)
print(search_articles(article_root, search_index, "*BALAKRISH*"))
In the case of this particular project, the `watch` module uses the helper
functions here to 'intelligently' (heh) update the index when articles are
added, updated, or deleted. Simple stuff, yo.
TODO:
- NGRAMWORDS type for search fields
- Tune the index by reading the Whoosh documentation beyond the "QuickStart"
"""
import os
import whoosh
from whoosh.fields import ID, STORED, TEXT, Schema
from whoosh.qparser import FuzzyTermPlugin, MultifieldParser, WildcardPlugin
from bock.constants import (
MAX_CHARS_IN_SEARCH_RESULTS,
MAX_CHARS_SURROUNDING_SEARCH_HIGHLIGHT,
MAX_SEARCH_RESULTS,
MIN_CHARS_IN_SEARCH_TERM,
SEARCH_INDEX_PATH,
)
from bock.helpers import relative_article_path_from
from bock.logger import log
# Whoosh schema for the article index: `path` is the unique key used for
# incremental updates, `name` and `content` are searchable, and
# `modified_time` is stored verbatim so staleness can be checked later.
search_schema = Schema(
    path=ID(stored=True, unique=True),
    name=ID(stored=True),
    modified_time=STORED,
    content=TEXT,
    # fragments=NGRAMWORDS(minsize=3, maxsize=5, stored=True),
)
# Query parser that matches a single query string against any of these fields.
search_parser = MultifieldParser(
    [
        "name",
        "content",
        "path",
    ],
    schema=search_schema,
)
# Enable `term~` fuzzy matching and `*` wildcard syntax in user queries.
search_parser.add_plugin(FuzzyTermPlugin())
search_parser.add_plugin(WildcardPlugin())
# NOTE: there's an `update_document` method in Whoosh. Does the exact thing.
# https://whoosh.readthedocs.io/en/latest/indexing.html#updating-documents
| [
37811,
198,
7248,
510,
257,
2829,
5338,
3768,
6376,
290,
2148,
257,
1178,
2829,
5050,
284,
4296,
290,
198,
12947,
340,
13,
1675,
1949,
379,
257,
45285,
393,
422,
1194,
8265,
13,
198,
198,
4342,
338,
617,
2243,
4464,
40197,
329,
11,
... | 2.714074 | 675 |
"""
The aim of this script is to create a database in hdf5 format
"""
# import copy
import time
import sys
from official_fcns import *
def create_hfd5_data_structure(hdf5file, groupname, num_trials, num_samples, max_trials=1000000):
    """
    Create the HDF5 group and datasets that hold one parameter set's trials.

    :param hdf5file: h5py.File (open for writing)
    :param groupname: name of the group to create inside the file
    :param num_trials: nb of trials (initial number of rows)
    :param max_trials: for db decision datasets max nb of rows (resize limit)
    :param num_samples: for db decision datasets; nb of cols
    :return: created group
    """
    group = hdf5file.create_group(groupname)
    # Variable-length float dtype: each cell stores a ragged array
    # (e.g. a click train or list of change-point times).
    dt = h5py.special_dtype(vlen=np.dtype('f'))
    group.create_dataset('trials', (num_trials, 3), maxshape=(max_trials, 10), dtype=dt)
    group.create_dataset('trial_info', (num_trials, 3), maxshape=(max_trials, 10), dtype='f')
    group.create_dataset('decision_lin', (num_trials, num_samples), dtype='i', maxshape=(max_trials, num_samples))
    group.create_dataset('decision_nonlin', (num_trials, num_samples),
                         dtype='i', maxshape=(max_trials, num_samples))
    return group
def populate_hfd5_db(fname, four_par, num_of_trials, number_of_samples=1):
"""
Generate stimulus data and store as hdf5 file.
This is the main function called by this script.
"""
# open/create file
f = h5py.File(fname, 'a')
ll, lh, h, t = four_par
# create group corresponding to parameters
group_name = build_group_name(four_par)
if group_name in f: # if dataset already exists, exit without doing anything
print('data already present, file left untouched')
else: # if dataset doesn't exist, create it
print('creating dataset with group name {}'.format(group_name))
grp = create_hfd5_data_structure(f, group_name, num_of_trials, num_samples=number_of_samples)
# create trials dataset
trials_data = grp['trials']
# get row indices of new data to insert
row_indices = np.r_[:num_of_trials]
# create info on data
info_data = grp['trial_info'] # info dataset
info_data.attrs['h'] = h
info_data.attrs['T'] = t
info_data.attrs['low_click_rate'] = ll
info_data.attrs['high_click_rate'] = lh
info_data.attrs['S'] = (lh - ll) / np.sqrt(ll + lh)
data_version = 1 # version number of new data to insert
# populate database
for row_idx in row_indices:
# vector of CP times
cptimes = gen_cp(t, h)
trials_data[row_idx, 2] = cptimes
# stimulus (left clicks, right clicks)
(left_clicks, right_clicks), init_state, end_state = gen_stim(cptimes, ll, lh, t)
trials_data[row_idx, :2] = left_clicks, right_clicks
# populate info dataset
info_data[row_idx, :] = init_state, end_state, data_version
info_data.attrs['last_version'] = data_version
f.flush()
f.close()
if __name__ == '__main__':
"""
aim is to create a small size database with data from a single dataset
arguments passed to the script should be in the following order:
1. low rate
2. S
3. hazard rate
4. interrogation time
5. db filename
6. num_trials
7. num_samples
"""
if len(sys.argv) == 8:
# low click rate
try:
lr = float(sys.argv[1])
except ValueError:
print('\nError msg: first command line arg corresponding to low click rate should be a positive scalar\n')
exit(1)
# S (Skellam SNR)
try:
S = float(sys.argv[2])
except ValueError:
print('\nError msg: second command line arg corresponding to S should be in [0.5,1,...,10]\n')
exit(1)
# hazard rate
try:
hazard = float(sys.argv[3])
except ValueError:
print('\nError msg: third command line arg corresponding to h should be a non-negative scalar\n')
exit(1)
# interrogation time
try:
int_time = float(sys.argv[4])
except ValueError:
print('\nError msg: fourth command line arg corresponding to T should be a positive scalar\n')
exit(1)
# hdf5 db filename
try:
filename = sys.argv[5]
if filename[-3:] != '.h5':
raise ValueError("By convention, db filename should end with '.h5'")
except ValueError as err:
print('\nError msg: fifth command line arg corresponding to filename has a pb')
print(err.args)
exit(1)
# Number of Trials
try:
number_of_trials = int(sys.argv[6])
except ValueError:
print('\nError msg: sixth command line arg corresponding to number of trials should be an integer\n')
exit(1)
# Number of Samples
try:
nsamples = int(sys.argv[7])
except ValueError:
print('\nError msg: seventh command line arg corresponding to number of samples should be an integer\n')
exit(1)
start_time = time.time()
hr = get_lambda_high(lr, S)
fp = (lr, hr, hazard, int_time)
grp_name = build_group_name(fp)
true_g = get_best_gamma(S, hazard)
populate_hfd5_db(filename, fp, number_of_trials)
print("--- {} seconds ---".format(time.time() - start_time))
else:
raise OSError('Script called with wrong number of command line args')
exit(1)
| [
37811,
198,
464,
4031,
286,
428,
4226,
318,
284,
2251,
257,
6831,
287,
289,
7568,
20,
5794,
198,
37811,
198,
198,
2,
1330,
4866,
198,
11748,
640,
198,
11748,
25064,
198,
6738,
1743,
62,
16072,
5907,
1330,
1635,
628,
198,
4299,
2251,
... | 2.286316 | 2,375 |
# Generated by Django 2.1.5 on 2019-02-07 16:25
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
20,
319,
13130,
12,
2999,
12,
2998,
1467,
25,
1495,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.960784 | 51 |
import turtle
for i in range(6):
turtle.forward(100)
turtle.right(60)
turtle.done()
| [
11748,
28699,
198,
1640,
1312,
287,
2837,
7,
21,
2599,
198,
220,
220,
220,
28699,
13,
11813,
7,
3064,
8,
198,
220,
220,
220,
28699,
13,
3506,
7,
1899,
8,
198,
83,
17964,
13,
28060,
3419,
198
] | 2.486486 | 37 |
#!/usr/bin/env python
import os
import glob
import argparse
# Set up command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epitope_dir', type=str, required=True,
help='path to directory containing neoepiscope results'
)
parser.add_argument('-o', '--output_dir', type=str, required=True,
help='path to output directory'
)
args = parser.parse_args()
# Grab phased epitope files
epitope_file_list = glob.glob(os.path.join(args.epitope_dir, '.neoepiscope.out'))
# Open output file and write header
with open(os.path.join(args.output_dir, 'phasing_epitope_data.tsv'), 'w') as o:
o.write('\t'.join(['Patient', 'Tumor', 'Total_eps', 'Shared', 'Phased_only', 'Unphased_only']) + '\n')
for phased_eps in epitope_file_list:
# Get corresponding unphased epitope file
unphased_eps = phased_eps.replace('.out', '.somatic_unphased.out')
# Process phased epitopes
with open(phased_eps) as f:
f.readline()
f.readline()
for ep_line in f:
ep_tokens = ep_line.strip().split('\t')
phased.add(ep_tokens[0])
# Process unphased epitopes
with open(unphased_eps) as f:
f.readline()
f.readline()
for ep_line in f:
ep_tokens = ep_line.strip().split('\t')
if ep_tokens[0] not in phased:
# Epitope is unique to unphased calling
unphased_only += 1
else:
# Epitope is common to both phased and unphased calling
shared += 1
# Write output
o.write('\t'.join([tokens[0], tokens[2], str(len(phased) + unphased_only), str(shared), str(len(phased) - shared), str(unphased_only)]) + '\n')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
1822,
29572,
198,
198,
2,
5345,
510,
3141,
1627,
7159,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,... | 2.416159 | 656 |
"""
In this Bite we are going to parse a csv movie dataset to identify the
directors with the highest rated movies.
Write get_movies_by_director: use csv.DictReader to convert
movie_metadata.csv into a (default)dict of lists of Movie namedtuples.
Convert/filter the data:
Only extract director_name, movie_title, title_year and imdb_score.
Type conversions: title_year -> int / imdb_score -> float
Discard any movies older than 1960.
Here is an extract:
....
{ 'Woody Allen': [
Movie(title='Midnight in Paris', year=2011, score=7.7),
Movie(title='The Curse of the Jade Scorpion', year=2001, score=6.8),
Movie(title='To Rome with Love', year=2012, score=6.3), ....
], ...
}
Write the calc_mean_score helper that takes a list of Movie namedtuples
and calculates the mean IMDb score, returning the score rounded to 1
decimal place.
Complete get_average_scores which takes the directors data structure
returned by get_movies_by_director (see 1.) and returns a list of tuples
(director, average_score) ordered by highest score in descending order.
Only take directors into account with >= MIN_MOVIES
See the tests for more info. This could be tough one, but we really hope
you learn a thing or two. Good luck and keep calm and code in Python!
"""
import csv
from collections import defaultdict, namedtuple
import os
from urllib.request import urlretrieve
from statistics import mean
from tempfile import gettempdir
BASE_URL = "http://projects.bobbelderbos.com/pcc/movies/"
TMP = gettempdir()
fname = "movie_metadata.csv"
remote = os.path.join(BASE_URL, fname)
local = os.path.join(TMP, fname)
urlretrieve(remote, local)
MOVIE_DATA = local
MIN_MOVIES = 4
MIN_YEAR = 1960
Movie = namedtuple("Movie", "title year score")
def get_movies_by_director():
"""Extracts all movies from csv and stores them in a dict,
where keys are directors, and values are a list of movies,
use the defined Movie namedtuple"""
directors = defaultdict(list)
with open(MOVIE_DATA, encoding="utf8") as f:
for line in csv.DictReader(f):
try:
director = line["director_name"]
movie = line["movie_title"].replace("\xa0", "")
year = int(line["title_year"])
score = float(line["imdb_score"])
except ValueError:
continue
if year and year < MIN_YEAR:
continue
m = Movie(title=movie, year=year, score=score)
directors[director].append(m)
return directors
def calc_mean_score(movies):
"""Helper method to calculate mean of list of Movie namedtuples,
round the mean to 1 decimal place"""
return float("{:.1f}".format(mean([movie.score for movie in movies])))
def get_average_scores(directors):
"""Iterate through the directors dict (returned by get_movies_by_director),
return a list of tuples (director, average_score) ordered by highest
score in descending order. Only take directors into account
with >= MIN_MOVIES"""
director_avg = [
(director, calc_mean_score(movies))
for director, movies in directors.items()
if len(movies) >= MIN_MOVIES
]
return sorted(director_avg, key=lambda tup: tup[1], reverse=True)
| [
37811,
198,
818,
428,
44540,
356,
389,
1016,
284,
21136,
257,
269,
21370,
3807,
27039,
284,
5911,
262,
198,
13445,
351,
262,
4511,
13178,
6918,
13,
198,
198,
16594,
651,
62,
76,
20526,
62,
1525,
62,
35248,
25,
779,
269,
21370,
13,
3... | 2.798126 | 1,174 |
from flask import Flask, render_template
import requests
import logging
from elasticapm.contrib.flask import ElasticAPM
app = Flask(__name__)
apm = ElasticAPM(app, logging=logging.INFO,
server_url="",
service_name="",
secret_token="",
use_elastic_traceparent_header=True)
@app.route("/")
if __name__ == "__main__":
app.run(debug=False)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
11748,
7007,
198,
11748,
18931,
198,
6738,
27468,
499,
76,
13,
3642,
822,
13,
2704,
2093,
1330,
48567,
2969,
44,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
499,
76... | 2.191489 | 188 |
import argparse
import os
import yaml
from collections import OrderedDict
import sys
from os import path
sys.path.append(path.dirname(path.abspath(__file__)))
import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
from code_template import CodeTemplate
# This file is the top-level entry point for code generation in ATen.
# It takes an arbitrary number of arguments specifying metadata files to
# process (.cwrap, .yaml and .h) and outputs a number generated header
# and cpp files in ATen/ (see invocations of 'write' for each file that
# is written.) It is invoked from cmake; look for the 'cwrap_files'
# variable for an up-to-date list of files which are passed.
parser = argparse.ArgumentParser(description='Generate ATen source files')
parser.add_argument('files', help='cwrap files', nargs='+')
parser.add_argument(
'-s',
'--source-path',
help='path to source directory for ATen',
default='.')
parser.add_argument(
'-o',
'--output-dependencies',
help='output a list of dependencies into the given file and exit')
parser.add_argument(
'-d', '--install_dir', help='output directory', default='ATen')
parser.add_argument(
'--rocm',
action='store_true',
help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
options = parser.parse_args()
gen_to_source = os.environ.get('GEN_TO_SOURCE') # update source directly as part of gen
if not gen_to_source:
core_install_dir = os.path.join(options.install_dir, 'core_tmp') if options.install_dir is not None else None
else:
core_install_dir = os.path.join(options.source_path, 'core')
if options.install_dir is not None and not os.path.exists(options.install_dir):
os.makedirs(options.install_dir)
if core_install_dir is not None and not os.path.exists(core_install_dir):
os.makedirs(core_install_dir)
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
TEMPLATE_PATH + "/GeneratorDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_EXTENDED_INTERFACE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtendedInterface.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
LEGACY_TH_DISPATCHER_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.h")
LEGACY_TH_DISPATCHER_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.cpp")
LEGACY_TH_DISPATCHER_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.cpp")
LEGACY_TH_DISPATCHER_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.h")
REGISTER_CPU_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.h")
REGISTER_CPU_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.cpp")
REGISTER_CUDA_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.h")
REGISTER_CUDA_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.cpp")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHFunctions.h")
NATIVE_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/NativeFunctions.h")
TYPE_REGISTER = CodeTemplate("""\
context->registerType(Backend::${backend}, ScalarType::${scalar_type}, new ${type_name}());
""")
core_file_manager = FileManager(core_install_dir)
file_manager = FileManager()
cuda_file_manager = FileManager()
generators = {
'CPUGenerator.h': {
'name': 'CPU',
'th_generator': 'THGenerator * generator;',
'header': 'TH/TH.h',
},
'CUDAGenerator.h': {
'name': 'CUDA',
'th_generator': '',
'header': 'THC/THC.h' if not options.rocm else 'THH/THH.h'
},
}
backends = ['CPU', 'CUDA']
densities = ['Dense', 'Sparse']
# scalar_name, c_type, accreal, th_scalar_type, is_floating_type
scalar_types = [
('Byte', 'uint8_t', 'Long', 'uint8_t', False),
('Char', 'int8_t', 'Long', 'int8_t', False),
('Double', 'double', 'Double', 'double', True),
('Float', 'float', 'Double', 'float', True),
('Int', 'int', 'Long', 'int32_t', False),
('Long', 'int64_t', 'Long', 'int64_t', False),
('Short', 'int16_t', 'Long', 'int16_t', False),
('Half', 'Half', 'Double', 'at::Half', True),
]
# shared environment for non-derived base classes Type.h Tensor.h Storage.h
top_env = {
'cpu_type_registrations': [],
'cpu_type_headers': [],
'cuda_type_registrations': [],
'cuda_type_headers': [],
'pure_virtual_type_method_declarations': [],
'pure_virtual_extended_type_method_declarations': [],
'type_method_declarations': [],
'type_method_definitions': [],
'tensor_method_declarations': [],
'tensor_method_definitions': [],
'function_declarations': [],
'function_definitions': [],
'type_ids': [],
'native_function_declarations': [],
}
###################
# declare what files will be output _before_ we do any work
# so that the script runs quickly when we are just querying the
# outputs
# because EOL may not be LF(\n) on some environment (e.g. Windows),
# normalize EOL from CRLF/CR to LF and compare both files.
declare_outputs()
if options.output_dependencies is not None:
file_manager.write_outputs(options.output_dependencies)
core_file_manager.write_outputs(options.output_dependencies + "-core")
cuda_file_manager.write_outputs(options.output_dependencies + "-cuda")
else:
generate_outputs()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
11748,
331,
43695,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
25064,
198,
6738,
28686,
1330,
3108,
198,
17597,
13,
6978,
13,
33295,
7,
6978,
13,
15908,
3672,
7,
6978,
... | 2.67536 | 2,224 |
from django.db import connection
from django_mailbox.models import Message
import os
import sqlite3
def sizeof_formatter(num, suffix='B'):
'''
http://stackoverflow.com/a/1094933/3480790
'''
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
| [
6738,
42625,
14208,
13,
9945,
1330,
4637,
198,
6738,
42625,
14208,
62,
4529,
3524,
13,
27530,
1330,
16000,
198,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
628,
198,
198,
4299,
39364,
62,
687,
1436,
7,
22510,
11,
35488,
11639,
33,
... | 2.16 | 175 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-11-14 18:30
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
20,
319,
2177,
12,
1157,
12,
1415,
1248,
25,
1270,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.808219 | 73 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import click
import yo.yolog
import json
import structlog
logger = structlog.get_logger()
@click.command(name='watch-table')
@click.option('--database_url', envvar='DATABASE_URL')
@click.option('--table_name')
@click.option('--channel')
if __name__ == "__main__":
watch()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
30351,
952,
198,
198,
11748,
3904,
198,
11748,
27406,
13,
88,
928,
198,
11748,
33918,
198,
11748,
2878,
6404,
... | 2.646154 | 130 |
# coding=utf-8
#数据合并与展平
#合并一维数组
import numpy as np
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
c = np.append(a, b)
print(c)
#上面使用append,下面使用concatenate
d = np.concatenate([a, b])
print(d)
#多维数组的合并
import numpy as np
a = np.arange(4).reshape(2, 2)
b = np.arange(4).reshape(2, 2)
#按行合并
c = np.append(a, b, axis = 0)
print(c)
print("合并后数据维度", c.shape)
#按列合并
d = np.append(a, b, axis = 1)
print("按列合并结果:")
print(d)
print("合并后数据维度", d.shape)
#矩阵展平
import numpy as np
nd15 = np.arange(6).reshape(2, -1)
print(nd15)
#按列优先,展平
print("按列优先,展平")
print(nd15.ravel('F'))
#按行优先,展平
print("按行优先,展平")
print(nd15.ravel())
'''
输出:
[1 2 3 4 5 6]
[1 2 3 4 5 6]
[[0 1]
[2 3]
[0 1]
[2 3]]
合并后数据维度 (4, 2)
按列合并结果:
[[0 1 0 1]
[2 3 2 3]]
合并后数据维度 (2, 4)
[[0 1 2]
[3 4 5]]
按列优先,展平
[0 3 1 4 2 5]
按行优先,展平
[0 1 2 3 4 5]
''' | [
2,
19617,
28,
40477,
12,
23,
198,
2,
46763,
108,
162,
235,
106,
28938,
230,
33176,
114,
10310,
236,
161,
109,
243,
33176,
111,
198,
198,
2,
28938,
230,
33176,
114,
31660,
163,
119,
112,
46763,
108,
163,
119,
226,
198,
11748,
299,
... | 1.21267 | 663 |
# coding: utf-8
"""
ESPER API REFERENCE
OpenAPI spec version: 1.0.0
Contact: developer@esper.io
---------------------------------------------------------
Copyright 2019 Shoonya Enterprises Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from esperclient.api_client import ApiClient
class GroupCommandsApi(object):
"""NOTE: This class is auto generated.
Do not edit the class manually.
"""
def get_group_command(self, command_id, group_id, enterprise_id, **kwargs):
"""Get group command status
Returns GroupCommand instance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_command(command_id, group_id, enterprise_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str command_id: A UUID string identifying this device command. (required)
:param str group_id: A UUID string identifying this group. (required)
:param str enterprise_id: A UUID string identifying enterprise. (required)
:return: GroupCommand
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_group_command_with_http_info(command_id, group_id, enterprise_id, **kwargs)
else:
(data) = self.get_group_command_with_http_info(command_id, group_id, enterprise_id, **kwargs)
return data
def get_group_command_with_http_info(self, command_id, group_id, enterprise_id, **kwargs):
"""Get group command status
Returns GroupCommand instance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_group_command_with_http_info(command_id, group_id, enterprise_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str command_id: A UUID string identifying this device command. (required)
:param str group_id: A UUID string identifying this group. (required)
:param str enterprise_id: A UUID string identifying enterprise. (required)
:return: GroupCommand
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['command_id', 'group_id', 'enterprise_id']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_group_command" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'command_id' is set
if ('command_id' not in params or
params['command_id'] is None):
raise ValueError("Missing the required parameter `command_id` when calling `get_group_command`")
# verify the required parameter 'group_id' is set
if ('group_id' not in params or
params['group_id'] is None):
raise ValueError("Missing the required parameter `group_id` when calling `get_group_command`")
# verify the required parameter 'enterprise_id' is set
if ('enterprise_id' not in params or
params['enterprise_id'] is None):
raise ValueError("Missing the required parameter `enterprise_id` when calling `get_group_command`")
collection_formats = {}
path_params = {}
if 'command_id' in params:
path_params['command_id'] = params['command_id']
if 'group_id' in params:
path_params['group_id'] = params['group_id']
if 'enterprise_id' in params:
path_params['enterprise_id'] = params['enterprise_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# Authentication setting
auth_settings = ['apiKey']
return self.api_client.call_api(
'/enterprise/{enterprise_id}/devicegroup/{group_id}/command/{command_id}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GroupCommand',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def run_group_command(self, enterprise_id, group_id, data, **kwargs):
"""Run commands on group devices
Fire commands on all the group devices
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_group_command(enterprise_id, group_id, data, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str enterprise_id: ID of the enterprise (required)
:param str group_id: ID of the group (required)
:param GroupCommandRequest data: Group command request (required)
:return: GroupCommand
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.run_group_command_with_http_info(enterprise_id, group_id, data, **kwargs)
else:
(data) = self.run_group_command_with_http_info(enterprise_id, group_id, data, **kwargs)
return data
def run_group_command_with_http_info(self, enterprise_id, group_id, data, **kwargs):
"""Run commands on group devices
Fire commands on all the group devices
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_group_command_with_http_info(enterprise_id, group_id, data, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str enterprise_id: ID of the enterprise (required)
:param str group_id: ID of the group (required)
:param GroupCommandRequest data: Group command request (required)
:return: GroupCommand
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['enterprise_id', 'group_id', 'data']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method run_group_command" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'enterprise_id' is set
if ('enterprise_id' not in params or
params['enterprise_id'] is None):
raise ValueError("Missing the required parameter `enterprise_id` when calling `run_group_command`")
# verify the required parameter 'group_id' is set
if ('group_id' not in params or
params['group_id'] is None):
raise ValueError("Missing the required parameter `group_id` when calling `run_group_command`")
# verify the required parameter 'data' is set
if ('data' not in params or
params['data'] is None):
raise ValueError("Missing the required parameter `data` when calling `run_group_command`")
collection_formats = {}
path_params = {}
if 'enterprise_id' in params:
path_params['enterprise_id'] = params['enterprise_id']
if 'group_id' in params:
path_params['group_id'] = params['group_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# Authentication setting
auth_settings = ['apiKey']
return self.api_client.call_api(
'/enterprise/{enterprise_id}/devicegroup/{group_id}/command/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GroupCommand',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
1546,
18973,
7824,
4526,
24302,
18310,
198,
198,
11505,
17614,
1020,
2196,
25,
352,
13,
15,
13,
15,
198,
17829,
25,
8517,
31,
274,
525,
13,
952,
198,
43801,
12,
198,
198,
15269... | 2.439041 | 4,298 |
# https://leetcode.com/problems/two-sum-iv-input-is-a-bst/description/
# brute-force algorithm, but in BST, finding a element is O(log(n)),that is the advantage
head = treeNode(2)
head.left = treeNode(1)
head.right = treeNode(3)
solution = Solution()
print(solution.findTarget(head,4)) | [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
11545,
12,
16345,
12,
452,
12,
15414,
12,
271,
12,
64,
12,
65,
301,
14,
11213,
14,
198,
2,
33908,
12,
3174,
11862,
11,
475,
287,
44992,
11,
4917,
257,
5002,
318,
440,
... | 2.803922 | 102 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UpgradeOrchestrationServiceStateSummary(Model):
"""Service state summary of Service Fabric Upgrade Orchestration Service.
:param current_code_version: The current code version of the cluster.
:type current_code_version: str
:param current_manifest_version: The current manifest version of the
cluster.
:type current_manifest_version: str
:param target_code_version: The target code version of the cluster.
:type target_code_version: str
:param target_manifest_version: The target manifest version of the
cluster.
:type target_manifest_version: str
:param pending_upgrade_type: The type of the pending upgrade of the
cluster.
:type pending_upgrade_type: str
"""
_attribute_map = {
'current_code_version': {'key': 'CurrentCodeVersion', 'type': 'str'},
'current_manifest_version': {'key': 'CurrentManifestVersion', 'type': 'str'},
'target_code_version': {'key': 'TargetCodeVersion', 'type': 'str'},
'target_manifest_version': {'key': 'TargetManifestVersion', 'type': 'str'},
'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
198,
2,
5964,
1321... | 3.385081 | 496 |
# -*- coding: utf-8 -*-
# From: https://github.com/alexa/skill-sample-python-helloworld-classes/blob/master/lambda/py/hello_world.py
import logging
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import (
AbstractRequestHandler, AbstractExceptionHandler,
AbstractRequestInterceptor, AbstractResponseInterceptor)
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model.ui import SimpleCard
from ask_sdk_model import Response
sb = SkillBuilder()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
class FallbackIntentHandler(AbstractRequestHandler):
"""AMAZON.FallbackIntent is only available in en-US locale.
This handler will not be triggered except in that locale,
so it is safe to deploy on any locale.
"""
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Handler catch exceptions."""
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
3574,
25,
3740,
1378,
12567,
13,
785,
14,
1000,
27865,
14,
42401,
12,
39873,
12,
29412,
12,
12758,
322,
1764,
12,
37724,
14,
2436,
672,
14,
9866,
14,
50033,
... | 3.365805 | 503 |
from setuptools import setup, find_packages
setup(
name="mysql-stream",
packages=find_packages(),
version='0.0.4',
description="simple and easy to use MySQL client and ORM library",
author="chenchong",
author_email='boy3565@163.com',
url="https://github.com/streamDream/mysql-stream",
keywords=['mysql', 'client', 'orm'],
classifiers=[],
install_requires=[
'dbutils',
'PyMySQL',
]
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
2625,
28744,
13976,
12,
5532,
1600,
198,
220,
220,
220,
10392,
28,
19796,
62,
43789,
22784,
198,
220,
220,
220,
2196,
11639,
15,
... | 2.494382 | 178 |
import pandas as pd
import os
import numpy as np
from numerai.scoring import sharpe
import numerapi
class DataGetter:
""" Used to get latest data from numerapi, note that it is live,
so results might differ notebook is ran at different point in time."""
def __init__(self):
"""NumerAPI instance"""
self.napi = numerapi.NumerAPI(public_id=os.getenv("PUBLIC_ID"),
secret_key=os.getenv("SECRET_KEY"),
verbosity='info')
def get_data(self, load_from=None, training_or_tournament='training', use_cols=None, chunksize=None):
""" Loads data, reduces memory footprint, indexes and processes `era` column as number for analysis
Parameters:
-----------
training_or_tournament: str
Whether to get training or tournament data
Tournament data is more than couple GB size, so only fetch it for predictions
use_cols: list
List of columns to read from data
Can be used to limit data size if only relevant columns are needed for prediction
Returns
-------
pandas.core.frame.DataFrame
"""
data_path = self._get_data_path(load_from, training_or_tournament)
print(f'Getting {training_or_tournament} data from {data_path}\n')
self._check_if_data_downloaded(data_path)
usecols = self._get_columns_to_use(data_path, use_cols)
reader = pd.read_csv(data_path, usecols=usecols, chunksize=chunksize, iterator=True)
return pd.concat(chunk.pipe(self._reduce_memory_footprint)
.assign(era=lambda x: x['era'].str.replace('era', ''))
.pipe(self._set_index)
for chunk in reader)
def _get_data_path(self, load_from, training_or_tournament):
"""Downloaded data is stored with latest prediction round number suffix included in data path"""
if load_from is not None:
if not os.path.exists(load_from):
print(f'{load_from} directory not found!')
else:
round_number = self.napi.get_competitions()[0]['number']
load_from = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
f'numerai_dataset_{round_number}')
return os.path.join(load_from, f'numerai_{training_or_tournament}_data.csv')
def _reduce_memory_footprint(self, df):
""" Since features and target all have values in [0, .25, .5, .75, 1] range,
everything is downcasted into int8 instead of float64 to save memory."""
print(f'Reducing memory usage by converting features and target to integers\n')
cols = [col for col in df.columns if 'feature' in col] + ['target']
df[cols] = df[cols].replace(self.value_map).astype('int8')
return df
@property
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
5470,
1872,
13,
46536,
1330,
13986,
431,
198,
11748,
5470,
15042,
628,
198,
4871,
6060,
3855,
353,
25,
198,
220,
220,
220,
37227,
16718,
... | 2.320381 | 1,261 |
__version__ = '0.7.4'
__author__ = "Massimiliano Pippi & Federico Frenguelli"
VERSION = __version__ # synonym
| [
834,
9641,
834,
796,
705,
15,
13,
22,
13,
19,
6,
198,
198,
834,
9800,
834,
796,
366,
20273,
26641,
10115,
350,
12715,
1222,
35089,
3713,
4848,
782,
518,
15516,
1,
198,
198,
43717,
796,
11593,
9641,
834,
220,
1303,
6171,
5177,
198
] | 2.627907 | 43 |
from django.contrib import admin
from . import models
@admin.register(models.Message)
@admin.register(models.Chat)
@admin.register(models.Member)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
1330,
4981,
628,
198,
31,
28482,
13,
30238,
7,
27530,
13,
12837,
8,
628,
198,
31,
28482,
13,
30238,
7,
27530,
13,
30820,
8,
628,
198,
31,
28482,
13,
30238,
7,
27530,
... | 3.234043 | 47 |
# -*- coding: utf-8 -*-
"""SNF.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/15jbQLqhfFg0702_O2gy-0oXndCGRACOP
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
#importing dataset
df=pd.read_csv('./content/diabetes.csv')
df.head()
df.shape
df.info()
df.describe()
sns.pairplot(df,hue='Outcome')
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
sns.set_theme(style="whitegrid")
sns.boxplot(x="Age", data=df, palette="Set3")
plt.title("Age Distribution")
fig = plt.figure(figsize = (15,20))
ax = fig.gca()
df.hist(ax = ax)
df.Outcome.value_counts().plot(kind='bar')
plt.xlabel("Diabetes or Not")
plt.ylabel("Count")
plt.title("Outcome ")
#Here we can see that dataset is not much imbalanced so there is no need to balance.
X=df.drop('Outcome',axis=1)
X.head()
y=df['Outcome']
y.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2,random_state=0)
X_train.shape
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
X_train=sc_x.fit_transform(X_train)
X_test=sc_x.transform(X_test)
from sklearn.neighbors import KNeighborsClassifier
knn=KNeighborsClassifier(n_neighbors=5,metric='euclidean',p=2)
knn.fit(X_train,y_train)
pickle.dump(knn, open('savedmodel.pkl', 'wb'))
y_pred=knn.predict(X_test)
y_pred
knn.score(X_test,y_test)
from sklearn.metrics import accuracy_score
from sklearn import metrics
metrics.accuracy_score(y_test,y_pred)
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(y_test, y_pred)
mat
from sklearn.metrics import classification_report
target_names = ['Diabetes', 'Normal']
print(classification_report(y_test, y_pred, target_names=target_names))
#For selecting K value
error_rate = []
# Will take some time
for i in range(1,40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train,y_train)
pred_i = knn.predict(X_test)
error_rate.append(np.mean(pred_i != y_test))
import matplotlib.pyplot as plt
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed', marker='o',
markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
#From graph we can see that optimize k value is 16,17,18
# Now we will train our KNN classifier with this k values
knn=KNeighborsClassifier(n_neighbors=18,metric='euclidean',p=2)
knn.fit(X_train,y_train)
y_pred=knn.predict(X_test)
y_pred
knn.score(X_test,y_test)
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(mat, annot=True)
from sklearn.metrics import classification_report
target_names = ['Diabetes', 'Normal']
print(classification_report(y_test, y_pred, target_names=target_names))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
15571,
37,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
198,
220,
220,
220,
3740,
1378,
403... | 2.495748 | 1,176 |
"""
Replace label
"""
from collections.abc import Sequence
from typing import Tuple
import numpy as np
from numpy import dtype
from numpy.core.numeric import indices
from numpy.random import rand
from typing import Callable, Optional, Tuple
from ..pipeline.allocation_query import AllocationQuery
from ..pipeline.operation import Operation
from ..pipeline.state import State
from ..pipeline.compiler import Compiler
class ReplaceLabel(Operation):
"""Replace label of specified images.
Parameters
----------
indices : Sequence[int]
The indices of images to relabel.
new_label : int
The new label to assign.
"""
| [
37811,
198,
3041,
5372,
6167,
198,
37811,
198,
6738,
17268,
13,
39305,
1330,
45835,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
288,
4906,
198,
6738,
299,
32152,
13,
7295,
13... | 3.314721 | 197 |
# Generated by Django 2.2.4 on 2019-09-07 19:33
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
19,
319,
13130,
12,
2931,
12,
2998,
678,
25,
2091,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from datetime import timedelta
from transcriptic import Connection
import logging
import time
import threading
class StrateosAccessor(object):
"""
Retrieve protocols from Strateos
"""
SYNC_PERIOD = timedelta(minutes=10)
logger = logging.getLogger('intent_parser_strateos_accessor')
def get_protocol(self, protocol):
"""
Get default parameter values for a given protocol.
Args:
protocol: name of protocol
Return:
A dictionary. The key represent a parameter.
The value represents a parameter's default value.
Raises:
An Exception to indicate if a given protocol does not exist when calling the Strateos API.
"""
self.protocol_lock.acquire()
if protocol not in self.protocols:
raise Exception('Unable to get %s from Strateos' % protocol)
selected_protocol = self.protocols[protocol]['inputs']
self.protocol_lock.release()
return self._get_protocol_default_values(selected_protocol)
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
14687,
291,
1330,
26923,
198,
11748,
18931,
198,
11748,
640,
220,
198,
11748,
4704,
278,
198,
198,
4871,
4285,
378,
418,
15457,
273,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
... | 2.420479 | 459 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
class RateLimiterBackend(six.with_metaclass(abc.ABCMeta, object)):
"""
A backend that implements an atomic ``leak_and_increase_bucket``.
"""
def leak_and_increase_bucket(self, key, limit_max, limit_window):
"""
Leaks the bucket with the given key (and initializes one if needed) based on the given
``limit_max`` (maximum height of bucket) and ``limit_window`` (duration in seconds over which
``limit_max`` leaks out), and then increases the bucket.
.. note:
The bucket does not increase above its maximum (i.e., failed operations
do not count against the limit).
:return: whether the bucket is too full (i.e., rate limit exceeded) or not
"""
raise NotImplementedError()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,... | 2.848397 | 343 |
from datetime import datetime
import re
import requests
from app.spider.spider_utils import get_int_from_str
ua = 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
headers = {
'User-Agent': ua
}
# 得到目前已有的所有news
# 得到一页的news
init_news()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
198,
11748,
7007,
198,
6738,
598,
13,
2777,
1304,
13,
2777,
1304,
62,
26791,
1330,
651,
62,
600,
62,
6738,
62,
2536,
198,
198,
6413,
796,
705,
12982,
12,
36772,
25,
29258,
14,
20,... | 2.181208 | 149 |
from flask import request, render_template
def render_navbar(context, slot, payload): # pylint: disable=R0201,W0613
""" Base template slot """
chapter = request.args.get('chapter', '')
module = request.args.get('module', '')
return render_template("common/navbar.html", active_chapter=chapter, module=module, config=payload)
| [
6738,
42903,
1330,
2581,
11,
8543,
62,
28243,
628,
198,
4299,
8543,
62,
28341,
5657,
7,
22866,
11,
10852,
11,
21437,
2599,
220,
1303,
279,
2645,
600,
25,
15560,
28,
49,
15,
1264,
11,
54,
3312,
1485,
198,
220,
220,
220,
37227,
7308,
... | 3.127273 | 110 |
from marshmallow import fields, post_load
from portals.wwits.apis.rest import BaseSchemaExcludeFields as Schema
from .models import ParmModel, WOLaborActivityModel
| [
6738,
22397,
42725,
1330,
7032,
11,
1281,
62,
2220,
198,
6738,
42604,
13,
1383,
896,
13,
499,
271,
13,
2118,
1330,
7308,
27054,
2611,
3109,
9152,
15878,
82,
355,
10011,
2611,
198,
6738,
764,
27530,
1330,
47796,
17633,
11,
370,
3535,
4... | 3.608696 | 46 |
import functools
import pathlib
import random
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
import cauldron
from cauldron import runner
def _mock_reload_module(history: dict, path: str, library_directory: str):
"""Mocked version of the runner.__init__._reload_module function."""
output = {"path": path, "library_directory": library_directory}
if path not in history and random.random() > 0.5:
history[path] = output
raise ValueError("Faking that this did not go well for the first time.")
history[path] = output
return output
@patch("cauldron.runner._reload_module")
def test_reload(reload_module: MagicMock):
"""Should reload as expected."""
history = {}
reload_module.side_effect = functools.partial(_mock_reload_module, history)
library_directory = pathlib.Path(cauldron.__file__).resolve().parent
output = runner.reload_libraries([None, str(library_directory)])
me = pathlib.Path(__file__).resolve()
root = pathlib.Path(cauldron.__file__).resolve()
keys = list(history.keys())
assert keys.index(str(me)) < keys.index(str(root)), """
Expecting deeper hierarchy to be reloaded first.
"""
assert output, "Expect a non-empty list returned."
@patch("cauldron.runner._reload_module")
def test_reload_failure(reload_module: MagicMock):
"""Should raise RuntimeError if imports fail after many retries."""
reload_module.side_effect = ValueError("Nope")
library_directory = pathlib.Path(cauldron.__file__).resolve().parent
with pytest.raises(RuntimeError):
runner.reload_libraries([None, str(library_directory)])
| [
11748,
1257,
310,
10141,
198,
11748,
3108,
8019,
198,
11748,
4738,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
269,
45... | 2.943662 | 568 |
import json
import re
import numpy
import dynet as dy
import random
# For BioASQ only when using years 1-4 as training and 5 as dev/test.
# Get rid of 2017/2018 plus 2016 in training.
# For competition, training get rid of 2017/2018 and test 2018.
bioclean = lambda t: re.sub('[.,?;*!%^&_+():-\[\]{}]', '', t.replace('"', '').replace('/', '').replace('\\', '').replace("'", '').strip().lower()).split()
| [
11748,
33918,
198,
11748,
302,
198,
11748,
299,
32152,
198,
11748,
37860,
316,
355,
20268,
198,
11748,
4738,
198,
198,
2,
1114,
16024,
1921,
48,
691,
618,
1262,
812,
352,
12,
19,
355,
3047,
290,
642,
355,
1614,
14,
9288,
13,
198,
2,... | 2.892857 | 140 |
#
# Copyright 2018, 2020 Antoine Sanner
# 2019 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import pytest
import numpy as np
import numpy.ma as ma
from NuMPI.Tools import Reduction
@pytest.fixture
@pytest.mark.parametrize('arr', [
[1, 2.1, 3],
np.array((1, 2.1, 3)),
ma.masked_array([1, 2.1, 3])
])
@pytest.mark.parametrize('arr', [
[[1, 2.1, 3],
[4, 5, 6]],
np.array(((1, 2.1, 3),
(4, 5, 6))),
ma.masked_array([[1, 2.1, 3],
[4, 5, 6]])
])
@pytest.mark.parametrize('arr', [
np.array(((1, 2.1, 3),
(4, 5, 6))),
ma.masked_array([[1, 2.1, 3],
[4, 5, 6]])
])
@pytest.mark.parametrize('arr', [
np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0.), dtype=float), (3, 4)),
ma.masked_array(np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0.), dtype=float), (3, 4)))
])
@pytest.mark.parametrize('arr', [
np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0), dtype=int), (3, 4)),
ma.masked_array(np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0), dtype=int), (3, 4)))
])
def test_max_min_empty(pnp):
"""
Sometimes the input array is empty
"""
if pnp.comm.Get_size() >= 2:
if pnp.comm.Get_rank() == 0:
local_arr = np.array([], dtype=float)
else:
local_arr = np.array([1, 0, 4], dtype=float)
assert pnp.max(local_arr) == 4
assert pnp.min(local_arr) == 0
if pnp.comm.Get_rank() == 0:
local_arr = np.array([1, 0, 4], dtype=float)
else:
local_arr = np.array([], dtype=float)
assert pnp.max(local_arr) == 4
assert pnp.min(local_arr) == 0
else:
local_arr = np.array([], dtype=float)
# self.assertTrue(np.isnan(pnp.max(local_arr)))
# self.assertTrue(np.isnan(pnp.min(local_arr)))
assert pnp.max(local_arr) == np.finfo(local_arr.dtype).min
assert pnp.min(local_arr) == np.finfo(local_arr.dtype).max
@pytest.mark.parametrize('arr', [
np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0), dtype=float), (3, 4)),
ma.masked_array(np.reshape(np.array((-1, 1, 5, 4,
4, 5, 4, 5,
7, 0, 1, 0), dtype=float), (3, 4)))
])
@pytest.mark.parametrize('locval', [
np.array([False, False, True]),
ma.masked_array([False, False, True])
])
@pytest.mark.parametrize('locval', [
np.array([True, True, True]),
ma.masked_array([True, True, True])
])
| [
2,
198,
2,
15069,
2864,
11,
12131,
3738,
42722,
2986,
1008,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
13130,
31239,
11303,
413,
4914,
198,
2,
198,
2,
44386,
17168,
5964,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520... | 2.002538 | 1,970 |
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib2tikz import save as tikz_save
from matplotlib2tikz import get_tikz_code
from .. import tensorflow
class Analyzer:
"""DeepOBS analyzer class to generate result plots or get other summaries.
Args:
path (str): Path to the results folder. This folder should contain one
or multiple testproblem folders.
Attributes:
testproblems: Dictionary of test problems where the key is the
name of a test problem (e.g. ``cifar10_3c3d``) and the value is an
instance of the TestProblemAnalyzer class (see below).
"""
def __init__(self, path):
"""Initializes a new Analyzer instance.
Args:
path (str): Path to the results folder. This folder should contain one
or multiple testproblem folders.
"""
self.path = path
self.testproblems = self._read_testproblems()
def _read_testproblems(self):
"""Read all test problems (folders) in this results folder.
Returns:
dict: Dictionary of test problems, where the key is the test
problem's name and the value is an instance of the
TestProblemAnalyzer class.
"""
testproblems = dict()
for tp in os.listdir(self.path):
if os.path.isdir(os.path.join(self.path, tp)):
testproblems[tp] = TestProblemAnalyzer(self.path, tp)
return testproblems
class TestProblemAnalyzer:
"""DeepOBS analyzer class for a specific test problem.
This class will store all relevant information regarding a test problem,
such as the convergence performance of this problem.
Args:
path (str): Path to the parent folder of the test problem (i.e. the
results folder).
tp (str): Name of the test problem (same as the folder name).
Attributes:
name: Name of the test problem in DeepOBS format
(e.g. ``cifar10_3c3d``).
conv_perf: Convergence performance for this test problem.
metric: Metric to use for this test problem. If available this
will be ``test_accuracies``, otherwise ``test_losses``.
optimizer: Dictionary of optimizers for this test problem where
the key is the name of the optimizer (e.g.
``GradientDescentOptimizer``) and the value is an instance of the
OptimizerAnalyzer class (see below).
"""
def __init__(self, path, tp):
"""Initializes a new TestProblemAnalyzer instance.
Args:
path (str): Path to the parent folder of the test problem (i.e. the
results folder).
tp (str): Name of the test problem (same as the folder name).
"""
self._path = os.path.join(path, tp)
self.name = tp
print("Setting up", self.name)
self.conv_perf = self._get_conv_perf()
if tp == 'quadratic_deep' or tp == 'mnist_vae' or tp == 'fmnist_vae':
self.metric = "test_losses"
else:
self.metric = "test_accuracies"
self.optimizers = self._read_optimizer()
def _read_optimizer(self):
"""Read all optimizer (folders) in a test problem (folder).
Returns:
dict: Dictionary of optimizers, where the key is the optimizer's name
and the value is an instance of the OptimizerAnalyzer class.
"""
optimizers = dict()
for opt in os.listdir(self._path):
optimizers[opt] = OptimizerAnalyzer(self._path, opt, self.metric,
self.name, self.conv_perf)
return optimizers
def _get_conv_perf(self):
"""Read the convergence performance for this test problem from a
dictionary in the baseline folder.
Returns:
float: Convergence performance for this test problem
"""
try:
with open(os.path.join(tensorflow.config.get_baseline_dir(),
"convergence_performance.json"), "r") as f:
return json.load(f)[self.name]
except IOError:
print("Warning: Could not find a convergence performance file.")
return 0.0
class OptimizerAnalyzer:
"""DeepOBS analyzer class for an optimizer (and a specific test problem).
This class will give access to all relevant information regarding this
optimizer such as the best performing hyperparameter setting or the number
of settings.
Args:
path (str): Path to the parent folder of the optimizer folder (i.e. the
test problem folder).
opt (str): Name of the optimizer (folder).
metric (str): Metric to use for this test problem. If available this
will be ``test_accuracies``, otherwise ``test_losses``.
testproblem (str): Name of the test problem this optimizer (folder)
belongs to.
conv_perf (float): Convergence performance of the test problem this
optimizer (folder) belongs to.
Attributes:
name: Name of the optimizer (folder).
metric: Metric to use for this test problem. If available this
will be ``test_accuracies``, otherwise ``test_losses``.
testproblem: Name of the test problem this optimizer (folder)
belongs to.
conv_perf: Convergence performance for this test problem.
settings: Dictionary of hyperparameter settings for this
optimizer (on this test problem) where the key is the name of the
setting (folder) and the value is an instance of the
SettingAnalyzer class (see below).
num_settings: Total number of settings for this optimizer
(and test problem)
"""
def __init__(self, path, opt, metric, testproblem, conv_perf):
"""Initializes a new OptimizerAnalyzer instance.
Args:
path (str): Path to the parent folder of the optimizer folder (i.e.
the test problem folder).
opt (str): Name of the optimizer (folder).
metric (str): Metric to use for this test problem. If available this
will be ``test_accuracies``, otherwise ``test_losses``.
testproblem (str): Name of the test problem this optimizer (folder)
belongs to.
conv_perf (float): Convergence performance of the test problem this
optimizer (folder) belongs to.
"""
self._path = os.path.join(path, opt)
self.name = opt
self.metric = metric
self.testproblem = testproblem
self.conv_perf = conv_perf
self.settings = self._read_settings()
self.num_settings = len(self.settings)
self._best_setting_final = None
self._best_setting_best = None
def _read_settings(self):
"""Read all settings (folders) in a optimizer (folder).
Returns:
dict: Dictionary of settings, where the key is the setting's name
and the value is an instance of the SettingAnalyzer class.
"""
settings = dict()
for sett in os.listdir(self._path):
settings[sett] = SettingAnalyzer(self._path, sett, self.metric,
self.testproblem, self.conv_perf)
return settings
def get_best_setting_final(self):
"""Returns the setting for this optimizer that has the best final
performance using the metric (``test_losses`` or ``test_accuracies``)
defined for this test problem.
Returns:
SettingAnalyzer: Instance of the SettingAnalyzer class with the best
final performance
"""
if self._best_setting_final is not None:
return self._best_setting_final
else:
if self.metric == 'test_losses' or self.metric == 'train_losses':
current_best = np.inf
better = lambda x, y: x < y
elif self.metric == 'test_accuracies' or self.metric == 'train_accuracies':
current_best = -np.inf
better = lambda x, y: x > y
else:
raise RuntimeError("Metric unknown")
best_sett = None
for _, sett in self.settings.items():
val = sett.aggregate.final_value
if better(val, current_best):
current_best = val
best_ind = sett
self._best_setting_final = best_ind
return best_ind
def get_best_setting_best(self):
"""Returns the setting for this optimizer that has the best overall
performance using the metric (``test_losses`` or ``test_accuracies``)
defined for this test problem. In contrast to ``get_best_setting_final``
in not only looks at the final performance per setting, but the best
performance per setting.
Returns:
SettingAnalyzer: Instance of the SettingAnalyzer class with the best
overall performance
"""
if self._best_setting_best is not None:
return self._best_setting_best
else:
if self.metric == 'test_losses' or self.metric == 'train_losses':
current_best = np.inf
better = lambda x, y: x < y
elif self.metric == 'test_accuracies' or self.metric == 'train_accuracies':
current_best = -np.inf
better = lambda x, y: x > y
else:
raise RuntimeError("Metric unknown")
best_sett = None
for _, sett in self.settings.items():
val = sett.aggregate.best_value
if better(val, current_best):
current_best = val
best_ind = sett
self._best_setting_best = best_ind
return best_ind
def get_setting_most_runs(self):
"""Returns the setting with the most repeated runs (with the same
setting, but probably different seeds).
Returns:
SettingAnalyzer: Instance of the SettingAnalyzer class with the most
repeated runs.
"""
most_runs = 0
for _, sett in self.settings.items():
if sett.aggregate.num_runs > most_runs:
most_runs = sett.aggregate.num_runs
most_run_setting = sett
return most_run_setting
def plot_lr_sensitivity(self, ax, mode='final'):
"""Generates the ``learning rate`` sensitivity plot for this optimizer.
This plots the relative performance (relative to the best setting for
this optimizer) against the ``learning rate`` used in this setting.
This assumes that all settings or otherwise equal and only different in
the ``learning rate``.
Args:
ax (matplotlib.axes): Handle to a matplotlib axis to plot the
``learning rate`` sensitivity onto.
mode (str): Whether to use the final (``final``) performance or the
best (``best``) when evaluating each setting.
Defaults to ``final``.
"""
rel_perf = []
lr = []
for _, sett in self.settings.items():
if mode == 'final':
val = sett.aggregate.final_value
best = self.get_best_setting_final().aggregate.final_value
elif mode == 'best':
val = sett.aggregate.best_value
best = self.get_best_setting_best().aggregate.best_value
else:
raise RuntimeError("Mode unknown")
if self.metric == 'test_losses' or self.metric == 'train_losses':
rel_perf.append(best / val)
elif self.metric == 'test_accuracies' or self.metric == 'train_accuracies':
rel_perf.append(val / best)
else:
raise RuntimeError("Metric unknown")
lr.append(sett.aggregate.output['learning_rate'])
rel_perf = np.nan_to_num(rel_perf) # replace NaN with zero
rel_perf = np.array(np.vstack((rel_perf, lr))).transpose()
rel_perf = rel_perf[rel_perf[:, 1].argsort()]
ax.plot(rel_perf[:, 1], rel_perf[:, 0], label=self.name)
ax.set_xscale('log')
ax.set_ylim([0.0, 1.0])
def plot_performance(self, ax, mode='most'):
"""Generates a performance plot for this optimzer using one
hyperparameter setting.
Can either use the setting with the best final performance, the best
overall performance or the setting with the most runs.
This function will plot all four possible performance metrics
(``test_losses``, ``train_losses``, ``test_accuracies`` and
``train_accuracies``).
Args:
ax (list): List of four matplotlib axis to plot the performancs
metrics onto.
mode (str): Whether to use the setting with the best final
(``final``) performance, the best overall (``best``) performance
or the setting with the most runs (``most``) when plotting.
Defaults to ``most``.
"""
if mode == 'final':
run = self.get_best_setting_final()
elif mode == 'best':
run = self.get_best_setting_best()
elif mode == 'most':
run = self.get_setting_most_runs()
print("Plotting", run.aggregate.num_runs, "runs for ", self.name,
"on", run.aggregate.output['testproblem'])
else:
raise RuntimeError("Mode unknown")
for idx, metric in enumerate([
'test_losses', 'train_losses', 'test_accuracies',
'train_accuracies'
]):
ax[idx].plot(
run.aggregate.output[metric]['mean'],
label=run.aggregate.output['optimizer'])
ax[idx].fill_between(
range(run.aggregate.output[metric]['mean'].size),
run.aggregate.output[metric]['mean'] -
run.aggregate.output[metric]['std'],
run.aggregate.output[metric]['mean'] +
run.aggregate.output[metric]['std'],
color=ax[idx].get_lines()[-1].get_color(),
alpha=0.2)
def get_bm_table(self, perf_table, mode='most'):
"""Generates the overall performance table for this optimizer.
This includes metrics for the performance, speed and tuneability of this
optimizer (on this test problem).
Args:
perf_table (dict): A dictionary with three keys: ``Performance``,
``Speed`` and ``Tuneability``.
mode (str): Whether to use the setting with the best final
(``final``) performance, the best overall (``best``) performance
or the setting with the most runs (``most``).
Defaults to ``most``.
Returns:
dict: Dictionary with holding the performance, speed and tuneability
measure for this optimizer.
"""
if mode == 'final':
run = self.get_best_setting_final()
elif mode == 'best':
run = self.get_best_setting_best()
elif mode == 'most':
run = self.get_setting_most_runs()
perf_table['Performance'][self.name] = run.aggregate.output[
self.metric]['mean'][-1]
perf_table['Speed'][self.name] = run.aggregate.output['speed']
perf_table['Tuneability'][self.name] = {
**{
'lr': '{:0.2e}'.format(run.aggregate.output['learning_rate'])
},
**run.aggregate.output['hyperparams']
}
return perf_table
class SettingAnalyzer:
    """DeepOBS analyzer for a single hyperparameter setting.

    A setting folder contains one ``.json`` result file per random seed; this
    class collects those files and aggregates them via an ``AggregateRun``.

    Args:
        path (str): Path to the parent folder of the setting folder (i.e. the
            optimizer folder).
        sett (str): Name of the setting (folder).
        metric (str): Metric to use for this test problem. If available this
            will be ``test_accuracies``, otherwise ``test_losses``.
        testproblem (str): Name of the test problem this setting (folder)
            belongs to.
        conv_perf (float): Convergence performance of the test problem this
            setting (folder) belongs to.

    Attributes:
        name (str): Name of the setting (folder).
        metric (str): Metric used for this test problem.
        testproblem (str): Name of the test problem this setting belongs to.
        conv_perf (float): Convergence performance for this test problem.
        aggregate (AggregateRun): Aggregate over all runs with this setting.
    """

    def __init__(self, path, sett, metric, testproblem, conv_perf):
        """Store the setting's metadata and aggregate its runs."""
        self._path = os.path.join(path, sett)
        self.name = sett
        self.metric = metric
        self.testproblem = testproblem
        self.conv_perf = conv_perf
        # Must come last: aggregation reads the attributes assigned above.
        self.aggregate = self._get_aggregate()

    def _get_aggregate(self):
        """Build the aggregate run over every ``.json`` run file found in
        this setting's folder.

        Returns:
            AggregateRun: Aggregate information of all runs with this setting.
        """
        run_files = [
            entry for entry in os.listdir(self._path)
            if entry.endswith(".json")
        ]
        return AggregateRun(self._path, run_files, self.name, self.metric,
                            self.testproblem, self.conv_perf)
class AggregateRun:
    """DeepOBS class for a group of runs with the same settings (but possibly
    different seeds).

    Args:
        path (str): Path to the parent folder of the aggregate run folder (i.e.
            the settings folder).
        runs (list): List of run names all with the same setting.
        name (str): Name of the aggregate run (folder).
        metric (str): Metric to use for this test problem. If available this
            will be ``test_accuracies``, otherwise ``test_losses``.
        testproblem (str): Name of the test problem this aggregate run (folder)
            belongs to.
        conv_perf (float): Convergence performance of the test problem this
            aggregate run (folder) belongs to.

    Attributes:
        name: Name of the aggregate run (folder).
        testproblem: Name of the test problem this aggregate run (folder)
            belongs to.
        conv_perf: Convergence performance for this test problem.
        runs: List of run names all with the same setting.
        num_runs: Number of runs (with the same setting).
        metric: Metric to use for this test problem. If available this
            will be ``test_accuracies``, otherwise ``test_losses``.
        output: Dictionary including all aggregate information of the
            runs with this setting. All performance metrics have a mean and a
            standard deviation (can be zero if there is only one run with this
            setting).
        final_value: Final (mean) value of the test problem's metric.
        best_value: Best (mean) value of the test problem's metric.
    """

    def __init__(self, path, runs, name, metric, testproblem, conv_perf):
        """Initializes a new AggregateRun class.

        Args:
            path (str): Path to the parent folder of the aggregate run folder
                (i.e. the settings folder).
            runs (list): List of run names all with the same setting.
            name (str): Name of the aggregate run (folder).
            metric (str): Metric to use for this test problem. If available
                this will be ``test_accuracies``, otherwise ``test_losses``.
            testproblem (str): Name of the test problem this aggregate run
                (folder) belongs to.
            conv_perf (float): Convergence performance of the test problem
                this aggregate run (folder) belongs to.
        """
        self._path = path
        self.name = name
        self.testproblem = testproblem
        self.conv_perf = conv_perf
        self.runs = runs
        self.num_runs = len(runs)
        self.metric = metric
        self.output = self._aggregate()
        self.final_value = self._get_final_value()
        self.best_value = self._get_best_value()

    def _aggregate(self):
        """Aggregate performance data over all runs.

        Returns:
            dict: Dictionary including all aggregate information of the
                runs with this setting. All performance metrics have a mean
                and a standard deviation (zero if there is only one run with
                this setting).

        Raises:
            RuntimeError: If ``self.metric`` is not one of the four known
                metric names (consistent with ``_get_best_value``).
        """
        metrics = {
            'train_losses': [],
            'train_accuracies': [],
            'test_losses': [],
            'test_accuracies': []
        }
        meta = None
        for run in self.runs:
            output = self._load_json(os.path.join(self._path, run))
            # Meta data (hyperparameters etc.) is taken from the first run.
            if meta is None:
                meta = output
            metrics['train_losses'].append(output['train_losses'])
            metrics['test_losses'].append(output['test_losses'])
            # Accuracies are only present for classification test problems.
            if 'train_accuracies' in output:
                metrics['train_accuracies'].append(output['train_accuracies'])
                metrics['test_accuracies'].append(output['test_accuracies'])
        aggregate = dict()
        # Speed: per run, the first epoch at which the convergence
        # performance is reached (falling back to num_epochs if it is never
        # reached), averaged over all runs. Losses must fall below the
        # threshold; accuracies must rise above it.
        perf = np.array(metrics[self.metric])
        if self.metric == "test_losses" or self.metric == "train_losses":
            reached = perf <= self.conv_perf
        elif self.metric == "test_accuracies" or self.metric == "train_accuracies":
            reached = perf >= self.conv_perf
        else:
            raise RuntimeError("Metric unknown")
        aggregate['speed'] = np.mean(
            np.argmax(reached, axis=1) +
            np.invert(np.max(reached, axis=1)) * perf.shape[1])
        # Mean and standard deviation over runs for every tracked metric.
        # (Previously this looked the lists up with eval() on the variable
        # name, which was fragile; a plain dict lookup is equivalent.)
        for m, values in metrics.items():
            aggregate[m] = {
                'mean': np.mean(values, axis=0),
                'std': np.std(values, axis=0)
            }
        # merge meta and aggregate (aggregate replaces on key clashes)
        aggregate = {**meta, **aggregate}
        # Per-minibatch losses are too fine-grained for the summary.
        aggregate.pop('minibatch_train_losses', None)
        return aggregate

    def _load_json(self, path):
        """Load the ``JSON`` file of the given path.

        Args:
            path (str): Path to a ``JSON`` file.

        Returns:
            dict: Dictionary from the ``JSON`` file.
        """
        with open(path, "r") as f:
            return json.load(f)

    def _get_final_value(self):
        """Get final (mean) value of the metric used in this test problem.

        Returns:
            float: Final (mean) value of the test problem's metric.
        """
        return self.output[self.metric]['mean'][-1]

    def _get_best_value(self):
        """Get best (mean) value of the metric used in this test problem.

        Best means lowest for losses and highest for accuracies.

        Returns:
            float: Best (mean) value of the test problem's metric.

        Raises:
            RuntimeError: If ``self.metric`` is not a known metric name.
        """
        if self.metric == 'test_losses' or self.metric == 'train_losses':
            return min(self.output[self.metric]['mean'])
        elif self.metric == 'test_accuracies' or self.metric == 'train_accuracies':
            return max(self.output[self.metric]['mean'])
        else:
            raise RuntimeError("Metric unknown")
def beautify_lr_sensitivity(fig, ax):
    """Beautify a learning rate sensitivity plot.

    Adds a figure title, hides the y axes and most spines, and labels the
    x axis of the bottom row to create a nicer learning rate sensitivity
    plot.

    Args:
        fig (matplotlib.figure): Handle to the matplotlib figure of the
            learning rate sensitivity plot.
        ax (list): List of lists of matplotlib axis of the learning rate
            sensitivity plots.

    Returns:
        matplotlib.figure: Handle to the beautified matplotlib figure.
        list: List of lists of the beautified matplotlib axis.
    """
    fig.suptitle("Learning rate sensitivity", fontsize=20)
    n_rows, n_cols = ax.shape[0], ax.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            axis = ax[row][col]
            axis.get_yaxis().set_visible(False)
            # Keep only the bottom spine; it carries the learning-rate axis.
            for spine in ('top', 'right', 'left'):
                axis.spines[spine].set_visible(False)
            if row == 0:
                axis.get_xaxis().set_visible(False)
            if row == 1:
                axis.set_xlabel('Learning Rate')
    return fig, ax
def texify_lr_sensitivity(fig, ax):
    """Write a ``.tex`` file with the learning rate sensitivity plot.

    Creates a file named ``tuning_plot.tex`` with the latex code for the
    learning rate sensitivity plot.

    Args:
        fig (matplotlib.figure): Handle to the matplotlib figure of the
            learning rate sensitivity plot.
        ax (list): List of lists of matplotlib axis of the learning rate
            sensitivity plots.

    Returns:
        str: String of the latex code for the learning rate sensitivity plot.
    """
    tikz_code = get_tikz_code(
        'tuning_plot_new.tex',
        figureheight='\\figureheight',
        figurewidth='0.33\\figurewidth')
    # Tighten the spacing between the subplots of the group plot.
    tikz_code = tikz_code.replace(
        '\\begin{groupplot}[group style={group size=4 by 2}]',
        '\\begin{groupplot}[group style={group size=4 by 2, horizontal sep=0.02\\figurewidth, vertical sep=0.15cm}]'
    )
    # Use tiny fonts for labels, ticks and legends everywhere.
    tikz_code = (
        r"\pgfplotsset{every axis/.append style={label style={font=\tiny}, tick label style={font=\tiny}, legend style={font=\tiny, line width=1pt}}}"
        + tikz_code)
    # Applied in order; each entry is (search string, replacement).
    replacements = [
        ('minor', '%minor'),  # comment out minor ticks
        ('x grid', '%x grid'),  # remove grid
        ('y grid', '%y grid'),  # remove grid
        ('tick align', '%tick align'),  # ugly outside ticks
        ('nextgroupplot[',
         'nextgroupplot[axis x line*=bottom,\nhide y axis,'),  # ugly outside ticks
        ('(current bounding box.south west)!0.98!(current bounding box.north west)',
         '(current bounding box.south west)!1.05!(current bounding box.north west)'),  # position title higher
        ('title={', 'title={\\small '),  # shrink title size
    ]
    for old, new in replacements:
        tikz_code = tikz_code.replace(old, new)
    # Write the file out again
    with open('tuning_plot.tex', 'w') as tex_file:
        tex_file.write(tikz_code)
    return tikz_code
def rescale_ax(ax):
    """Rescale an axis to include the most important data.

    Clamps the y limits to a band covering the 20th/80th percentiles and the
    final values of all data lines, always keeping the horizontal
    convergence-performance reference line visible. Mutates ``ax`` in place
    and returns ``None``.

    Args:
        ax (matplotlib.axis): Handle to a matplotlib axis.
    """
    curve_data = []
    candidate_limits = []
    for line in ax.lines:
        if line.get_label() == "convergence_performance":
            # Horizontal reference line: its constant y value must stay visible.
            candidate_limits.append(line.get_ydata()[0])
        else:
            curve_data.append(line.get_ydata())
    if not curve_data:
        # No data lines at all: fall back to a fixed dummy range.
        ax.set_ylim([1.0, 2.0])
        return
    data_arr = np.array(curve_data)
    candidate_limits.append(np.percentile(data_arr, 20))
    candidate_limits.append(np.percentile(data_arr, 80))
    # The final value of every curve must remain in view as well.
    candidate_limits.extend(data_arr[:, -1].tolist())
    lower = np.min(candidate_limits) * 0.9
    upper = np.max(candidate_limits) * 1.1
    if lower != upper:
        # Never go to (or below) zero so log-scaled axes stay valid.
        ax.set_ylim([max(1e-10, lower), upper])
    ax.margins(x=0)
def beautify_plot_performance(fig, ax, folder_parser, problem_set):
    """Beautify a performance plot.

    This function adds axis labels, sets titles, hides unused subplots,
    draws convergence-performance reference lines and rescales all axes to
    create a nicer performance plot.

    Args:
        fig (matplotlib.figure): Handle to the matplotlib figure of the
            performance plot.
        ax (list): List of lists of matplotlib axis of the performance plot.
        folder_parser (Analyzer): An instance of the DeepOBS Analyzer class
            to plot the performance from.
        problem_set (str): Can either be ``small`` or ``large`` to switch
            between which benchmark set is being plotted.

    Returns:
        matplotlib.figure: Handle to the beautified matplotlib figure of the
            performance plot.
        list: List of lists of the beautified matplotlib axis of the
            performance plots.
    """
    fig.subplots_adjust(hspace=0.4)
    if problem_set == "small":
        fig.suptitle("Benchmark Set Small", fontsize=20)
        titles = [
            "P1 Quadratic Deep", "P2 MNIST - VAE", "P3 F-MNIST - CNN",
            "P4 CIFAR-10 - CNN"
        ]
        # clear axis (needed for matplotlib2tikz); the first two problems
        # have no accuracy metric, so their accuracy subplots stay empty
        plt.sca(ax[2][0])
        plt.cla()
        plt.sca(ax[2][1])
        plt.cla()
        plt.sca(ax[3][1])
        plt.cla()
        ax[2][1].axis('off')
        ax[3][1].axis('off')
        ax[1][0].set_xlabel("Epochs")
        ax[1][1].set_xlabel("Epochs")
        ax[2][2].set_ylabel("Test Accuracy")
        ax[3][2].set_ylabel("Train Accuracy")
        ax[1][1].tick_params(
            axis='x', which='major', bottom=False,
            labelbottom=True)  # show x axis
        # Add convergence performance line
        for idx, tp in enumerate(
                ["quadratic_deep", "mnist_vae", "fmnist_2c2d", "cifar10_3c3d"]):
            if tp in folder_parser.testproblems:
                metric = folder_parser.testproblems[tp].metric
                conv_perf = folder_parser.testproblems[tp].conv_perf
                # Row 0 holds test losses, row 2 holds test accuracies.
                if metric == "test_losses":
                    ax_row = 0
                elif metric == "test_accuracies":
                    ax_row = 2
                ax[ax_row][idx].axhline(
                    conv_perf, color='#AFB3B7', label="convergence_performance")
    elif problem_set == "large":
        fig.suptitle("Benchmark Set Large", fontsize=20)
        ax[1][0].set_xlabel("Epochs")
        ax[3][1].set_xlabel("Epochs")
        ax[2][1].set_ylabel("Test Accuracy")
        ax[3][1].set_ylabel("Train Accuracy")
        titles = [
            "P5 F-MNIST - VAE", "P6 CIFAR 100 - All CNN C",
            "P7 SVHN - Wide ResNet 16-4", "P8 Tolstoi - Char RNN"
        ]
        # Add convergence performance line
        for idx, tp in enumerate([
                "fmnist_vae", "cifar100_allcnnc", "svhn_wrn164",
                "tolstoi_char_rnn"
        ]):
            if tp in folder_parser.testproblems:
                metric = folder_parser.testproblems[tp].metric
                conv_perf = folder_parser.testproblems[tp].conv_perf
                if metric == "test_losses":
                    ax_row = 0
                elif metric == "test_accuracies":
                    ax_row = 2
                ax[ax_row][idx].axhline(
                    conv_perf, color='#AFB3B7', label="convergence_performance")
        # clear axis (needed for matplotlib2tikz)
        plt.sca(ax[2][0])
        plt.cla()
        plt.sca(ax[3][0])
        plt.cla()
        ax[2][0].axis('off')
        ax[3][0].axis('off')
    ax[3][2].set_xlabel("Epochs")
    ax[3][3].set_xlabel("Epochs")
    ax[0][0].set_ylabel("Test Loss")
    ax[1][0].set_ylabel("Train Loss")
    ax[1][0].tick_params(
        axis='x', which='major', bottom=False, labelbottom=True)  # show x axis
    # automatic rescaling; rescale_ax mutates the axis in place and returns
    # None, so (unlike before) its return value is not re-assigned
    for axlist in ax:
        for a in axlist:
            rescale_ax(a)
    # Legend is placed on the (empty) lower-left subplot.
    handles, labels = ax[0][3].get_legend_handles_labels()
    # labels_tex = [tfobs.plot_utils.texify(l) for l in labels]
    ax[3][0].legend(
        handles,
        labels,
        loc='upper right',
        bbox_to_anchor=(0.2, 1.1, 0.5, 0.5))
    for idx, title in enumerate(titles):
        ax[0, idx].set_title(title)
    return fig, ax
def texify_plot_performance(fig, ax, problem_set):
    """Write a ``.tex`` file with the performance plot.
    The function will create a file named `benchmark_small.tex` or
    `benchmark_large.tex` with the latex code for the performance plot.
    Args:
        fig (matplotlib.figure): Handle to the matplotlib figure of the
            performance plot.
        ax (list): List of lists of matplotlib axis of the performance plot.
        problem_set (str): Can either be ``small`` or ``large`` to switch
            between which benchmark set is being plotted.
    Returns:
        str: String of the latex code for the learning rate sensitivity plot.
    """
    # NOTE(review): every .replace() below assumes the exact string layout
    # emitted by get_tikz_code (matplotlib2tikz); re-verify after upgrading
    # that library. The replacements are order-dependent.
    file_name = 'benchmark_' + str(problem_set) + '.tex'
    tikz_code = get_tikz_code(
        file_name, figureheight='\\figureheight', figurewidth='\\figurewidth')
    # Prepend a pgfplots style that shrinks all fonts and legend line widths.
    tikz_code = r"\pgfplotsset{every axis/.append style={label style={font=\tiny}, tick label style={font=\tiny}, legend style={font=\tiny, line width=1pt}}}" + tikz_code
    tikz_code = tikz_code.replace('minor', '%minor')  # comment minor tick
    tikz_code = tikz_code.replace('x grid', '%x grid')  # remove grid
    tikz_code = tikz_code.replace('y grid', '%y grid')  # remove grid
    tikz_code = tikz_code.replace('tick align',
                                  '%tick align')  # ugly outside ticks
    tikz_code = tikz_code.replace(
        'nextgroupplot[',
        'nextgroupplot[axis x line*=bottom,\naxis y line*=left,'
    )  # ugly outside ticks
    tikz_code = tikz_code.replace('xlabel={Epochs},\nxmajorticks=false,',
                                  'xlabel={Epochs},\nxmajorticks=true,'
                                  )  # if x label is epoch, show ticks
    tikz_code = tikz_code.replace('ymajorticks=false,',
                                  'ymajorticks=true,')  # show y labels
    tikz_code = tikz_code.replace('\mathdefault',
                                  '')  # remove mathdefault in labels
    tikz_code = tikz_code.replace(
        '\path [draw=white!80.0!black, fill opacity=0]',
        '%\path [draw=white!80.0!black, fill opacity=0]'
    )  # remove lines that are created for some reason
    tikz_code = tikz_code.replace(
        '(current bounding box.south west)!0.98!(current bounding box.north west)',
        '(current bounding box.south west)!1.05!(current bounding box.north west)'
    )  # position title higher
    tikz_code = tikz_code.replace('title={',
                                  'title={\small ')  # shrink title size
    tikz_code = tikz_code.replace(
        'group style={group size=4 by 4',
        'group style={group size=4 by 4, horizontal sep=1cm, vertical sep=0.4cm '
    )  # reduce separation between plots
    tikz_code = tikz_code.replace(
        'ylabel={Test Loss}', r'ylabel style={align=left}, ylabel=Test\\Loss'
    )  # y label in two lines
    tikz_code = tikz_code.replace(
        'ylabel={Test Accuracy}',
        r'ylabel style={align=left}, ylabel=Test\\Accuracy'
    )  # y label in two lines
    tikz_code = tikz_code.replace(
        'ylabel={Train Loss}', r'ylabel style={align=left}, ylabel=Train\\Loss'
    )  # y label in two lines
    tikz_code = tikz_code.replace(
        'ylabel={Train Accuracy}',
        r'ylabel style={align=left}, ylabel=Train\\Accuracy'
    )  # y label in two lines
    # Write the file out again
    with open(file_name, 'w') as file:
        file.write(tikz_code)
    return tikz_code
def beautify_plot_table(bm_table):
    """Beautify a performance table.

    Flattens the nested ``{testproblem: {category: {optimizer: value}}}``
    dict into a pandas data frame (rows indexed by (testproblem, category),
    one column per optimizer) and pins the three baseline optimizers to the
    leftmost columns.

    Args:
        bm_table (dict): Dictionary holding all the information for the
            performance table.

    Returns:
        pandas.dataframe: A pandas data frame for the performance table.
    """
    flat = {(problem, category): bm_table[problem][category]
            for problem in bm_table.keys()
            for category in bm_table[problem].keys()}
    table = pd.DataFrame.from_dict(flat).T
    columns = list(table.columns.values)
    # Move the baselines to the front; processing Adam, then Momentum, then
    # GradientDescent leaves them in the order GD, Momentum, Adam.
    for baseline in ('AdamOptimizer', 'MomentumOptimizer',
                     'GradientDescentOptimizer'):
        if baseline in columns:
            columns.insert(0, columns.pop(columns.index(baseline)))
    table = table.reindex(columns=columns)
    print(table)
    return table
def texify_plot_table(perf_table_pd, problem_set):
    """Write a ``.tex`` file with the performance table.

    Creates a file named ``performance_table_small.tex`` or
    ``performance_table_large.tex`` with the latex code for the performance
    table.

    Args:
        perf_table_pd (pandas.dataframe): Pandas data frame for the
            performance table.
        problem_set (str): Can either be ``small`` or ``large`` to switch
            between which benchmark set is being plotted.

    Returns:
        str: String of the latex code for the performance table, or ``None``
            if the table is empty (unchanged previous behavior).
    """
    if perf_table_pd.empty:
        return None
    # Postprocessing for Latex Output.
    # None means "no column width limit"; the former value -1 was deprecated
    # in pandas 1.0 and rejected by later releases.
    pd.set_option('display.max_colwidth', None)
    perf_table_pd_n = perf_table_pd.apply(
        norm, axis=1)  # normalize between 0 and 100
    # Combine the normalized scores (as \cca color commands) with the raw
    # values rendered as latex.
    # NOTE(review): DataFrame.applymap was renamed to DataFrame.map in
    # pandas 2.1 -- switch once older pandas versions are dropped.
    perf_table_pd_n_str = perf_table_pd_n.applymap(
        add_color_coding_tex) + perf_table_pd.applymap(
            latex)
    perf_table_pd_n_str.columns = perf_table_pd_n_str.columns.str.replace(
        '_', r'\_')  # Texify the column headers
    tikz_code = r"\def\cca#1#2{\cellcolor{green!#1!red}\ifnum #1<50\color{white}\fi{#2}}" +\
        "\n" + r"\resizebox{\textwidth}{!}{%" + "\n" +\
        perf_table_pd_n_str.to_latex(escape=False) + r"}"
    with open('performance_table_' + problem_set + '.tex', 'w') as tex_file:
        tex_file.write(tikz_code)
    return tikz_code
def norm(x):
    """Normalize a table row to the range 0..100.

    Rows holding 'Tuneability' entries pass through unchanged, and constant
    rows map to the neutral midpoint 50. For accuracy-based 'Performance'
    rows a higher raw value yields a higher score; for loss-based problems
    and all other categories a lower raw value yields a higher score.
    """
    problem, category = x.name[0], x.name[1]
    if category == 'Tuneability':
        return x
    lo, hi = x.min(), x.max()
    if lo == hi:
        # Constant row: every entry sits at the neutral midpoint of 50.
        return x - lo + 50.0
    # These three test problems report losses, not accuracies.
    loss_based = problem in ("quadratic_deep", "mnist_vae", "fmnist_vae")
    if category == 'Performance' and not loss_based:
        # Accuracy metric: higher raw value -> higher score.
        return np.abs((x - lo) / (hi - lo) * 100)
    # Loss metric (or any other category): lower raw value -> higher score.
    return np.abs((x - hi) / (lo - hi) * 100)
def latex(input):
    """Render a table cell as latex.

    Floats are formatted with four decimals and ints verbatim, both wrapped
    in braces; dicts become a comma-separated key/value listing with braces,
    quotes and underscores stripped; everything else yields "".
    """
    if isinstance(input, float):
        return "{%.4f}" % input
    if isinstance(input, int):
        return "{" + str(input) + "}"
    if isinstance(input, dict):
        text = str(input)
        # Strip characters that either break latex or add visual noise.
        for ch in ("{", "}", "'", "_"):
            text = text.replace(ch, "")
        return text
    return ""
def add_color_coding_tex(input):
    """Wrap a 0..100 score in the latex ``\\cca`` color-coding command.

    Args:
        input: A numeric score (int, float, or numeric string). NaN/inf
            floats and values that cannot be converted to int yield "".

    Returns:
        str: ``\\cca{<int score>}`` for usable input, otherwise ``""``.
    """
    # NOTE: the original condition parsed as
    # `str or int or (float and not isnan)` due to operator precedence, so a
    # non-numeric string crashed in int(); guard the conversion instead.
    if isinstance(input, (str, int, float)):
        try:
            value = int(input)
        except (ValueError, OverflowError):
            # non-numeric strings, NaN, +/-inf
            return ""
        return r"\cca{" + str(value) + "}"
    return ""
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
17,
83,
1134,
89,
1330,
3613,
355,
... | 2.249986 | 17,949 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from importlib import import_module
from collectors.isrctn.spider import _make_start_urls
# Tests
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,... | 3.341463 | 82 |
#!/usr/bin/env python
"""
Based entirely on Django's own ``setup.py``.
"""
from setuptools import setup, find_packages
from tagging.version import __version__
# Distribution metadata; the version string is imported from
# tagging/version.py so the code and the package metadata cannot drift.
setup(
    name = 'django-tagging',
    version = __version__,
    description = 'Generic tagging application for Django',
    author = 'Jonathan Buchanan',
    author_email = 'jonathan.buchanan@gmail.com',
    url = 'http://code.google.com/p/django-tagging/',
    requires=[
        'django (>=1.3)',
    ],
    packages = find_packages(),
    include_package_data=True,
    classifiers = ['Development Status :: 4 - Beta',
                   'Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Utilities'],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
15001,
5000,
319,
37770,
338,
898,
7559,
40406,
13,
9078,
15506,
13,
198,
37811,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
49620,
13,
9... | 2.407595 | 395 |
from setuptools import setup, find_packages
from pip._internal.req import parse_requirements

# NOTE(review): pip._internal is not a public API; parse_requirements'
# signature and the attribute below (`.req` vs `.requirement`) change
# between pip releases -- pin the pip version or parse requirements.txt
# manually.
install_requirements = parse_requirements('requirements.txt', session=False)
reqs = [str(ir.req) for ir in install_requirements]

VERSION = '0.0.1'
DESCRIPTION = 'Covid-19 Data Extractor - India'
LONG_DESCRIPTION = 'Extracts Covid-19 status from "url = https://www.mohfw.gov.in/data/datanew.json" '

setup(
    name='CovidStatusIndia',
    version=VERSION,
    author='Sudharshan Akshay',
    author_email='sudharshan6acharya@gmail.com',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    keywords=['python', 'covid-19', 'covid', 'status', 'india', 'data',
              'extactor', 'json', 'sudharshan', 'akshay'],
    # Classifiers must be valid PyPI trove classifiers or the upload is
    # rejected; "Development Status :: 1 - Alpha", "Programing Language",
    # "Intended Audience :: Developer" and "Operating System :: Operating
    # System Independent" are not in the official list.
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    install_requires=reqs,
    # The former `package=find_packages()` kwarg was an invalid (silently
    # ignored) duplicate of `packages`; setuptools only reads `packages`.
    packages=find_packages(where='src'),
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
7347,
13557,
32538,
13,
42180,
1330,
21136,
62,
8897,
18883,
198,
198,
17350,
62,
8897,
18883,
796,
21136,
62,
8897,
18883,
10786,
8897,
18883,
13,
14116,
3256,
6246,
... | 2.708661 | 381 |
#
# AutoDock | Raccoon2
#
# Copyright 2013, Stefano Forli, Michel Sanner
# Molecular Graphics Lab
#
# The Scripps Research Institute
# _
# (,) T h e
# _/
# (.) S c r i p p s
# \_
# (,) R e s e a r c h
# ./
# ( ) I n s t i t u t e
# '
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import Tkinter, tkFont
from mglutil.util.callback import CallbackFunction
class RangeSlider(Tkinter.Frame):
    """
    Two-handle range slider drawn on a Tkinter canvas (Python 2 code: note
    the `Tkinter` module name and `print` statements in the demo below).

    NOTE(review): the constructor -- which presumably creates self.canvas,
    self.pc, self.vmin/self.vmax, self.cb, self.minval/self.maxval,
    self.leftOrigBB/self.rightOrigBB and self.orient -- is defined outside
    this excerpt; confirm the attribute list before relying on it.

    - added method for setting the actual range values that can be returned by the slider
    - added getvalues method for getting current slider values
    - added min/max labels to the range
    - minor esthetic changes
    # TODO
    - the widget is not tolearant with other non-pack placement managers? (i.e. grid?)
      remove the Tkinter packing configuration?
    """
    def setrange(self, vmin=None, vmax=None):
        """ defines the value of the min and max allowed values"""
        #print "SLIDERSETRANGE>", vmin, vmax
        if not vmin == None:
            self.vmin = self.currmin = vmin
        if not vmax == None:
            self.vmax = self.currmax = vmax
        # Refresh the numeric labels at both ends, then snap the cursors
        # back to the edges so they cover the new full range.
        self.minval.configure(text="%2.3f" % self.vmin)
        self.maxval.configure(text="%2.3f" % self.vmax)
        self.reset()
    def reset(self, event=None):
        """ reset the sliders to the far edges"""
        canvas = self.canvas
        # Move each cursor by the offset between its current bounding box
        # and the bounding box recorded at construction time.
        bb = canvas.bbox('left')
        if self.orient=='horizontal':
            canvas.move('left', self.leftOrigBB[0]-bb[0], 0)
        else:
            canvas.move('left', 0, bb[1]-self.leftOrigBB[1])
        bb = canvas.bbox('right')
        if self.orient=='horizontal':
            canvas.move('right', self.rightOrigBB[2]-bb[2], 0)
        else:
            canvas.move('right', 0, bb[3]-self.rightOrigBB[3])
        self.updateBackPoly(event)
        ## c = self.canvas
        ## bb1 = c.bbox('left')
        ## bb2 = c.bbox('right')
        ## print "BB1", bb1
        ## print "BB2", bb2
        ## print "\n\n\n\n ASK MICHEL!!!"
        ## #self.canvas.move('left', 0, 0)
        ## #self.canvas.move('right', 0, self.width) #-40, 0)
        ##
    def updateBackPoly(self, event=None):
        """update the background polygon to span betweent the cursors"""
        canvas = self.canvas
        bb1 = canvas.bbox('left')
        bb2 = canvas.bbox('right')
        canvas.coords('poly', bb1[2]+1, bb1[1]+1, bb2[0]-1, bb1[3]-1)
        # Map cursor pixel positions back into the [vmin, vmax] value range.
        # NOTE(review): self.pc presumably converts a pixel coordinate into
        # a 0..1 fraction of the slider length -- defined outside this
        # excerpt, confirm.
        self.currmin = self.vmin + (self.vmax - self.vmin) * self.pc(bb1[2])
        self.currmax = self.vmin + (self.vmax - self.vmin) * self.pc(bb2[0])
        if self.cb:
            try:
                self.cb(self.currmin, self.currmax)
            except:
                # this is here to catch when the widget
                #is packed the first time
                #print "catched exception with callback... first-time init?"
                return
    def getvalues(self):
        """ return the current slider values"""
        return (self.currmin, self.currmax)
# NOTE: this demo block uses Python 2 syntax (`print` statements, module
# name `Tkinter`); it will not run under Python 3.
if __name__=='__main__':
    root = Tkinter.Tk()
    rangeSlider = RangeSlider(root, width=200, height=15)
    #rangeSlider2 = RangeSlider(root, width=200, height=15)
    print rangeSlider.canvas.bbox('left')
    print rangeSlider.canvas.bbox('right')
    print rangeSlider.canvas.bbox('poly')
| [
2,
220,
220,
220,
220,
220,
220,
220,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
11160,
35,
735,
930,
371,
8679,
261,
17,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
15069,
2211,
11,
22350,
5733,
1114,
4528,
1... | 2.187398 | 1,841 |
import unittest
from unittest.mock import patch
from os import environ
from json import loads
import boto3 as boto
from botocore.exceptions import ClientError
from moto import mock_s3
from mongomock import MongoClient as MockMongoClient
from mongo_lambda_backup.handler import handler
@patch.dict(
environ,
{
"BUCKET_NAME": "mongo-lambda-backup-1",
"MONGO_URI": "mongodb://localhost/test-db",
"COLLECTION_BLACKLIST": "skip,skip2",
},
)
@mock_s3
@patch.dict(
environ,
{
"BUCKET_NAME": "mongo-lambda-backup-2",
"MONGO_URI": "mongodb://localhost/test-db",
"COLLECTION_BLACKLIST": "skip,skip2",
"IN_MEMORY": "True",
},
)
@mock_s3
| [
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
6738,
28686,
1330,
551,
2268,
198,
6738,
33918,
1330,
15989,
198,
198,
11748,
275,
2069,
18,
355,
275,
2069,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
... | 2.260317 | 315 |
import datetime
import os
import subprocess
import sys
from os import listdir
from zipfile import ZipFile
import zipfile
import pandas
import yaml
from feed_manager import FeedManager
from gtfspy import exports, filter, import_validator, timetable_validator, util
from gtfspy.gtfs import GTFS
from gtfspy.aggregate_stops import aggregate_stops_spatially
from gtfspy.networks import combined_stop_to_stop_transit_network
from licenses.adapt_licenses import create_license_files
from read_to_publish_csv import to_publish_generator
from settings import COUNTRY_FEED_LIST, TO_PUBLISH_ROOT_OUTPUT_DIR, SQLITE_ENDING, COUNTRY_FEEDS_DIR, \
THUMBNAIL_DIR, GTFS_ZIPFILE_BASENAME
from city_notes import CITY_ID_TO_NOTES_STR
"""
This script finds, imports, filters and validates one or several raw gtfs files.
Preparations:
- Identify needed rawfolders from to_publish.csv ->
browse trough all; create list of all feeds: city, feed, date1, date2, date3...
- Check that all subfeeds are available for the wanted extract period (download date).
Note that some subfeeds have been renamed.
Input: rawfolder, download date, city, to_publish.csv
See ExtractPipeline.run_full_without_deploy for details on what is done.
"""
import matplotlib
#matplotlib.use("agg")
matplotlib.use("TkAgg") # use this if interactive visualizations are wanted
AVAILABLE_COMMANDS = ['full',
"thumbnail",
"licenses",
"create_networks",
"clear",
"deploy_to_server",
"copy_from_hammer",
"import_raw",
"clear_main",
"stats",
"extract_start_date",
"notes",
"extracts"]
SUCCESS = True
if __name__ == "__main__":
main()
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
19974,
7753,
1330,
38636,
8979,
198,
11748,
19974,
7753,
198,
198,
11748,
19798,
292,
198,
11748,
331,
43695,
... | 2.375796 | 785 |
from ggplib.util.symbols import SymbolFactory, ListTerm, Term
def rotate_90(x, y, x_cords, y_cords):
    """Rotate grid coordinate (x, y) by 90 degrees anti-clockwise.

    Both coordinates are looked up by position in their ordered coordinate
    label lists (which must have equal length); the rotated point is
    returned as an (x, y) tuple of coordinate labels.
    """
    assert len(x_cords) == len(y_cords)
    col = x_cords.index(x)
    row = y_cords.index(y)
    # Anti-clockwise quarter turn on a square grid: (col, row) -> (row, n-1-col).
    return x_cords[row], y_cords[len(y_cords) - 1 - col]
symbol_factory = None
| [
6738,
308,
70,
489,
571,
13,
22602,
13,
1837,
2022,
10220,
1330,
38357,
22810,
11,
7343,
40596,
11,
35118,
628,
628,
198,
4299,
23064,
62,
3829,
7,
87,
11,
331,
11,
2124,
62,
66,
3669,
11,
331,
62,
66,
3669,
2599,
198,
220,
220,
... | 2.051948 | 154 |
from datetime import datetime
import time
from gpiozero import Button
import os
import pygame
from tinytag import TinyTag
from random import *
from guizero import *
from colorsys import hsv_to_rgb
import pyowm
### Switch, Pick, Update Functions
### Helpers for routine functions
###############
#picks a background color, each call shifts hue by one degree
#Call in while loop for gradual color change
### For User Output, convert time format
#Gets current system time in user requested format
#Counts files in path
#Picks random file from path and returns path/file
#Files MUST follow naming convention 01.mp3, 02.mp3... 99.pm3
#Gets folder location based on audioType request
#Sends folder to getFile to pick random (but not repeated) audio file
#Plays the selected audio file
#Audio file runs until it is over or until stopAudio() is called
#Gracefully Ends Audio
#Checks for a signal from the button and/or motion sensor
#main loop, checks alarm time with system clock, changes text and background color
#State depenedent on if statements, each call with run new section depending on time
#############################################################################################
#Global Vars, changed by functions and app
print("started")
alarmH = 12
alarmM = 00
alarmStatus = True #If false, alarm off
pm = False
whiteNoise = False
music = False
H24 = False
absolutePath = os.path.abspath('AppliedComputingFinal.py')[:-24]
triggered = False
hue = 0 #used as global var to change background of time display
button = Button(18)
volUp = Button(23)
volDown = Button(24)
oldA = False
oldB = False
###First app, set params
app = App(title="Alarm", height = 500, width = 500, bg = '#dbf3ff')
message = Text(app, text='Set Your Alarm',font = "Quicksand", size = 30)
message = Text(app, text = "Hour",font = "Quicksand", size = 20)
sliderH = Slider(app, start=1, end = 12, command = setH, width= 400)
# --- Set-up window widgets. `app`, the slider callbacks (setM) and the
# switch* handlers are defined earlier in the file (not visible here). ---
message = Text(app, text = "Minute",font = "Quicksand", size = 20)
sliderM = Slider(app, start=0, end = 59, command = setM, width = 400)
checkboxPM = CheckBox(app, text = "PM", command = switchPM)
checkboxPM.font = 'Quicksand'
checkboxMusic = CheckBox(app, text = "Musical Alarm", command = switchMusic)
checkboxMusic.font = "Quicksand"
checkboxWhiteNoise = CheckBox(app, text = "White Noise", command = switchWhiteNoise)
checkboxWhiteNoise.font = "Quicksand"
checkbox24H = CheckBox(app, text = "24H Time", command = switch24H)
checkbox24H.font = 'Quicksand'
checkboxAlarmOff = CheckBox(app, text = "No Alarm", command = switchOff)
checkboxAlarmOff.font = 'Quicksand'
# NOTE(review): rebinds `message`, hiding the "Minute" label handle above.
message = Text(app, text = '')
buttonExit = PushButton(app, text = "All Set", command = app.destroy)
buttonExit.font = 'Quicksand'
buttonExit.bg = "#5ed6a2"
# Blocks until the set-up window is closed via the "All Set" button.
app.display()
#Get Weather info
apiKey = "APIKeyHere"  # placeholder -- replace with a real OpenWeatherMap key
weatherObject = pyowm.OWM(apiKey)
obsObj = weatherObject.weather_at_place('Chicago, United States')
weather = obsObj.get_weather()
##Second app, night display and alarm
##########################
lightApp = App(title="Alarm", height = 1080, width = 1920, bg = (0,0,0))
welcomeMessage = Text(lightApp, text = "\nGood morning, the time is", size = 80, font = "Quicksand")
timeDisplay = Text(lightApp, text = getTime(), size = 350, font = "Piboto Thin", color = pickColor())
# NOTE(review): time.time()%60 is epoch seconds modulo 60 -- roughly the
# seconds of the current minute; presumably updateSecond refreshes it below.
secondDisplay = Text(lightApp, text = time.time()%60, size = 50, color = (255,255,255), font = "Quicksand")
weatherDisplay = Text(lightApp, text = '\n\n' , size = 30, font = "Piboto Thin", color = 'white')
lightApp.set_full_screen()
audioEnd = 0
pathLastPlayed = '' #save the path of the last audio file so nothing plays twice in a row
###initialize audio channel for use
pygame.mixer.init()
pygame.mixer.set_num_channels(1);
pygame.mixer.music.set_volume(0.2)
alarmTime = formatTimeSet(alarmH, alarmM, pm);
# Poll the alarm every 100 ms, refresh the seconds display every 20 ms and
# the weather every 10 s while the night display runs.
timeDisplay.repeat(100, alarmOn)
secondDisplay.repeat(20, updateSecond)
weatherDisplay.repeat(10000, getTemp)
lightApp.display()
#print('Pi thinks the date/time is:', getTime())
#print('The Alarm will go off at:', alarmTime)
stopAudio()
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
640,
198,
6738,
27809,
952,
22570,
1330,
20969,
198,
11748,
28686,
198,
11748,
12972,
6057,
198,
6738,
7009,
12985,
1330,
20443,
24835,
198,
6738,
4738,
1330,
1635,
198,
6738,
915,
528,
3529... | 3.130669 | 1,301 |
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains common features to manage and handle binary log files.
"""
import io
import errno
import os
import shutil
import time
from datetime import datetime
from mysql.utilities.exception import UtilError
# Binary log categories accepted by the utilities in this module.
LOG_TYPES = ['bin', 'relay', 'all']
LOG_TYPE_BIN = LOG_TYPES[0]    # server binlog files (*-bin.nnnnnn)
LOG_TYPE_RELAY = LOG_TYPES[1]  # relay log files (*-relay-bin.nnnnnn)
LOG_TYPE_ALL = LOG_TYPES[2]    # both of the above
# Seconds in one day; used to convert an elapsed-days filter into a timestamp.
_DAY_IN_SECONDS = 86400
def is_binary_log_filename(filename, log_type=LOG_TYPE_ALL, basename=None):
    """Check if the filename matches the name format for binary log files.

    A binary log filename is '<stem>.<sequence>' where <sequence> is an
    integer. The stem must equal the given basename when one is supplied;
    otherwise it must follow the default server naming for the requested
    log_type: '*-bin' for 'bin' (relay logs excluded), '*-relay-bin' for
    'relay', and '*-bin' (relay logs included) for 'all'.

    filename[in]   Filename to check.
    log_type[in]   One of 'bin', 'relay' or 'all' (default 'all'); ignored
                   when a basename is supplied.
    basename[in]   Explicit basename for the binary log file, or None to use
                   the default server naming conventions.

    Returns True if the filename matches, False otherwise. Raises UtilError
    for an unsupported log_type.
    """
    stem, extension = os.path.splitext(filename)
    sequence = extension[1:]  # drop the leading '.'
    # Validate the stem, either against the explicit basename or against the
    # default naming convention for the requested log type.
    if basename:
        if stem != basename:
            return False
    elif log_type == LOG_TYPE_BIN:
        # '*-bin.nnnnnn' but never a relay log.
        if stem.endswith('-relay-bin') or not stem.endswith('-bin'):
            return False
    elif log_type == LOG_TYPE_RELAY:
        # '*-relay-bin.nnnnnn' only.
        if not stem.endswith('-relay-bin'):
            return False
    elif log_type == LOG_TYPE_ALL:
        # '*-bin.nnnnnn', relay logs included.
        if not stem.endswith('-bin'):
            return False
    else:
        raise UtilError("Unsupported log-type: {0}".format(log_type))
    # The extension must be a pure sequence number.
    try:
        int(sequence)
    except ValueError:
        return False
    return True
def get_index_file(source, binary_log_file):
    """Find the binary log index file.

    Look in the given source directory for the '<basename>.index' file that
    accompanies the given binary log file.

    source[in]           Directory to search for the index file.
    binary_log_file[in]  Binary log file associated to the index file.

    Returns the full path of the index file; raises UtilError when it does
    not exist.
    """
    stem = os.path.splitext(binary_log_file)[0]
    candidate = os.path.join(source, '{0}.index'.format(stem))
    if not os.path.isfile(candidate):
        raise UtilError("Unable to find the index file associated to file "
                        "'{0}'.".format(binary_log_file))
    return candidate
def filter_binary_logs_by_sequence(filenames, seq_list):
    """Keep only the filenames whose sequence number is allowed.

    The sequence number is the numeric file extension of a binary log file
    ('.nnnnnn'); filenames whose number is not covered by seq_list are
    dropped.

    Note: the filenames are assumed to be valid binary log files; use
    is_binary_log_filename() to check each one.

    filenames[in]  List of binary log filenames to check.
    seq_list[in]   Allowed sequence numbers and/or (low, high) interval
                   tuples, e.g. 3, (5, 12), 16, 21.

    Returns the filenames (in input order) matching the sequence filter.
    """
    matched = []
    for fname in filenames:
        # The extension minus its leading '.' is the sequence number.
        seq_num = int(os.path.splitext(fname)[1][1:])
        for allowed in seq_list:
            if isinstance(allowed, tuple):
                # Interval: inclusive on both ends.
                hit = allowed[0] <= seq_num <= allowed[1]
            else:
                # Single sequence number.
                hit = seq_num == allowed
            if hit:
                matched.append(fname)
                break
    return matched
def filter_binary_logs_by_date(filenames, source, max_date):
    """Filter filenames by their last-modification date.

    Keep only the files whose last modification happened strictly before
    max_date (i.e., recently changed files are excluded).

    Note: the filenames are assumed to be valid binary log files; use
    is_binary_log_filename() to check each one.

    filenames[in]  List of binary log filenames to check.
    source[in]     Directory where the files are located.
    max_date[in]   Either a date 'yyyy-mm-dd', a date/time
                   'yyyy-mm-ddThh:mm:ss', or an integer number of days since
                   the last modification (the current day counts as one).

    Returns the list of filenames last modified before max_date. Raises
    UtilError for an invalid date or day count.
    """
    # Decide which of the two supported forms was given: an integer number
    # of days, or a date / date-time string.
    try:
        elapsed_days = int(max_date)
    except ValueError:
        elapsed_days = None
    if elapsed_days:
        if elapsed_days < 1:
            raise UtilError(
                "Invalid number of days (must be an integer greater than "
                "zero): {0}".format(max_date)
            )
        # Truncate the current local time to midnight so days are counted
        # from 00:00; the current day counts as one.
        midnight = list(time.localtime())
        midnight[3] = 0  # hours
        midnight[4] = 0  # minutes
        midnight[5] = 0  # seconds
        day_start = time.mktime(tuple(midnight))
        threshold = day_start - (_DAY_IN_SECONDS * (elapsed_days - 1))
        max_date = time.strftime('%Y-%m-%dT%H:%M:%S',
                                 time.localtime(threshold))
    else:
        # Validate the string form and normalize it to the comparable
        # 'yyyy-mm-ddThh:mm:ss' representation.
        time_part = max_date.partition('T')[2]
        if time_part:
            try:
                parsed = datetime.strptime(max_date, '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                raise UtilError(
                    "Invalid date/time format (yyyy-mm-ddThh:mm:ss): "
                    "{0}".format(max_date)
                )
        else:
            try:
                parsed = datetime.strptime(max_date, '%Y-%m-%d')
            except ValueError:
                raise UtilError(
                    "Invalid date format (yyyy-mm-dd): {0}".format(max_date)
                )
        max_date = parsed.strftime('%Y-%m-%dT%H:%M:%S')
    # Keep the files whose last modification predates the threshold;
    # comparing ISO-like strings is equivalent to comparing the times.
    old_files = []
    for fname in filenames:
        mtime = os.path.getmtime(os.path.join(source, fname))
        stamp = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(mtime))
        if stamp < max_date:
            old_files.append(fname)
    return old_files
def move_binary_log(source, destination, filename, log_index,
                    undo_on_error=True):
    """Move a binary log file to a specific destination.

    Move the given binary log file (filename), located in the source
    directory, to the specified destination directory and update the
    respective index file accordingly.

    Note: An error is raised if any issue occurs during the process.
    Additionally, if undo_on_error=True (default) then the file is moved
    back to the source directory if an error occurs while updating the index
    file (keeping the file in the original location and the index file
    unchanged). Otherwise the file might be moved with the index file not
    correctly updated. In either case an error is issued.

    source[in]         Source directory where the binary log file is located.
    destination[in]    Destination directory to move the binary log to.
    filename[in]       Name of the binary log file to move.
    log_index[in]      Location (full path) of the binary log index file.
    undo_on_error[in]  If True (default), revert the file move when updating
                       the index file fails.
    """
    def _move_file_back():
        """Try to move the file back to its original source directory.

        Returns a warning message indicating if the file was moved back
        successfully or not.
        """
        try:
            # Move file back to source directory.
            destination_file = os.path.join(destination, filename)
            shutil.move(destination_file, source)
        except (IOError, shutil.Error) as move_err:
            # Warn the user that an error occurred while trying to
            # move the file back.
            return ("\nWARNING: Failed to move file back to source directory: "
                    "{0}").format(move_err)
        else:
            # Notify user that the file was successfully moved back.
            return "\nWARNING: File move aborted."
    # Move file to destination directory.
    source_file = os.path.join(source, filename)
    if os.path.isdir(destination):
        shutil.move(source_file, destination)
    else:
        # Raise an error if the destination dir does not exist.
        # Note: To be consistent with the IOError raised by shutil.move() if
        # the source file does not exist.
        raise IOError(errno.ENOENT, "No such destination directory",
                      destination)
    # Update index file.
    found_pos = None
    try:
        with io.open(log_index, 'r') as index_file:
            # Read all data from index file.
            data = index_file.readlines()
            # Search for the binary log file entry.
            for pos, line in enumerate(data):
                if line.strip().endswith(filename):
                    found_pos = pos
                    break
            if found_pos is not None:
                # Replace binary file entry with absolute destination path.
                data[found_pos] = u'{0}\n'.format(
                    os.path.join(destination, filename)
                )
            else:
                warning = ""  # No warning if undo_on_error = False.
                if undo_on_error:
                    warning = _move_file_back()
                # Raise error (including cause).
                raise UtilError("Entry for file '{0}' not found in index "
                                "file: {1}{2}".format(filename, log_index,
                                                      warning))
            # Create a new temporary index_file with the update entry.
            # Note: original file is safe is something goes wrong during write.
            tmp_file = '{0}.tmp'.format(log_index)
            try:
                with io.open(tmp_file, 'w', newline='\n') as tmp_index_file:
                    tmp_index_file.writelines(data)
            except IOError as err:
                warning = ""  # No warning if undo_on_error = False.
                if undo_on_error:
                    warning = _move_file_back()
                # Raise error (including cause).
                raise UtilError('Unable to write temporary index file: '
                                '{0}{1}'.format(err, warning))
    except IOError as err:
        warning = ""  # No warning if undo_on_error = False.
        if undo_on_error:
            warning = _move_file_back()
        # Raise error (including cause).
        raise UtilError('Failed to update index file: '
                        '{0}{1}'.format(err, warning))
    # Replace the original index file with the new one.
    if os.name == 'posix':
        os.rename(tmp_file, log_index)
    else:
        # On windows, rename does not work if the target file already exists.
        shutil.move(tmp_file, log_index)
| [
2,
198,
2,
15069,
357,
66,
8,
1946,
11,
18650,
290,
14,
273,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
... | 2.395589 | 5,895 |
# imports
import os
import collections
import tensorflow.nn as nn
from tensorflow.keras import Input,Model
from tensorflow.keras.utils import plot_model
from tensorflow.keras import layers, regularizers,activations
from utils import DotDict,configs
from models.DGV0.model_v0 import DGM
# end imports
# continue from 450 epochs to 2500
# Train : python train.py -gpu 2 -s 1 -e 1200 -b 1024
config = DotDict({ 'n_filters' : 192,
'kernel' : 5,
'n_res_blocks' : 8,
'l2_reg' : 0.0005,
'dropout' : 0.2,
'n_inc_blocks' : 14,
'squeeze' : 16,
})
'''
-------------------------------------------------------------------------------------------
DGM (DeepGoModel) : Inception Net with Squeeze & Excitation Blocks / Swish
-------------------------------------------------------------------------------------------
'''
| [
2,
17944,
198,
11748,
28686,
198,
11748,
17268,
198,
11748,
11192,
273,
11125,
13,
20471,
355,
299,
77,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
1330,
23412,
11,
17633,
198,
6738,
11192,
273,
11125,
13,
6122,
292,
13,
26791,
1330,
... | 2.352804 | 428 |
from kivy.app import App
from kivy.core.window import Window
from kivymd.uix.screen import MDScreen
| [
6738,
479,
452,
88,
13,
1324,
1330,
2034,
198,
6738,
479,
452,
88,
13,
7295,
13,
17497,
1330,
26580,
198,
6738,
479,
452,
4948,
67,
13,
84,
844,
13,
9612,
1330,
10670,
23901,
628
] | 2.970588 | 34 |
# Suffix appended to a type name to derive the corresponding input class
# name (e.g. "Foo" -> "FooInput"). NOTE(review): the enclosing context is
# not visible in this chunk -- confirm the exact usage at the call sites.
INPUT_CLASS_SUFFIX = "Input"
| [
1268,
30076,
62,
31631,
62,
12564,
5777,
10426,
796,
366,
20560,
1,
198
] | 2.230769 | 13 |
# -*- coding: utf-8 -*-
import numpy as np
import math
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import scipy as sp
# Heavy-atom element symbols considered on the protein side of an atom pair.
Protein_Atom = ['C','N','O','S']
# Element symbols considered on the ligand side of an atom pair.
Ligand_Atom = ['C','N','O','S','P','F','Cl','Br','I']
# Three-letter residue codes accepted when parsing PDB ATOM records.
aa_list = ['ALA','ARG','ASN','ASP','CYS','GLU','GLN','GLY','HIS','HSE','HSD','SEC',
           'ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL','PYL']
pre = '' # this is the path where you place this file
Year = '2013'  # PDBbind release used to build all data paths below
# Load the train/test/all complex-name lists; each file holds one Python
# list literal on its first line.
# NOTE(review): eval() executes arbitrary code -- only safe because these
# data files are produced/trusted locally; never point them at untrusted input.
f1 = open(pre + '../data/' + Year + '/name/train_data_' + Year + '.txt')
pre_train_data = f1.readlines()
train_data = eval(pre_train_data[0])
f1.close()
f1 = open(pre + '../data/' + Year + '/name/test_data_' + Year + '.txt')
pre_test_data = f1.readlines()
test_data = eval(pre_test_data[0])
f1.close()
f1 = open(pre + '../data/' + Year + '/name/all_data_' + Year + '.txt')
pre_all_data = f1.readlines()
all_data = eval(pre_all_data[0])
f1.close()
########################################################################################
# extract coordinate code starts
def pocket_coordinate_data_to_file(start,end):
    #########################################################################
    '''
    Extract the atom coordinates for each protein/ligand atom-pair of every
    complex and write them to CSV files.

    For each complex and each (protein element, ligand element) pair this
    writes a coordinate file (columns x, y, z, flag with flag 1 = protein and
    2 = ligand) and a description file holding the protein and ligand atom
    counts.

    (1) start and end are the indexes (into all_data) of the complexes to
        process.
    (2) before calling this function the PDBbind data must be prepared under
        the '../data/<Year>/refined/' directory.
    '''
    #########################################################################
    t1 = len(all_data)
    for i in range(start,end):
        #print('process {0}-th '.format(i))
        # One coordinate list per protein element symbol.
        protein = {}
        for ii in range(4):
            protein[Protein_Atom[ii]] = []
        name = all_data[i]
        # NOTE(review): t1 (len above) is clobbered here; it was never used.
        t1 = pre + '../data/' + Year + '/refined/' + name + '/' + name + '_pocket.pdb'
        f1 = open(t1,'r')
        # Fixed-column PDB parsing: record type [0:4], residue [17:20],
        # element [13:15], x/y/z text [30:54].
        for line in f1.readlines():
            if (line[0:4]=='ATOM')&(line[17:20] in aa_list ):
                atom = line[13:15]
                atom = atom.strip()
                index = get_index(atom,Protein_Atom)
                if index==-1:
                    continue
                else:
                    protein[Protein_Atom[index]].append(line[30:54])
        f1.close()
        # One coordinate list per ligand element symbol.
        ligand = {}
        for ii in range(9):
            ligand[Ligand_Atom[ii]] = []
        t2 = pre + '../data/' +Year + '/refined/' + name + '/' + name + '_ligand.mol2'
        f2 = open(t2,'r')
        contents = f2.readlines()
        t3 = len(contents)
        # Locate the ATOM section of the mol2 file (between the
        # @<TRIPOS>ATOM and @<TRIPOS>BOND markers).
        # NOTE(review): this shadows the function parameters start/end; safe
        # only because the outer range() was already materialized.
        start = 0
        end = 0
        for jj in range(t3):
            if contents[jj][0:13]=='@<TRIPOS>ATOM':
                start = jj + 1
                continue
            if contents[jj][0:13]=='@<TRIPOS>BOND':
                end = jj - 1
                break
        for kk in range(start,end+1):
            if contents[kk][8:17]=='thiophene':
                print('thiophene',kk)
            # Fixed-column mol2 parsing: element [8:10], x/y/z text [17:46].
            atom = contents[kk][8:10]
            atom = atom.strip()
            index = get_index(atom,Ligand_Atom)
            if index==-1:
                continue
            else:
                ligand[Ligand_Atom[index]].append(contents[kk][17:46])
        f2.close()
        # Write one coordinate file + one count file per element pair.
        # NOTE(review): the inner loop shadows the outer loop variable i;
        # works because the outer for reassigns i from its iterator, but is
        # fragile -- worth renaming if this code is ever touched.
        for i in range(4):
            for j in range(9):
                l_atom = ligand[ Ligand_Atom[j] ]
                p_atom = protein[ Protein_Atom[i] ]
                number_p = len(p_atom)
                number_l = len(l_atom)
                number_all = number_p + number_l
                all_atom = np.zeros((number_all,4))
                # Protein atoms first, tagged 1 in the last column.
                for jj in range(number_p):
                    all_atom[jj][0] = float(p_atom[jj][0:8])
                    all_atom[jj][1] = float(p_atom[jj][8:16])
                    all_atom[jj][2] = float(p_atom[jj][16:24])
                    all_atom[jj][3] = 1
                # Then ligand atoms, tagged 2.
                for jjj in range(number_p,number_all):
                    all_atom[jjj][0] = float(l_atom[jjj-number_p][0:9])
                    all_atom[jjj][1] = float(l_atom[jjj-number_p][9:19])
                    all_atom[jjj][2] = float(l_atom[jjj-number_p][19:29])
                    all_atom[jjj][3] = 2
                filename2 = pre + '../data/' + Year + '/pocket_coordinate/' + name + '_' + Protein_Atom[i] + '_' + Ligand_Atom[j] + '_coordinate.csv'
                np.savetxt(filename2,all_atom,delimiter=',')
                filename3 = pre + '../data/' + Year + '/pocket_coordinate/' + name + '_' + Protein_Atom[i] + '_' + Ligand_Atom[j] + '_protein_ligand_number.csv'
                temp = np.array(([number_p,number_l]))
                np.savetxt(filename3,temp,delimiter=',')
#############################################################################################
# extract coordinate code ends
#######################################################################################################
# create_the_associated_simplicial_complex_of_a_hypergraph algorithm starts
def create_simplices_with_filtration(atom,cutoff,name,P_atom,L_atom,kill_time):
    ##########################################################################################
    '''
    Build the filtered associated simplicial complex (dimension <= 2) of the
    protein-ligand hypergraph for one atom-pair combination. Higher
    dimensional simplices can be added with extra code if needed.

    (1) atom: atom coordinate array, same format as the output of
        pocket_coordinate_data_to_file() -- columns x, y, z and a molecule
        flag (1 = protein, 2 = ligand).
    (2) cutoff: radius of the binding core region extracted around the
        ligand; it also bounds the length of the edges used to build the
        hypergraph and hence the associated simplicial complex.
    (3) name: name of the data entry (a PDBbind complex id).
    (4) P_atom and L_atom: element symbols of the protein/ligand atom
        combination, like C-C, C-N, etc.
    (5) kill_time: extra persistence added to same-molecule edges; larger
        values lengthen every bar. 0 is used here.
    (6) returns a sequence of ordered simplices, i.e. a filtered simplicial
        complex; each simplex is
        [ index, filtration_value, dimension, vertices... ].
    '''
    ##########################################################################################
    vertices = []
    edge = []
    triangle = []
    edge_same_type = [] # edge_same_type stores the edges come from the same molecular.
                        # i.e., the edges the hypergraph does not have.
    filtration_of_edge_same_type = []
    filename3 = pre + '../data/' + Year + '/pocket_coordinate/' + name + '_' + P_atom + '_' + L_atom +'_protein_ligand_number.csv'
    temp = np.loadtxt(filename3,delimiter=',') # temp gives the numbers of atoms for protein and ligand
    number_p = int(temp[0])
    number_l = int(temp[1])
    t = atom.shape
    atom_number = t[0] # t is equal to the sum of number_p and number_l
    if (number_p==0)|(number_l==0):# no complex
        return []
    # Scan every protein-ligand pair; each pair within cutoff spawns an edge
    # and, with any third atom close enough, a triangle.
    for i in range(number_p):
        for j in range(number_p,atom_number):
            dis1 = distance_of_two_point(atom[i],atom[j])
            if dis1<=cutoff:
                if ([i,j] in edge)==False:
                    edge.append([i,j])
                    if (i in vertices)==False:
                        vertices.append(i)
                    if (j in vertices)==False:
                        vertices.append(j)
                for k in range(atom_number):
                    if (k!=i)&(k!=j):
                        # Distance from k to the pair member of the OTHER
                        # molecule (same-molecule edges are not hyperedges).
                        dis = -1
                        if atom[i][3]==atom[k][3]:
                            dis = distance_of_two_point(atom[j],atom[k])
                        else:
                            dis = distance_of_two_point(atom[i],atom[k])
                        if dis<=cutoff:
                            # Sort (i, j, k) ascending into One < Two < Three.
                            One = 0
                            Two = 0
                            Three = 0
                            if k<i:
                                One = k
                                Two = i
                                Three = j
                            elif (k>i) & (k<j):
                                One = i
                                Two = k
                                Three = j
                            else:
                                One = i
                                Two = j
                                Three = k
                            if ([One,Two,Three] in triangle)==False:
                                triangle.append([One,Two,Three])
                            # For each triangle side: register the edge; a
                            # same-molecule side only enters through the
                            # triangle, so its filtration value is the max
                            # distance to the third (opposite) vertex.
                            if ([One,Two] in edge)==False:
                                edge.append([One,Two])
                                if atom[One][3]==atom[Two][3]:
                                    edge_same_type.append([One,Two])
                                    d1 = distance_of_two_point(atom[One],atom[Three])
                                    d2 = distance_of_two_point(atom[Two],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type.append(d)
                            else:
                                edge_index = get_edge_index(One,Two,edge_same_type)
                                if edge_index!=-1:
                                    temp = filtration_of_edge_same_type[edge_index]
                                    d1 = distance_of_two_point(atom[One],atom[Three])
                                    d2 = distance_of_two_point(atom[Two],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type[edge_index] = max(temp,d)
                            if ([One,Three] in edge)==False:
                                edge.append([One,Three])
                                if atom[One][3]==atom[Three][3]:
                                    edge_same_type.append([One,Three])
                                    d1 = distance_of_two_point(atom[One],atom[Two])
                                    d2 = distance_of_two_point(atom[Two],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type.append(d)
                            else:
                                edge_index = get_edge_index(One,Three,edge_same_type)
                                if edge_index!=-1:
                                    temp = filtration_of_edge_same_type[edge_index]
                                    d1 = distance_of_two_point(atom[One],atom[Two])
                                    d2 = distance_of_two_point(atom[Two],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type[edge_index] = max(temp,d)
                            if ([Two,Three] in edge)==False:
                                edge.append([Two,Three])
                                if atom[Two][3]==atom[Three][3]:
                                    edge_same_type.append([Two,Three])
                                    d1 = distance_of_two_point(atom[One],atom[Two])
                                    d2 = distance_of_two_point(atom[One],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type.append(d)
                            else:
                                edge_index = get_edge_index(Two,Three,edge_same_type)
                                if edge_index!=-1:
                                    temp = filtration_of_edge_same_type[edge_index]
                                    d1 = distance_of_two_point(atom[One],atom[Two])
                                    d2 = distance_of_two_point(atom[One],atom[Three])
                                    d = max(d1,d2)
                                    filtration_of_edge_same_type[edge_index] = max(temp,d)
                            if (One in vertices)==False:
                                vertices.append(One)
                            if (Two in vertices)==False:
                                vertices.append(Two)
                            if (Three in vertices)==False:
                                vertices.append(Three)
    for i in range(number_p,atom_number): # here, we add the ligand atoms we did not add in
        if (i in vertices)==False:
            vertices.append(i)
    vertices_number = len(vertices)
    edge_number = len(edge)
    triangle_number = len(triangle)
    simplices_with_filtration = []
    same_type_number = len(edge_same_type)
    # Delay same-molecule edges by kill_time (0 in this work).
    for i in range(same_type_number):
        filtration_of_edge_same_type[i] = filtration_of_edge_same_type[i] + kill_time
    if vertices_number==0:
        return []
    # Vertices enter at filtration value 0.
    for i in range(vertices_number):
        item = [ i , 0 , 0 , vertices[i] ]
        simplices_with_filtration.append(item)
    # Edges enter at their length; same-molecule edges at their delayed value.
    for i in range( vertices_number , vertices_number + edge_number ):
        one = edge[ i - vertices_number ][0]
        two = edge[ i - vertices_number ][1]
        p1 = atom[ one ]
        p2 = atom[ two ]
        dis = distance_of_two_point(p1,p2)
        edge_index = get_edge_index(one,two,edge_same_type)
        if edge_index!=-1:
            dis = filtration_of_edge_same_type[edge_index]
        dis = round(dis,15)
        if dis<=cutoff:
            item = [ i , dis , 1 , one , two ]
            simplices_with_filtration.append(item)
    # Triangles enter when their last side does.
    for i in range( vertices_number + edge_number , vertices_number + edge_number + triangle_number ):
        one = triangle[ i - vertices_number - edge_number ][0]
        two = triangle[ i - vertices_number - edge_number ][1]
        three = triangle[ i - vertices_number - edge_number ][2]
        p1 = atom[ one ]
        p2 = atom[ two ]
        p3 = atom[ three ]
        dis = -1
        if ([one,two] in edge_same_type)==False:
            dis1 = distance_of_two_point(p1,p2)
            dis = max(dis,dis1)
        else:
            edge_index = get_edge_index(one,two,edge_same_type)
            temp = filtration_of_edge_same_type[edge_index]
            dis = max(dis,temp)
        if ([one,three] in edge_same_type)==False:
            dis2 = distance_of_two_point(p1,p3)
            dis = max(dis,dis2)
        else:
            edge_index = get_edge_index(one,three,edge_same_type)
            temp = filtration_of_edge_same_type[edge_index]
            dis = max(dis,temp)
        if ([two ,three] in edge_same_type)==False:
            dis3 = distance_of_two_point(p2,p3)
            dis = max(dis,dis3)
        else:
            edge_index = get_edge_index(two,three,edge_same_type)
            temp = filtration_of_edge_same_type[edge_index]
            dis = max(dis,temp)
        dis = round(dis,15)
        if dis<=cutoff:
            item = [ i , dis , 2 , one , two , three ]
            simplices_with_filtration.append(item)
    # The tiny dimension-dependent epsilon breaks filtration ties so faces
    # always precede the simplices they bound.
    simplices = sorted(simplices_with_filtration,key=lambda x:(x[1]+x[2]/10000000000000000))
    # by applying the function sorted, the simplicies will be ordered by the filtration values.
    # also the face of a simplex will appear earlier than the simplex itself.
    for i in range(len(simplices)):
        simplices[i][0] = i # assign index for the ordered simplices
    return simplices
def simplices_to_file(start, end, cutoff, kill_time):
    """Write the associated simplicial complex of every atom-pair hypergraph
    to disk.

    (1) start and end are the indexes (into all_data) of the complexes to
        process.
    (2) cutoff and kill_time are the same as in
        create_simplices_with_filtration().
    (3) pocket_coordinate_data_to_file(start, end) must have been run first
        to prepare the coordinate files read here.
    """
    for idx in range(start, end):
        name = all_data[idx]
        print('process {0}-th data {1}'.format(idx, name))
        # One output file per (protein element, ligand element) pair.
        for p_idx in range(4):
            p_atom = Protein_Atom[p_idx]
            for l_idx in range(9):
                l_atom = Ligand_Atom[l_idx]
                coord_path = (pre + '../data/' + Year + '/pocket_coordinate/'
                              + name + '_' + p_atom + '_' + l_atom
                              + '_coordinate.csv')
                cloud = np.loadtxt(coord_path, delimiter=',')
                filtered_complex = create_simplices_with_filtration(
                    cloud, cutoff, name, p_atom, l_atom, kill_time)
                out_path = (pre + '../data/' + Year + '/pocket_simplices_'
                            + str(cutoff) + '/' + name + '_' + p_atom + '_'
                            + l_atom + '_' + str(cutoff) + '.txt')
                with open(out_path, 'w') as out:
                    out.writelines(str(filtered_complex))
######################################################################################################
# create_the_associated_simplicial_complex_of_a_hypergraph algorithm ends
######################################################################################################
# the persistent cohomology algorithm starts from now(coefficient is Z/2)
def add_two_base_one_dimension(parameter1,base1,parameter2,base2):
    #############################################################
    '''
    Return the linear combination parameter1*base1 + parameter2*base2 of two
    1-cochains.

    Cochain format: [1, [v0, v1, value], ...] -- the leading 1 is the
    dimension, each following entry is an edge (vertex pair) with its
    coefficient. Entries whose coefficients cancel are removed by
    delete_zero_of_base().
    '''
    #############################################################
    t1 = len(base1)
    t2 = len(base2)
    # Copy the edge entries (skipping the leading dimension marker) into
    # arrays of rows [v0, v1, value].
    b1 = np.ones((t1-1,3))
    b2 = np.ones((t2-1,3))
    for i in range(1,t1):
        b1[i-1][0] = base1[i][0]
        b1[i-1][1] = base1[i][1]
        b1[i-1][2] = base1[i][2]
    for i in range(1,t2):
        b2[i-1][0] = base2[i][0]
        b2[i-1][1] = base2[i][1]
        b2[i-1][2] = base2[i][2]
    # Count the distinct edges of the union (shared edges counted once).
    count =t1-1 + t2-1
    for i in range(t1-1):
        for j in range(t2-1):
            if (b1[i][0]==b2[j][0])&(b1[i][1]==b2[j][1]):
                count = count -1
                break
    res = np.ones((count,3))
    # Scale base1 entries and seed the result with them.
    for i in range(t1-1):
        b1[i][2] = b1[i][2]*parameter1
        res[i,:] = b1[i,:]
    C = t1 -1
    # Merge base2 entries: add coefficients on shared edges, append the rest.
    for i in range(t2-1):
        have = 0
        for j in range(t1-1):
            if (res[j][0]==b2[i][0])&(res[j][1]==b2[i][1]):
                res[j][2] = res[j][2] + b2[i][2] * parameter2
                have = 1
                break
        if have ==0:
            b2[i][2] = b2[i][2] * parameter2
            res[C,:] = b2[i,:]
            C = C + 1
    # Re-assemble into the cochain list format, dimension marker first.
    rrr = [1]
    for i in range(count):
        rrr.append([res[i][0],res[i][1],res[i][2]])
    rrr = delete_zero_of_base(rrr) # only store nonzero information
    return rrr
def add_two_base_zero_dimension(parameter1, base1, parameter2, base2):
    """Return the linear combination parameter1*base1 + parameter2*base2 of
    two 0-cochains.

    Cochain format: [0, [vertex, value], ...] -- the leading 0 is the
    dimension, each following entry pairs a vertex with its coefficient.
    Entries whose coefficients cancel are removed by delete_zero_of_base().
    """
    n1 = len(base1) - 1
    n2 = len(base2) - 1
    # Copy the vertex entries (skipping the leading dimension marker) into
    # arrays of rows [vertex, value].
    left = np.ones((n1, 2))
    right = np.ones((n2, 2))
    for k in range(n1):
        left[k][0] = base1[k + 1][0]
        left[k][1] = base1[k + 1][1]
    for k in range(n2):
        right[k][0] = base2[k + 1][0]
        right[k][1] = base2[k + 1][1]
    # Count the distinct vertices of the union (shared ones counted once).
    total = n1 + n2
    for k in range(n1):
        for m in range(n2):
            if left[k][0] == right[m][0]:
                total -= 1
                break
    summed = np.ones((total, 2))
    # Scale base1 entries and seed the result with them.
    for k in range(n1):
        left[k][1] = left[k][1] * parameter1
        summed[k, :] = left[k, :]
    nxt = n1
    # Merge base2 entries: add coefficients on shared vertices, append rest.
    for k in range(n2):
        merged = False
        for m in range(n1):
            if summed[m][0] == right[k][0]:
                summed[m][1] = summed[m][1] + right[k][1] * parameter2
                merged = True
                break
        if not merged:
            right[k][1] = right[k][1] * parameter2
            summed[nxt, :] = right[k, :]
            nxt += 1
    # Re-assemble into the cochain list format, dimension marker first.
    combined = [0]
    for k in range(total):
        combined.append([summed[k][0], summed[k][1]])
    # only store nonzero information
    return delete_zero_of_base(combined)
def get_result(point_cloud,simplices_with_filtration):
    ######################################################################################
    '''
    Compute the persistent cohomology barcodes and generators (coefficients
    in Z/2, dimensions 0 and 1) for the associated simplicial complex of a
    hypergraph. Higher dimensions can be added with extra code.

    (1) point_cloud: coordinate data for one atom-combination; same format as
        the output of pocket_coordinate_data_to_file().
    (2) simplices_with_filtration: output of
        create_simplices_with_filtration().
    (3) returns {'cocycles': [dim0, dim1, dim2], 'diagrams': [dim0, dim1,
        dim2]} where each diagram entry is a [birth_index, death_index] pair
        (-1 marks an essential/never-dying class).
    '''
    ######################################################################################
    t1 = len(simplices_with_filtration)
    if t1==0:
        return []
    threshold = t1
    # I holds the indices of currently-live cocycles; the first vertex
    # always starts one.
    I = [0]
    P = [] # P is a list of pair[ [alpha_p,alpha_q],... ] d(alpha_p) = alpha_q
    base = [ [0, [ int(simplices_with_filtration[0][3]) ,1]] ]
    # format of an element of base: [dimension , [simplices(increasing order),value]]
    # Process simplices in filtration order (standard persistent cohomology
    # reduction): each new simplex either creates a cocycle or kills the
    # youngest live one it pairs with.
    for m in range(1,threshold):
        m_dimension = simplices_with_filtration[m][2]
        C = np.zeros((m,1))
        # Boundary faces of simplex m, by dimension.
        m_boundary = []
        if m_dimension==0:
            m_boundary.append([-1])
        elif m_dimension==1:
            m_boundary.append([simplices_with_filtration[m][3]])
            m_boundary.append([simplices_with_filtration[m][4]])
        elif m_dimension==2:
            zero_one = [simplices_with_filtration[m][3],simplices_with_filtration[m][4]]
            zero_two = [simplices_with_filtration[m][3],simplices_with_filtration[m][5]]
            one_two = [simplices_with_filtration[m][4],simplices_with_filtration[m][5]]
            m_boundary.append(zero_one)
            m_boundary.append(zero_two)
            m_boundary.append(one_two)
        # can add higher dimensional information if you need
        # Already-paired cocycles only record their value on the boundary.
        for p in P:
            alpha_p = base[p[0]]
            if (alpha_p[0] + 1)!= m_dimension:
                C[p[0]][0] = 0
            else:
                C[p[0]][0] = get_value_alpha_P_on_m_boundary(alpha_p,m_boundary,m_dimension)
            if C[p[0]][0]!=0:
                new_item = simplices_with_filtration[m][3:4+m_dimension]
                new_item.append(C[p[0]][0])
                base[p[1]].append(new_item)
        # Evaluate every live cocycle on the boundary of simplex m and find
        # the youngest one with a nonzero value.
        I_max_none_zero_number = -100
        for i in I:
            alpha_i = base[i]
            if (alpha_i[0] + 1)!= m_dimension:
                C[i][0] = 0
            else:
                C[i][0] = get_value_alpha_P_on_m_boundary(alpha_i,m_boundary,m_dimension)
        for i in I:
            if (C[i][0]!=0)&(i>I_max_none_zero_number):
                I_max_none_zero_number = i
        if I_max_none_zero_number == -100:
            # No live cocycle is hit: simplex m gives birth to a new cocycle.
            I.append(m)
            new_item = [m_dimension]
            new_item.append(simplices_with_filtration[m][3:4+m_dimension])
            new_item[1].append(1)
            base.append(new_item)
        else:
            # Simplex m kills the youngest nonzero cocycle M; eliminate its
            # contribution from every other live cocycle.
            M = I_max_none_zero_number
            for t in range(len(I)):
                if I[t] == M:
                    del I[t]
                    break
            P.append([M,m])
            temp_base = [base[M][0]]
            for i in range(1,len(base[M])):
                temp_base.append(base[M][i])
            for i in I:
                if C[i][0]!=0:
                    parameter = C[i][0]/C[M][0]
                    if (base[i][0]==0):
                        base[i] = add_two_base_zero_dimension(1,base[i],-parameter,temp_base)
                    elif base[i][0]==1:
                        base[i] = add_two_base_one_dimension(1,base[i],-parameter,temp_base)
                    # can add higher dimensional information if you need
            new_item = [m_dimension]
            new_item.append(simplices_with_filtration[m][3:4+m_dimension])
            new_item[1].append(C[M][0])
            base.append(new_item)
    # Collect the surviving (essential, death = -1) classes...
    zero_cocycle = []
    one_cocycle =[]
    two_cocycle = []
    zero_bar = []
    one_bar = []
    two_bar = []
    for i in I:
        if base[i][0]==0:
            zero_cocycle.append(base[i][1::])
            zero_bar.append([i,-1])
        elif base[i][0]==1:
            one_cocycle.append(base[i][1::])
            one_bar.append([i,-1])
        # can add higher dimensional information if you need
    # ...and the paired classes with strictly positive persistence.
    for p in P:
        if (base[p[0]][0]==0)&((simplices_with_filtration[p[1]][1]-simplices_with_filtration[p[0]][1])>0):
            zero_cocycle.append(base[p[0]][1::])
            zero_bar.append([p[0],p[1]])
        elif (base[p[0]][0]==1)&((simplices_with_filtration[p[1]][1]-simplices_with_filtration[p[0]][1])>0):
            one_cocycle.append(base[p[0]][1::])
            one_bar.append([p[0],p[1]])
        # can add higher dimensional information if you need
    result = {'cocycles':[zero_cocycle,one_cocycle,two_cocycle],
              'diagrams':[zero_bar,one_bar,two_bar]}
    return result
def bar_and_cocycle_to_file(start,end,cutoff,filtration):
    ########################################################################################
    '''
    Compute persistent cohomology for every atom-pair hypergraph of every complex
    and write the barcodes (.csv) and cohomology generators/cocycles (.txt) to disk.

    (1) start, end: index range into all_data of the complexes to process
    (2) cutoff: distance cutoff used when the simplices were built; same meaning
        as in "create_simplices_with_filtration" / simplices_to_file
    (3) filtration: upper bound of the filtration range actually used here;
        simplices born later than this value are discarded before computing
    (4) prerequisite: simplices_to_file(start,end,cutoff,kill_time) must already
        have been run so the simplices files exist on disk
    '''
    ########################################################################################
    t = len(all_data)  # NOTE(review): unused; kept as-is to avoid touching behavior
    for i in range(start,end):
        name = all_data[i]
        print('process {0}-th bar {1}'.format(i,name))
        # one computation per protein-element / ligand-element pair (4 x 9 = 36 pairs)
        for P in range(4):
            for L in range(9):
                # atom coordinates of this pair
                filename1 = pre + '../data/' + Year + '/pocket_coordinate/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] +'_coordinate.csv'
                point_cloud = np.loadtxt(filename1,delimiter=',')
                # pre-computed simplices: text file holding one python list literal
                filename2 = pre + '../data/' + Year + '/pocket_simplices_' + str(cutoff) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '.txt'
                f = open(filename2)  # NOTE(review): handle never closed; prefer `with` if revised
                pre_simplices = f.readlines()
                # eval of a pipeline-generated file; trusted only because we wrote it ourselves
                simplices = eval(pre_simplices[0])
                # keep simplices whose birth value is within the filtration bound;
                # the early `break` assumes the list is sorted by birth value -- TODO confirm
                simplices_with_filtration = []
                for ii in range(len(simplices)):
                    if simplices[ii][1]<=filtration:
                        simplices_with_filtration.append(simplices[ii])
                    else:
                        break
                result = get_result(point_cloud,simplices_with_filtration)
                if result==[]:
                    # degenerate pair (nothing to compute): write a single all-zero bar
                    # and empty cocycle files so downstream readers always find the files
                    filename1 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_bar.csv'
                    zero_bar = np.zeros((1,2))
                    np.savetxt(filename1,zero_bar,delimiter=',')
                    filename3 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_cocycle.txt'
                    f3 = open(filename3,'w')
                    f3.writelines('')
                    f3.close()
                    filename2 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_bar.csv'
                    one_bar = np.zeros((1,2))
                    np.savetxt(filename2,one_bar,delimiter=',')
                    filename4 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_cocycle.txt'
                    f4 = open(filename4,'w')
                    f4.writelines('')
                    f4.close()
                    continue
                # unpack the persistence diagrams (index pairs) and cocycle generators
                diagrams = result['diagrams']
                cocycles = result['cocycles']
                cocycle0 = cocycles[0]
                cocycle1 = cocycles[1]
                dgm0 = np.array(diagrams[0])
                dgm1 = np.array(diagrams[1])
                zero = dgm0.shape
                zero_number = zero[0]
                zero_bar = np.zeros((zero_number,2))
                one = dgm1.shape
                one_number = one[0]
                one_bar = np.zeros((one_number,2))
                # translate simplex indices in the diagrams into filtration values;
                # a death index of -1 marks an essential class (bar never dies)
                for ii in range(zero_number):
                    left = dgm0[ii][0]
                    right = dgm0[ii][1]
                    zero_bar[ii][0] = simplices_with_filtration[left][1]
                    zero_bar[ii][1] = simplices_with_filtration[right][1]
                    if right==-1:
                        zero_bar[ii][1] = float('inf')
                for j in range(one_number):
                    left = dgm1[j][0]
                    right = dgm1[j][1]
                    one_bar[j][0] = simplices_with_filtration[left][1]
                    one_bar[j][1] = simplices_with_filtration[right][1]
                    if right==-1:
                        one_bar[j][1] = float('inf')
                #draw_barcodes(zero_bar,one_bar,max_distance)
                # persist 0-dim barcode + cocycles, then 1-dim barcode + cocycles
                filename1 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_bar.csv'
                np.savetxt(filename1,zero_bar,delimiter=',')
                filename3 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_cocycle.txt'
                f3 = open(filename3,'w')
                f3.writelines(str(cocycle0))
                f3.close()
                filename2 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_bar.csv'
                np.savetxt(filename2,one_bar,delimiter=',')
                filename4 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_cocycle.txt'
                f4 = open(filename4,'w')
                f4.writelines(str(cocycle1))
                f4.close()
#######################################################################################################
# the persistent cohomology algorithm ends
#####################################################################################################
# feature_generation algorithm starts from now
def get_number(bar,left,right):
    """Count the persistence bars that cover the whole interval [left, right].

    Parameters
    ----------
    bar : numpy.ndarray
        Barcode of shape (n, 2): column 0 is birth, column 1 is death.
        A 1-D array (np.loadtxt output for a placeholder/single-entry file)
        is treated as "no bars".
    left, right : float
        End points of the interval a bar must span.

    Returns
    -------
    int
        Number of rows with birth <= left and death >= right.
    """
    if len(bar.shape) == 1:
        return 0
    covering = 0
    for row in bar:
        if row[0] <= left and row[1] >= right:
            covering += 1
    return covering
def get_feature_of_train(start,end,cutoff,filtration,unit):
    """Generate the HPC training feature matrix by bin-counting barcodes.

    For each training complex and each of the 36 protein/ligand element
    pairs, counts (per bin) the 0- and 1-dimensional bars covering that
    bin and writes the full matrix to pocket_feature/.

    Parameters
    ----------
    start, end : int      index range into train_data to process.
    cutoff, filtration :  same meaning as in bar_and_cocycle_to_file().
    unit : float          width of each filtration bin (bins start at 2).

    Prerequisite: bar_and_cocycle_to_file() has produced the barcode files.
    """
    total = len(train_data)
    bins0 = int( (filtration-2)/unit )  # 0-dim bins; filtration axis starts at 2
    bins1 = int( (filtration-2)/unit )  # 1-dim bins
    feature_matrix = np.zeros(( total , 36 * ( bins0 + bins1 ) ))
    bar_dir = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/'
    tag = '_' + str(cutoff) + '_' + str(filtration)
    for i in range(start,end):
        name = train_data[i]
        col = 0
        for P in range(4):
            for L in range(9):
                pair = name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L]
                zero_bar = np.loadtxt(bar_dir + pair + tag + '_zero_bar.csv',delimiter=',')
                one_bar = np.loadtxt(bar_dir + pair + tag + '_one_bar.csv',delimiter=',')
                for n in range(bins0):
                    feature_matrix[i][col] = get_number( zero_bar , 2 + unit * n , 2 + unit * (n+1) )
                    col += 1
                for n in range(bins1):
                    feature_matrix[i][col] = get_number( one_bar , 2 + unit * n , 2 + unit * (n+1) )
                    col += 1
    np.savetxt(pre + '../data/' + Year + '/pocket_feature/feature_matrix_of_train_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv',feature_matrix,delimiter=',')
def get_feature_of_test(start,end,cutoff,filtration,unit):
    """Generate the HPC testing feature matrix by bin-counting barcodes.

    For each test complex and each of the 36 protein/ligand element pairs,
    counts (per bin) the 0- and 1-dimensional bars covering that bin and
    writes the full matrix to pocket_feature/.

    Parameters
    ----------
    start, end : int      index range into test_data to process.
    cutoff, filtration :  same meaning as in bar_and_cocycle_to_file().
    unit : float          width of each filtration bin (bins start at 2).

    Prerequisite: bar_and_cocycle_to_file() has produced the barcode files.
    """
    total = len(test_data)
    bins0 = int( (filtration-2)/unit )  # 0-dim bins; filtration axis starts at 2
    bins1 = int( (filtration-2)/unit )  # 1-dim bins
    feature_matrix = np.zeros(( total , 36 * ( bins0 + bins1 ) ))
    bar_dir = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/'
    tag = '_' + str(cutoff) + '_' + str(filtration)
    for i in range(start,end):
        name = test_data[i]
        col = 0
        for P in range(4):
            for L in range(9):
                pair = name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L]
                zero_bar = np.loadtxt(bar_dir + pair + tag + '_zero_bar.csv',delimiter=',')
                one_bar = np.loadtxt(bar_dir + pair + tag + '_one_bar.csv',delimiter=',')
                for n in range(bins0):
                    feature_matrix[i][col] = get_number( zero_bar , 2 + unit * n , 2 + unit * (n+1) )
                    col += 1
                for n in range(bins1):
                    feature_matrix[i][col] = get_number( one_bar , 2 + unit * n , 2 + unit * (n+1) )
                    col += 1
    np.savetxt(pre + '../data/' + Year + '/pocket_feature/feature_matrix_of_test_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv',feature_matrix,delimiter=',')
def create_coordinate_with_associated_distance(start,end):
    """Augment each atom's coordinate row with its distances to all atoms on
    the opposite side of the protein/ligand pair.

    Input rows are [x, y, z, flag]; output rows are
    [x, y, z, flag, d_1, ..., d_k, 0-padding], where the d_i are the
    distances from a protein atom to every ligand atom (or vice versa).
    These adjacent distances later form the centrality weight of each atom.

    Parameters
    ----------
    start, end : int  index range into all_data to process.
    """
    src_dir = pre + '../data/' + Year + '/pocket_coordinate/'
    dst_dir = pre + '../data/' + Year + '/pocket_coordinate_with_associated_distance/'
    for idx in range(start,end):
        print('process: ',idx)
        name = all_data[idx]
        for P in range(4):
            for L in range(9):
                pair = name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_'
                coords = np.loadtxt(src_dir + pair + 'coordinate.csv',delimiter=',')
                counts = np.loadtxt(src_dir + pair + 'protein_ligand_number.csv',delimiter=',')
                number_p = int(counts[0])
                number_l = int(counts[1])
                rows = number_p + number_l
                cols = max(number_p,number_l) + 4
                enriched = np.zeros((rows,cols))
                if (number_p==0) | (number_l==0):
                    # one side empty: keep the all-zero placeholder matrix
                    np.savetxt(dst_dir + pair + 'coordinate.csv',enriched,delimiter=',')
                    continue
                # protein atoms: columns 4.. hold distances to each ligand atom
                for pi in range(number_p):
                    enriched[pi][0:4] = coords[pi,::]
                    for li in range(number_l):
                        enriched[pi][4+li] = distance_of_two_point(coords[pi],coords[number_p+li])
                # ligand atoms: columns 4.. hold distances to each protein atom
                for gi in range(number_p,number_p+number_l):
                    enriched[gi][0:4] = coords[gi,::]
                    for pi in range(number_p):
                        enriched[gi][4+pi] = distance_of_two_point(coords[gi],coords[pi])
                np.savetxt(dst_dir + pair + 'coordinate.csv',enriched,delimiter=',')
def get_cocycle_feature_value0_centrality(cutoff,filtration,name,P,L,bar,left,right,eta):
    """Sum the centrality-weighted values of the 0-dimensional cocycles whose
    bars cover the interval [left, right] (the "enriched" 0-barcode value).

    Parameters
    ----------
    cutoff, filtration :  same meaning as in bar_and_cocycle_to_file().
    name : str            complex name (selects the data files).
    P, L : str            protein / ligand element names of the atom pair.
    bar : numpy.ndarray   0-dimensional cohomology barcode, shape (n, 2).
    left, right : float   interval a bar must cover to contribute.
    eta : float           Gaussian width controlling the captured region.

    Returns
    -------
    float
        Sum over covering bars of the mean cocycle-entry value, each entry
        weighted by its atom's distance-based centrality weight.
    """
    t = bar.shape
    if (len(t)==1):
        # np.loadtxt yields a 1-D array for the placeholder "no bars" file
        return 0
    filename1 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + P + '_' + L + '_' + str(cutoff) + '_' + str(filtration) + '_zero_cocycle.txt'
    # eval of a pipeline-generated file; safe only because we wrote it ourselves.
    # `with` ensures the handle is closed even if reading/parsing raises
    # (the original leaked the descriptor on that path).
    with open(filename1) as f1:
        zero_cocycle = eval(f1.readlines()[0])
    filename2 = pre + '../data/' + Year + '/pocket_coordinate_with_associated_distance/' + name + '_' + P + '_' + L +'_coordinate.csv'
    point_cloud = np.loadtxt(filename2,delimiter=',')
    p_shape = point_cloud.shape
    num = t[0]
    res = 0
    for i in range(num):
        if (bar[i][0]<=left)&(bar[i][1]>=right):
            cocycle = zero_cocycle[i]
            t2 = len(cocycle)
            res2 = 0
            for j in range(t2):
                one = int(cocycle[j][0])    # atom index of this cocycle entry
                value = abs(cocycle[j][1])  # coefficient is Z/2, so -1 == 1
                # centrality weight: Gaussian kernel over this atom's distances
                # to every opposite-side atom; a 0 entry marks padding, stop there
                temp_weight = 0
                for inner in range(4,p_shape[1]):
                    if point_cloud[one][inner]==0:
                        break
                    frac = pow(point_cloud[one][inner]/eta,2)
                    v = math.exp(-frac)
                    temp_weight = temp_weight + v
                res2 = res2 + value * temp_weight
            res = res + res2/t2  # average contribution of this cocycle
    return res
def get_cocycle_feature_value1_centrality(cutoff,filtration,name,P,L,bar,left,right,eta):
    """Sum the distance-weighted values of the 1-dimensional cocycles whose
    bars cover the interval [left, right] (the "enriched" 1-barcode value).

    Parameters
    ----------
    cutoff, filtration :  same meaning as in bar_and_cocycle_to_file().
    name : str            complex name (selects the data files).
    P, L : str            protein / ligand element names of the atom pair.
    bar : numpy.ndarray   1-dimensional cohomology barcode, shape (n, 2).
    left, right : float   interval a bar must cover to contribute.
    eta : float           Gaussian width controlling the captured region.

    Returns
    -------
    float
        Sum over covering bars of the mean edge value, each edge weighted by
        exp(-(d/eta)^2) with d the Euclidean length of the edge.
    """
    t = bar.shape
    if (len(t)==1):
        # np.loadtxt yields a 1-D array for the placeholder "no bars" file
        return 0
    filename1 = pre + '../data/' + Year + '/pocket_cocycle_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + P + '_' + L + '_' + str(cutoff) + '_' + str(filtration) + '_one_cocycle.txt'
    # eval of a pipeline-generated file; safe only because we wrote it ourselves.
    # `with` ensures the handle is closed even if reading/parsing raises
    # (the original leaked the descriptor on that path and also carried an
    # unused `count` local, removed here).
    with open(filename1) as f1:
        one_cocycle = eval(f1.readlines()[0])
    filename2 = pre + '../data/' + Year + '/pocket_coordinate/' + name + '_' + P + '_' + L +'_coordinate.csv'
    point_cloud = np.loadtxt(filename2,delimiter=',')
    num = t[0]
    res = 0
    for i in range(num):
        if (bar[i][0]<=left)&(bar[i][1]>=right):
            cocycle = one_cocycle[i]
            t2 = len(cocycle)
            res2 = 0
            for j in range(t2):
                one = int(cocycle[j][0])    # edge end-point atom indices
                two = int(cocycle[j][1])
                value = abs(cocycle[j][2])  # coefficient is Z/2, so -1 == 1
                dis = distance_of_two_point(point_cloud[one],point_cloud[two])
                frac = pow(dis/eta,2)
                v = math.exp(-frac)
                res2 = res2 + value * v
            res = res + res2/t2  # average contribution of this cocycle
    return res
def get_cocycle_feature_of_train(start,end,cutoff,filtration,unit,eta):
    #######################################################################################
    '''
    Generate the HWPC training feature matrix (bin counts plus cocycle-weighted values).

    (1) start, end: index range into train_data; NOTE the matrix has end-start rows
        and row i of the data lands at matrix row i-start (unlike get_feature_of_train)
    (2) cutoff, filtration: same meaning as in bar_and_cocycle_to_file()
    (3) unit: width of each filtration bin (bins start at filtration value 2)
    (4) eta: Gaussian width parameter for the centrality weights
    (5) prerequisite: create_coordinate_with_associated_distance() must have been
        performed so the distance-augmented coordinate files exist
    '''
    #######################################################################################
    t = len(train_data)
    # four feature groups per atom pair, each (filtration-2)/unit bins wide:
    # 0-dim bar counts, 0-dim cocycle values, 1-dim bar counts, 1-dim cocycle values
    column0 = int((filtration - 2)/unit )
    column1 = int((filtration - 2)/unit)
    column_cocycle0 = int( (filtration - 2)/unit )
    column_cocycle1 = int( (filtration - 2)/unit )
    feature_matrix = np.zeros(( end - start , 36 * ( column0 + column1 + column_cocycle0 + column_cocycle1 ) ))
    for i in range(start,end):
        name = train_data[i]
        #print('process {0}-th of train feature,{1}'.format(i,name))
        count = 0  # running column index inside this row
        for P in range(4):
            for L in range(9):
                filename0 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_bar.csv'
                zero_bar = np.loadtxt(filename0,delimiter=',')
                filename1 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_bar.csv'
                one_bar = np.loadtxt(filename1,delimiter=',')
                # plain bin counts of 0-dim bars covering each bin
                for n in range(column0):
                    feature_matrix[i-start][count] = get_number( zero_bar , 2 + unit * n , 2 + unit * (n + 1) )
                    count = count + 1
                # centrality-weighted 0-dim cocycle values per bin
                for n in range(column_cocycle0):
                    feature_matrix[i-start][count] = get_cocycle_feature_value0_centrality(cutoff,filtration,name,Protein_Atom[P],Ligand_Atom[L],zero_bar,2 + unit * n, 2 + unit * (n+1),eta)
                    count = count + 1
                # plain bin counts of 1-dim bars covering each bin
                for n in range(column1):
                    feature_matrix[i-start][count] = get_number( one_bar , 2 + unit * n , 2 + unit * (n+1) )
                    count = count + 1
                # distance-weighted 1-dim cocycle values per bin
                for n in range(column_cocycle1):
                    feature_matrix[i-start][count] = get_cocycle_feature_value1_centrality(cutoff,filtration,name,Protein_Atom[P],Ligand_Atom[L],one_bar,2 + unit * n, 2 + unit * (n+1),eta)
                    count = count + 1
    np.savetxt(pre + '../data/' + Year + '/pocket_feature/eta_' + str(eta) + '_cocycle_feature_matrix_of_train_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv',feature_matrix,delimiter=',')
def get_cocycle_feature_of_test(start,end,cutoff,filtration,unit,eta):
    ######################################################################################
    '''
    Generate the HWPC testing feature matrix (bin counts plus cocycle-weighted values).

    (1) start, end: index range into test_data; NOTE the matrix has end-start rows
        and row i of the data lands at matrix row i-start (unlike get_feature_of_test)
    (2) cutoff, filtration: same meaning as in bar_and_cocycle_to_file()
    (3) unit: width of each filtration bin (bins start at filtration value 2)
    (4) eta: Gaussian width parameter for the centrality weights
    (5) prerequisite: create_coordinate_with_associated_distance() must have been
        performed so the distance-augmented coordinate files exist
    '''
    ######################################################################################
    t = len(test_data)
    # four feature groups per atom pair, each (filtration-2)/unit bins wide:
    # 0-dim bar counts, 0-dim cocycle values, 1-dim bar counts, 1-dim cocycle values
    column0 = int((filtration - 2)/unit )
    column1 = int((filtration - 2)/unit)
    column_cocycle0 = int( (filtration - 2)/unit )
    column_cocycle1 = int( (filtration - 2)/unit )
    feature_matrix = np.zeros(( end - start , 36 * ( column0 + column1 + column_cocycle0 + column_cocycle1 ) ))
    for i in range(start,end):
        name = test_data[i]
        #print('process {0}-th of test feature,{1}'.format(i,name))
        count = 0  # running column index inside this row
        for P in range(4):
            for L in range(9):
                filename0 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_zero_bar.csv'
                zero_bar = np.loadtxt(filename0,delimiter=',')
                filename1 = pre + '../data/' + Year + '/pocket_bar_' + str(cutoff) + '_' + str(filtration) + '/' + name + '_' + Protein_Atom[P] + '_' + Ligand_Atom[L] + '_' + str(cutoff) + '_' + str(filtration) + '_one_bar.csv'
                one_bar = np.loadtxt(filename1,delimiter=',')
                # plain bin counts of 0-dim bars covering each bin
                for n in range(column0):
                    feature_matrix[i-start][count] = get_number( zero_bar , 2 + unit * n , 2 + unit * (n + 1) )
                    count = count + 1
                # centrality-weighted 0-dim cocycle values per bin
                for n in range(column_cocycle0):
                    feature_matrix[i-start][count] = get_cocycle_feature_value0_centrality(cutoff,filtration,name,Protein_Atom[P],Ligand_Atom[L],zero_bar,2 + unit * n, 2 + unit * (n+1),eta)
                    count = count + 1
                # plain bin counts of 1-dim bars covering each bin
                for n in range(column1):
                    feature_matrix[i-start][count] = get_number( one_bar , 2 + unit * n , 2 + unit * (n+1) )
                    count = count + 1
                # distance-weighted 1-dim cocycle values per bin
                for n in range(column_cocycle1):
                    feature_matrix[i-start][count] = get_cocycle_feature_value1_centrality(cutoff,filtration,name,Protein_Atom[P],Ligand_Atom[L],one_bar,2 + unit * n, 2 + unit * (n+1),eta)
                    count = count + 1
    np.savetxt(pre + '../data/' + Year + '/pocket_feature/eta_' + str(eta) + '_cocycle_feature_matrix_of_test_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv',feature_matrix,delimiter=',')
def get_combined_feature(typ,cutoff,filtration,unit):
    """Build the combined HWPC feature matrix from the eta=2.5 and eta=10 runs.

    Per atom pair, the input matrices are laid out as
        [bar0 | cocycle0 | bar1 | cocycle1]                      (4*number cols)
    and the combined output as
        [bar0 | cocycle0(eta=2.5) | cocycle0(eta=10) |
         bar1 | cocycle1(eta=2.5) | cocycle1(eta=10)]            (6*number cols)
    where number is the bin count per feature group.  The bar-count columns
    are identical in both inputs, so they are taken from the eta=2.5 matrix.

    Parameters
    ----------
    typ : str             'train' or 'test' -- which feature files to combine.
    cutoff, filtration, unit : same meaning as in get_cocycle_feature_of_train().
    """
    filename1 = pre + '../data/' + Year + '/pocket_feature/' + 'eta_2.5_cocycle_feature_matrix_of_' + typ + '_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv'
    filename2 = pre + '../data/' + Year + '/pocket_feature/' + 'eta_10_cocycle_feature_matrix_of_' + typ + '_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv'
    m1 = np.loadtxt(filename1,delimiter=',')
    m2 = np.loadtxt(filename2,delimiter=',')
    t1 = m1.shape  # NOTE(review): assumes >= 2 data rows so m1 loads as 2-D -- confirm
    # Bins per feature group.  BUG FIX: this was int((filtration-2)/0.1),
    # which silently ignored the `unit` parameter and broke for any bin size
    # other than 0.1.  Derive it from `unit`, matching every other feature
    # routine; backward compatible since all existing call sites pass 0.1.
    number = int((filtration-2)/unit)
    m = np.zeros((t1[0],36*number*2*3))
    for i in range(t1[0]):
        for j in range(36):
            m[i][j*number*6:j*number*6+number*2] = m1[i][j*number*4:j*number*4+number*2]
            m[i][j*number*6+number*2:j*number*6+number*3] = m2[i][j*number*4+number:j*number*4+number*2]
            m[i][j*number*6+number*3:j*number*6+number*5] = m1[i][j*number*4+number*2:j*number*4+number*4]
            m[i][j*number*6+number*5:j*number*6+number*6] = m2[i][j*number*4+number*3:j*number*4+number*4]
    filename3 = pre + '../data/' + Year + '/pocket_feature/' + 'mix_eta_2.5_10_cocycle_feature_matrix_of_' + typ + '_36_' + str(cutoff) + '_' + str(filtration) + '_' + str(unit) + '.csv'
    np.savetxt(filename3,m,delimiter=',')
############################################################################################################
# feature_generation algorithm ends.
############################################################################################################
# machine_learning algorithm starts.
############################################################################################################
# machine_learning algorithm ends.
def run_for_PDBbind_2013():
    ##############################################################
    '''
    Run the whole pipeline end to end for the PDBbind-2013 data set.

    (1) before running, the module-level parameter Year must be set
        to '2013' so all data paths resolve correctly
    (2) index ranges below: 2959 complexes total, of which the first
        2764 entries of train_data and 195 of test_data are used
    (3) fixed hyper-parameters: cutoff 10.5, filtration bound 7.5,
        bin width 0.1, centrality widths eta = 2.5 and 10
    '''
    ##############################################################
    # extract coordinate
    pocket_coordinate_data_to_file(0,2959)
    # create hypergraph
    simplices_to_file(0,2959,10.5,0)
    # compute persistent cohomology
    bar_and_cocycle_to_file(0,2959,10.5,7.5)
    # feature generation: plain HPC bin-count features and regression targets
    get_feature_of_train(0,2764,10.5,7.5,0.1)
    get_feature_of_test(0,195,10.5,7.5,0.1)
    get_target_matrix_of_train()
    get_target_matrix_of_test()
    # HWPC features: augment coordinates with distances, then two eta runs
    create_coordinate_with_associated_distance(0,2959)
    get_cocycle_feature_of_train(0,2764,10.5,7.5,0.1,2.5)
    get_cocycle_feature_of_test(0,195,10.5,7.5,0.1,2.5)
    get_cocycle_feature_of_train(0,2764,10.5,7.5,0.1,10)
    get_cocycle_feature_of_test(0,195,10.5,7.5,0.1,10)
    get_combined_feature('train',10.5,7.5,0.1)
    get_combined_feature('test',10.5,7.5,0.1)
    # machine learning: report Pearson correlation for each feature variant
    get_pearson_correlation('HPC','')
    get_pearson_correlation('HWPC2.5','eta_2.5_cocycle_')
    get_pearson_correlation('HWPC10','eta_10_cocycle_')
    get_pearson_correlation('combined', 'mix_eta_2.5_10_cocycle_')
# NOTE(review): runs the full pipeline at import time; consider guarding with
# `if __name__ == '__main__':` so the module can be imported without side effects.
run_for_PDBbind_2013()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
1341,
35720,
13,
1072,
11306,
1330,
17701,
1153,
45686,
278,
8081,
44292,
198,
6738,
1341,
35720,
13,
416... | 1.962437 | 27,554 |
from django.db import models
from signup.models import CustomUser
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
1051,
929,
13,
27530,
1330,
8562,
12982,
198
] | 3.882353 | 17 |
# -*- coding: utf-8 -*-
"""Top-level package for Tmux Session Helper."""
__author__ = """John Hardy"""
__email__ = 'john.hardy@me.com'
__version__ = '0.1.0'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
309,
76,
2821,
23575,
5053,
525,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
7554,
27583,
37811,
198,
834,
12888,
834,
796,
... | 2.373134 | 67 |
import scrapy
from jedeschule.utils import cleanjoin
from scrapy.shell import inspect_response
| [
11748,
15881,
88,
198,
6738,
474,
37507,
354,
2261,
13,
26791,
1330,
3424,
22179,
198,
6738,
15881,
88,
13,
29149,
1330,
10104,
62,
26209,
198
] | 3.8 | 25 |
from boiler import *
| [
6738,
36741,
1330,
1635,
198
] | 4.2 | 5 |