id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11257967 | <filename>recipes/migrations/0014_tag_slug.py<gh_stars>0
# Generated by Django 3.1.1 on 2020-09-11 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable ``slug`` field to the ``Tag`` model.

    Auto-generated by Django 3.1.1; applied with ``manage.py migrate``.
    """

    # Must run after the previous auto-generated migration in this app.
    dependencies = [
        ('recipes', '0013_auto_20200911_1155'),
    ]

    # blank/null let existing Tag rows remain valid until slugs are backfilled.
    operations = [
        migrations.AddField(
            model_name='tag',
            name='slug',
            field=models.SlugField(blank=True, null=True, verbose_name='Уникальный адрес'),
        ),
    ]
| StarcoderdataPython |
324910 | #!/usr/bin/env python2.7
'''argparser.py: argparse example.'''
__author__ = '<NAME>'
import argparse
# Demonstration entry point: one required positional int plus one boolean flag.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='General description')
    # Positional argument; argparse coerces the token to int and errors otherwise.
    parser.add_argument('num', type=int,
                        help='required number')
    # store_true makes the flag default to False when absent.
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='optional flag')
    args = parser.parse_args()
    # Python 2 print statement — this file targets python2.7 (see shebang).
    print args
# > ./argparser.py -h
# usage: main.py [-h] [--verbose] num
#
# General description
#
# positional arguments:
# num required number
#
# optional arguments:
# -h, --help show this help message and exit
# --verbose, -v optional flag
#
# > ./argparser.py 10
# Namespace(num=10, verbose=False)
#
# > ./argparser.py -v 10
# Namespace(num=10, verbose=True)
| StarcoderdataPython |
3430977 | <reponame>kithsirij/NLP-based-Syllabus-Coverage-Exam-paper-checker-Tool<filename>database_insert_question_topic.py
import MySQLdb
import nltk
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.porter import *
from PyQt4 import QtGui
from PyQt4.QtGui import *
import math
import operator
# MySQL connection to the exam-paper/topic database (local server, no password).
db = MySQLdb.connect('localhost', 'root', '', 'new_pyproject')
cursor = db.cursor()
# Dictionary cursor so result rows can be addressed by column name.
cursorNew = db.cursor(MySQLdb.cursors.DictCursor)
# Tokenized words of the current question, keyed by position;
# populated by GenerateBestDocument.get_tokens().
input_arr = {}
# input_count = 0
# Shared best-match state mutated by get_max_tfidf() via `global`.
max_tf_idf = 0
max_topic = ''
# NOTE(review): these three module-level counters are shadowed by the local
# variables of the same name inside the class methods — likely dead state.
tfidf_count = 0
tfidf_count1 = 0
tfidf_count2 = 0
class GenerateBestDocument:
    """Scores exam questions against syllabus topics via summed TF-IDF.

    Relies on the module-level MySQL cursors (``cursor``/``cursorNew``) and
    the shared ``input_arr`` dict that :meth:`get_tokens` populates.
    Python 2 code (print statements, ``str.translate(None, ...)``).
    """

    def get_tokens(self, word):
        """Tokenize a question string into the shared ``input_arr`` dict.

        Lowercases, strips non-alphabetic characters, punctuation and
        English stopwords, then stores each remaining token by position.
        """
        # print 'word --',word
        input_count = 0
        newv = str(word)
        # remove nonalpha numeric words
        # NOTE(review): `re` is only in scope via the `porter` star-import —
        # consider importing it explicitly.
        regex = re.compile('[^a-zA-Z,\.!?]')
        nonalpha = regex.sub(' ', newv)
        lowers = nonalpha.lower()
        # remove the punctuation using the character deletion step of translate
        no_punctuation = lowers.translate(None, string.punctuation)
        # tokenize the words
        tokens = nltk.word_tokenize(no_punctuation)
        # filtered the words
        filtered = [w for w in tokens if not w in stopwords.words('english')];
        No_words_in_doc = len(filtered)
        print 'Number of words in a document: ', No_words_in_doc
        # input_arr[input_count]=filtered
        # print input_count,'-->',input_arr[input_count]
        print '------', filtered
        # Reset the shared dict so tokens of the previous question don't leak in.
        input_arr.clear()
        for w in filtered:
            input_arr[input_count] = w
            print input_count, '-->', input_arr[input_count]
            input_count = input_count + 1

    def get_tfidf(self, subject, question, stu_year, semester, yearss):
        """Sum three TF-IDF variants of the question's words for every topic
        of the subject and persist one row per topic in ``insert_question``."""
        cursorNew.execute("""SELECT topic FROM subject_topic_with_content where subject_name=%s and student_year=%s and semester=%s""", (subject,stu_year,semester,))
        topics = cursorNew.fetchall()
        for singleTopic in topics:
            # Per-topic accumulators for the three TF-IDF weighting schemes.
            tfidf_count = 0
            tfidf_count1 = 0
            tfidf_count2 = 0
            print '>>', singleTopic["topic"]
            print ''
            for arr in input_arr:
                print arr, 'word////', input_arr[arr]
                cursorNew.execute("""SELECT word,tfidf,tf_idf_with_log,tf_idf_with_half FROM process_word_tfidf WHERE word=%s AND topic=%s""", (input_arr[arr], singleTopic["topic"]))
                data = cursorNew.fetchall()
                for freq in data:
                    print 'fr--', freq
                    print ''
                    print freq["word"]
                    tfidf = freq["tfidf"]
                    tfidf_log = freq["tf_idf_with_log"]
                    tfidf_half = freq["tf_idf_with_half"]
                    tfidf_count = tfidf_count + tfidf;
                    tfidf_count1 = tfidf_count1 + tfidf_log;
                    tfidf_count2 = tfidf_count2 + tfidf_half;
                    print freq["word"], '-->', tfidf_count, 'log-->', tfidf_count1, 'half-->', tfidf_count2
            print ">>>", tfidf_count, "log>>>", tfidf_count1, "half>>>", tfidf_count2
            cursorNew.execute("Insert into insert_question(years,student_year,semester,subject_name,topic,question,tfidf,tfidf_with_log,tfidf_with_half) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)", (yearss,stu_year,semester,subject,singleTopic["topic"],question,tfidf_count,tfidf_count1,tfidf_count2,))
            db.commit()

    ##########################2017-10-24##########################################
    def get_max_tfidf(self, subject, question, stu_year, semester, yearss):
        """Find the topic with the highest summed (plain) TF-IDF for the
        question and persist the winner in ``question_max_tfidf``.

        Updates the module-level ``max_tf_idf``/``max_topic`` globals.
        """
        cursorNew.execute( """SELECT topic FROM subject_topic_with_content where subject_name=%s and student_year=%s and semester=%s""", (subject, stu_year, semester,))
        topics = cursorNew.fetchall()
        global max_tf_idf
        global max_topic
        max_topic = ''
        max_tf_idf = 0
        for singleTopic in topics:
            tfidf_count = 0
            print '>>', singleTopic["topic"]
            for arr in input_arr:
                cursorNew.execute("""SELECT word,tfidf FROM process_word_tfidf WHERE word=%s AND topic=%s""", (input_arr[arr], singleTopic["topic"]))
                data = cursorNew.fetchall()
                for freq in data:
                    tfidf = freq["tfidf"]
                    tfidf_count = tfidf_count + tfidf;
                    print freq["word"], '-->', tfidf_count
            # Keep the best-scoring topic seen so far.
            if max_tf_idf < tfidf_count:
                max_tf_idf = tfidf_count
                max_topic = singleTopic["topic"]
        print '\n\n'
        print question, '--', max_topic, max_tf_idf
        cursorNew.execute( "Insert into question_max_tfidf(years,subject_name,question,max_tfidf,student_year,semester,topic) values(%s,%s,%s,%s,%s,%s,%s)",
                           (yearss,subject,question,max_tf_idf, stu_year, semester,max_topic,))
        db.commit()

    def subject_and_question(self, getsubject, getquestion, stu_year, semester, yearss):
        """Convenience driver: tokenize the question, then run both scorers."""
        self.get_tokens(getquestion)
        self.get_tfidf(getsubject, getquestion, stu_year, semester, yearss)
        self.get_max_tfidf(getsubject, getquestion, stu_year, semester, yearss)
# Module-level instance used by the importing GUI layer.
ex = GenerateBestDocument()
| StarcoderdataPython |
5059957 | from .edge import EdgeDao
from .point import PointDao
from .loc import LocDao
from .map import MapDao
from .redis_client import RedisDao | StarcoderdataPython |
1824243 | import socket
from centinel.experiment import Experiment
class TCPConnectExperiment(Experiment):
    """Probes plain TCP connectivity for each ``host port`` pair in the input file."""

    name = "tcp_connect"

    def __init__(self, input_file):
        self.input_file = input_file
        self.results = []
        self.host = None
        self.port = None

    def run(self):
        """Read one ``host port`` pair per line and probe each of them."""
        for entry in self.input_file:
            host, port = entry.strip().split(' ')
            self.host = host
            self.port = port
            self.tcp_connect()

    def tcp_connect(self):
        """Attempt a single TCP connection; record success or the failure reason."""
        outcome = {
            "host": self.host,
            "port": self.port
        }
        try:
            probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            probe.connect((self.host, int(self.port)))
            probe.close()
            outcome["success"] = "true"
        except Exception as err:
            outcome["failure"] = str(err)
        self.results.append(outcome)
| StarcoderdataPython |
6703537 | <filename>Task2E.py
from datetime import datetime, timedelta
from floodsystem.datafetcher import fetch_measure_levels
import floodsystem.flood as flood
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.plot import plot_water_levels
def run():
    """Plot the last 10 days of water levels for the N stations with the
    highest relative water level.

    Builds the station list, refreshes current levels, selects the top ``N``
    stations, and plots each one that has measurement history available.
    """
    stations = build_station_list()
    N = 5
    update_water_levels(stations)
    list_of_stations_over_tol = flood.stations_highest_rel_level(stations, N)
    for station, _rel_level in list_of_stations_over_tol:
        dt = 10  # days of history to fetch
        dates, levels = fetch_measure_levels(
            station.measure_id, dt=timedelta(days=dt))
        # Some stations have no measure history; skip them instead of plotting
        # an empty series.
        if dates:
            plot_water_levels(station, dates, levels)
# Script entry point.
if __name__ == "__main__":
    print("*** Task 2E: CUED Part IB Flood Warning System ***")
    run()
319273 | import os
import tempfile
import traceback
from threading import Thread
from easelenium.ui.file_utils import save_file
from easelenium.ui.parser.parsed_class import ParsedClass
from wx import ALL, EXPAND
FLAG_ALL_AND_EXPAND = ALL | EXPAND
def run_in_separate_thread(target, name=None, args=(), kwargs=None):
    """Start *target* on a new background thread and return the started thread."""
    worker = Thread(target=target, name=name, args=args, kwargs=kwargs)
    worker.start()
    return worker
def check_py_code_for_errors(code, *additional_python_paths):
    """Check a Python source string for parse/import errors.

    Writes *code* to a temporary file, runs :func:`check_file_for_errors`
    on it, and returns the formatted traceback string (or ``None`` when
    the code is clean).
    """
    # BUG FIX: mkstemp() returns an (os-level fd, path) tuple — previously the
    # whole tuple was passed to save_file()/os.remove() and the fd was leaked.
    fd, tmp_path = tempfile.mkstemp()
    os.close(fd)
    try:
        save_file(tmp_path, code)
        formatted_exception = check_file_for_errors(tmp_path, *additional_python_paths)
    finally:
        # Always clean up the temp file, even if saving/parsing raises.
        os.remove(tmp_path)
    return formatted_exception
def check_file_for_errors(path, *additional_python_paths):
    """Parse *path* with ParsedClass, temporarily extending ``sys.path``.

    Each existing *additional_python_paths* entry is appended to the import
    path for the duration of the parse. Returns ``None`` on success,
    otherwise the formatted traceback string.
    """
    syspath = list(os.sys.path)
    for py_path in additional_python_paths:
        if os.path.exists(py_path):
            os.sys.path.append(py_path)
    try:
        ParsedClass.get_parsed_classes(path)
        return None
    except Exception:
        return traceback.format_exc()
    finally:
        # Restore the import path on every exit path (was duplicated in both
        # branches before; finally guarantees it exactly once).
        os.sys.path = syspath
| StarcoderdataPython |
136622 | <reponame>Alexhuszagh/fast_float
# Amalgamation script: concatenates the fast_float headers, author list and
# the chosen license into a single self-contained header.
# text parts
processed_files = { }
# authors
for filename in ['AUTHORS', 'CONTRIBUTORS']:
    with open(filename) as f:
        text = ''
        for line in f:
            if filename == 'AUTHORS':
                text += '// fast_float by ' + line
            if filename == 'CONTRIBUTORS':
                text += '// with contributions from ' + line
        processed_files[filename] = text
# licenses
for filename in ['LICENSE-MIT', 'LICENSE-APACHE']:
    with open(filename) as f:
        text = ''
        for line in f:
            text += '// ' + line
        processed_files[filename] = text
# code
# NOTE(review): 'ascii_number.h' appears twice in this list (harmless here,
# the dict key is simply overwritten) AND twice in the join below, which
# duplicates its content in the output — TODO confirm this is intended.
for filename in [ 'fast_float.h', 'float_common.h', 'ascii_number.h',
                  'fast_table.h', 'decimal_to_binary.h', 'ascii_number.h',
                  'simple_decimal_conversion.h', 'parse_number.h']:
    with open('include/fast_float/' + filename) as f:
        text = ''
        for line in f:
            # Drop internal includes; the amalgamation is self-contained.
            if line.startswith('#include "'): continue
            text += line
        processed_files[filename] = text
# command line
import argparse
parser = argparse.ArgumentParser(description='Amalgamate fast_float.')
parser.add_argument('--license', default='MIT', help='choose license')
parser.add_argument('--output', default='', help='output file (stdout if none')
args = parser.parse_args()
# Assemble the final header: attribution, license, then code in include order.
text = '\n\n'.join([
    processed_files['AUTHORS'], processed_files['CONTRIBUTORS'],
    processed_files['LICENSE-' + args.license],
    processed_files['fast_float.h'], processed_files['float_common.h'],
    processed_files['ascii_number.h'], processed_files['fast_table.h'],
    processed_files['decimal_to_binary.h'], processed_files['ascii_number.h'],
    processed_files['simple_decimal_conversion.h'],
    processed_files['parse_number.h']])
if args.output:
    with open(args.output, 'wt') as f:
        f.write(text)
else:
    print(text)
| StarcoderdataPython |
4878217 | <filename>parsing/HeaderParser.py
import sys
import os
import io
import argparse
import pcpp
from pcpp import OutputDirective, Action
class Register:
    """One special-function register parsed from an AVR header."""

    width: int   # access width taken from the _SFR_IO*/_SFR_MEM* macro suffix
    name: str    # register identifier as defined in the header
    addr: int    # register address parsed from the define
    isIO: bool   # True when the define used an _SFR_IO* macro

    def __repr__(self):
        return f"{self.name}(0x{self.addr:02x})"
# NOTE(review): this bare attribute access is a no-op — apparently a leftover
# from debugging; consider removing it.
pcpp.CmdPreprocessor
# Processes register definition file and only leaves defines for registers
class SFRPreprocessor(pcpp.Preprocessor):
    """pcpp-based preprocessor that harvests AVR special-function-register
    defines (expansions starting with _SFR_IO*/_SFR_MEM*) into ``self.registers``
    while passing the rest of the header through."""

    def __init__(self):
        super().__init__()
        self.bypass_ifpassthru = False
        self.potential_include_guard = None
        # Parsed Register instances, filled by add_register().
        self.registers = []
        # Pretend <avr/io.h> was already processed so the header's guard passes.
        self.define("_AVR_IO_H_ 1")
        self.io_macro_start = '_SFR_IO'
        self.mem_macro_start = '_SFR_MEM'
        self.line_directive = None

    def on_comment(self, tok):
        # Pass through comments
        return True

    def on_directive_handle(self, directive, toks, ifpassthru, precedingtoks):
        """pcpp hook: intercept #define directives to collect register defines."""
        if ifpassthru:
            if directive.value == 'if' or directive.value == 'elif' or directive == 'else' or directive.value == 'endif':
                self.bypass_ifpassthru = len([tok for tok in toks if tok.value == '__PCPP_ALWAYS_FALSE__' or tok.value == '__PCPP_ALWAYS_TRUE__']) > 0
            if not self.bypass_ifpassthru and (directive.value == 'define' or directive.value == 'undef'):
                if toks[0].value != self.potential_include_guard:
                    raise OutputDirective(Action.IgnoreAndPassThrough) # Don't execute anything with effects when inside an #if expr with undefined macro
        super().on_directive_handle(directive, toks, ifpassthru, precedingtoks)
        if directive.value == 'define':
            if self.is_register_define(toks):
                self.add_register(toks)
                return None
        # only leave register definitions for now, bits are too inconsistent
        #if self.could_be_port_define(toks) and self.current_register is not None:
        #    if toks[0].lineno == self.next_line:
        #        self.next_line += 1
        #        return None
        return None # Pass through where possible

    def on_potential_include_guard(self, macro):
        # Remember the include-guard macro so its define/undef is not filtered.
        self.potential_include_guard = macro
        return super().on_potential_include_guard(macro)

    def on_include_not_found(self, is_system_include, curdir, includepath):
        # Missing includes are non-fatal; keep the directive in the output.
        raise OutputDirective(Action.IgnoreAndPassThrough)

    def is_register_define(self, toks):
        """True when the define's expansion begins with an SFR macro."""
        if len(toks) < 3:
            return False
        return toks[2].value.startswith(self.io_macro_start) or toks[2].value.startswith(self.mem_macro_start)

    def add_register(self, toks):
        """Parse one register #define into a Register record.

        The macro suffix gives the width; the first integer token gives
        the address (base auto-detected, so 0x-prefixed hex works).
        """
        r = Register()
        r.name = toks[0].value;
        try:
            if toks[2].value.startswith(self.io_macro_start):
                r.isIO = True
                r.width = int(toks[2].value[len(self.io_macro_start):])
            else:
                r.isIO = False
                r.width = int(toks[2].value[len(self.mem_macro_start):])
            r.addr = int([tok for tok in toks if tok.type == self.t_INTEGER][0].value, base=0)
            self.registers.append(r)
        except:
            # NOTE(review): bare except silently drops malformed defines — TODO narrow.
            pass

    def could_be_port_define(self, toks):
        # Currently unused (see the commented-out code in on_directive_handle).
        return len(toks) >= 3 and toks[2].type == self.t_INTEGER
# Command-line interface: explicit input files and/or a whole input directory.
parser = argparse.ArgumentParser(description="Parses avr io headers for register definitions.")
parser.add_argument('inputs', metavar='input', nargs='*', type=argparse.FileType(), help='File(s)to process')
parser.add_argument('--output-dir', dest='output_dir', default='output', metavar='path', help='Output directory for generated files')
parser.add_argument('--output-preprocessed', dest='output_preprocessed', action='store_true',
                    help='Also output preprocessed header files containing only defines.\nCan be used to extract additional information.')
parser.add_argument('--input-dir', dest='input_dir', help='Process all header files in directory.')
args = parser.parse_args(sys.argv[1:])
input_files = args.inputs
output_dir = args.output_dir
# Settings for the generated C++ headers.
extension = '.hpp'
include_guard_prefix = 'MICROPIN_DETAIL_'
include_guard_postfix = '_INCLUDED'
namespace = 'MicroPin'
required_includes = []
output_files = []
def output_registers(source_filename: str, filename: str, registers: [Register]):
    """Write a generated C++ header declaring one constexpr Register per entry.

    Arguments:
        source_filename: name of the parsed AVR header (used in the comment line)
        filename: output header file name, created inside the module-level ``output_dir``
        registers: parsed Register definitions to emit
    """
    include_guard = include_guard_prefix + filename.rpartition('.')[0].upper() + include_guard_postfix
    # `with` guarantees the file is closed even if a write fails.
    with open(output_dir + os.path.sep + filename, "wt") as output:
        output.write("// Generated from " + source_filename + '\n')
        output.write('#ifndef ' + include_guard + '\n')
        output.write('#define ' + include_guard + '\n')
        for include in required_includes:
            output.write('#include "')
            # BUG FIX: the include file name itself was never written,
            # so each entry produced an empty `#include ""` line.
            output.write(include)
            output.write('"\n')
        output.write('namespace ' + namespace + '\n{\n')
        output.write('\tconstexpr uint8_t sfrOffset = __SFR_OFFSET;\n')
        for r in registers:
            # e.g. `constexpr Register8 rPORTB{0x05 + sfrOffset};`
            output.write('\tconstexpr Register')
            output.write(str(r.width))
            output.write(' r')
            output.write(r.name)
            output.write('{0x%02x%s};\n' % (r.addr, ' + sfrOffset' if r.isIO else ''))
        output.write('}\n\n#endif\n')
# Optionally pick up every .h file from --input-dir in addition to the
# explicitly listed inputs.
if args.input_dir is not None:
    for file in os.listdir(args.input_dir):
        if file.endswith('.h'):
            input_files.append(open(args.input_dir + os.path.sep + file))
if len(input_files) > 0:
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    for input in input_files:
        preprocessor = SFRPreprocessor()
        filename = os.path.basename(input.name)
        preprocessor.parse(input)
        # e.g. 'iom328p.h' -> 'RegM328p.hpp'
        output_file = 'Reg' + filename.rpartition('.')[0].replace('io', '').capitalize() + extension
        if not args.output_preprocessed:
            # Discard preprocessed output
            # (draining the token stream drives the parse to completion).
            tok = preprocessor.token()
            while tok is not None:
                tok = preprocessor.token()
            input.close()
        else:
            # Also write the preprocessed header next to the generated one.
            preprocessed_output = open(output_dir + os.path.sep + filename, 'wt')
            preprocessor.write(preprocessed_output)
            preprocessed_output.close()
            input.close()
        if len(preprocessor.registers) > 0:
            output_registers(filename, output_file, preprocessor.registers)
            output_files.append(output_file)
            print('Parsed %s -> %s' % (filename, output_file))
        else:
            print('Skipped %s because it contained no register definitions' % (filename))
else:
    print('No inputs specified')
| StarcoderdataPython |
6550408 | # SPDX-License-Identifier: MIT
"""Todo handler
"""
import falcon
import todo
from middleware import login_required
from .base import RouteBase
class Todo(RouteBase):
    """Handles Todos.

    Args:
        RouteBase (object): Baseclass providing ``self.service``
    """

    @falcon.before(login_required)
    def on_get(self, req, resp):
        """GET request — respond with all todos serialized as JSON.

        Args:
            req (object): request
            resp (resp): response
        """
        todos = self.service.list()
        resp.media = [t.to_dict() for t in todos]

    @falcon.before(login_required)
    def on_post(self, req, resp):
        """POST request — create a todo from the ``todo`` object in the body.

        Args:
            req (object): request
            resp (resp): response
        """
        t = req.media.get("todo")
        if t is None:
            resp.status = falcon.HTTP_400
            # BUG FIX: the previous payload had an unquoted key and was not
            # valid JSON.
            resp.body = '{"message": "Todo object not in request"}'
            return
        item = self.service.create(t["content"])
        resp.media = item.to_dict()
| StarcoderdataPython |
6504692 | <gh_stars>10-100
import os
import h5py
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
dataset_list = ['glove-25-angular', 'nytimes-16-angular', 'fashion-mnist-784-euclidean']
def convert(spark, outpath, data):
    """Write *data* rows as (index, DenseVector) parquet and verify the row count."""
    print('processing %s ... ' % outpath, end='')
    # Lazy pairing of each row with its position, as (id, vector) tuples.
    vectors = ((idx, Vectors.dense(row)) for idx, row in enumerate(data))
    if not os.path.exists(outpath):
        spark.createDataFrame(vectors).write.parquet(outpath)
    # Read the data back and compare counts to detect a partial write.
    expected = len(data)
    actual = spark.read.parquet(outpath).count()
    if expected == actual:
        print('done')
    else:
        print('ERROR: expected: %s, actual: %s' % (expected, actual))
if __name__ == '__main__':
    # Local Spark session; driver memory bumped for the larger datasets.
    spark = SparkSession.builder.master('local[*]').config("spark.driver.memory", "10g").getOrCreate()
    for dataset in dataset_list:
        path = 'test/%s.hdf5' % dataset
        if not os.path.exists(path):
            print('launch dev/accuracy_test.py first')
        else:
            dataset_f = h5py.File(path, 'r')
            # Convert every top-level HDF5 dataset in the file separately.
            for key in dataset_f:
                outpath = 'test/parquet/%s/%s' % (dataset, key)
                convert(spark, outpath, dataset_f[key])
| StarcoderdataPython |
4962928 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
## Converts h5 input to short format
## By: <NAME>
## Bring in system mod
import sys
# In[ ]:
## Set user defined variables
## Check we have three inputs!
assert (len(sys.argv) >= 4), "ERROR: This script must include:\n(1) The full path to a ginteractions (tsv) file (which is assumed to be an h5 matrix converted via HicExplorer).\n(2) A genome size (tsv) file with chromosome and size columns.\n(3) A valid output path to save the hic short file."
## Gather data inputs
datapath = str(sys.argv[1])
sizepath = str(sys.argv[2])
savepath = str(sys.argv[3])
## Set verbosity if passed
if (len(sys.argv) == 5):
if str(sys.argv[4]) == 'true':
verbose = True
else:
verbose = False
else:
verbose = False
# ## Set user defined variables
# ## Set input path
# datapath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.ginteractions.tsv'
#
# ## Set output path
# savepath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.short'
#
# ## Set path to size file
# sizepath = '/Users/croth/REFERENCES/ENCODE/genome.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test1.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test2.size.txt'
#
# ## Set verbose
# verbose = False
# In[ ]:
## Set other needed variables
## Set verbosity
#verbose = True
## Set input sep
mysep = '\t'
## Set output output sep
outsep = ' '
## Set column names
colname = ['Chrom1','Left1','Right1','Chrom2','Left2','Right2','Quality']
# In[ ]:
## Bring in needed mods
import pandas as pd, numpy as np
## Write a ftn to check index between two dataframes
def checkix(x, y):
    """Assert that two dataframes have exactly the same index values (order-insensitive)."""
    left = np.array(sorted(x.index.values))
    right = np.array(sorted(y.index.values))
    # BUG FIX: the old check `np.sum(left - right) == 0` passes whenever the
    # differences cancel out (e.g. [0, 2] vs [1, 1]); compare element-wise,
    # guarding the shapes first so unequal lengths fail cleanly too.
    assert (left.shape == right.shape) and np.array_equal(left, right), "ERROR: The indices of the dataframes to not match!"
# In[ ]:
## Load in genomesize and contact data
## Log if verbose
if verbose:
print("Loading genome size and contact (h5) files.")
## Load genome size file
genomesize = pd.read_csv(sizepath,sep=mysep,names=['Chrom','Size'])
## Make a list of chromosomes
chrlist = genomesize.Chrom.tolist()
# In[ ]:
## Load in and set columns
temp = pd.read_csv(datapath,sep=mysep,header=None,names=colname)
## Take total contact counts
contacts = temp.shape[0]
## Print size of temp file
if verbose:
print('Detected %s HiC contacts.'%contacts)
if (contacts == 0):
print('ERROR: No HiC contacts detected!')
sys.exit(1)
# In[ ]:
## Subset data for data in genomesizes file
temp = temp[(temp[colname[0]].isin(chrlist)) &
(temp[colname[3]].isin(chrlist))].reset_index(drop=True)
## Gather the new index after dropping samples
theindex = temp.index.values
## Number of contacts dropped
ndrop = contacts - temp.shape[0]
## calculate total number of conatacts dropped
nperc = np.round(100*ndrop/contacts,3)
## Print the number of dropped contacts
if verbose:
print("WARNING: Removed %s ( %s"%(ndrop,nperc) + " % ) contacts from unlisted chromosomes." )
# In[ ]:
## Check that we have contacts for all chromosomes in chrlist
## Gather chromosomes still in the filtered h5
tempchrlist = list(np.unique(np.concatenate([temp[colname[0]].unique(),temp[colname[3]].unique()])))
## Gather the names of the missing chromosomes
missing = [c for c in chrlist if c not in tempchrlist]
## If any chromosomes are missing
if len(missing) > 0:
print("WARNING: No contacts were detected for chromosomes:")
print("\n".join(missing))
# In[ ]:
## Split by contact type
## Log if verbose
if verbose:
print("Splitting inter- & intra-chromosomal contacts.")
## Gather the between chrom contacts
inter = temp[(temp.Chrom1!=temp.Chrom2)]
## Check the shape and number of inter-chromosome contacts
if verbose and (inter.shape[0] == 0):
print("WARNING: Zero inter-chromosomal contacts detected.")
else:
print("Number of between chromosome contacts: %s"%inter.shape[0])
## Gather the within chromosome contacts
intra = temp[(temp.Chrom1==temp.Chrom2)]
## Check the shape and number of intra-chromosome contacts
if verbose and (intra.shape[0] == 0):
print("ERROR: Zero intra-chromosomal contacts detected.")
sys.exit(1)
else:
print("Number of within chromosome contacts: %s"%intra.shape[0])
## What is the ratio of intra vs inter
if verbose and (intra.shape[0] > 0):
## Calculate ratio
interintra = np.round(100*inter.shape[0]/intra.shape[0],3)
## Print to screen
print('Ratio of inter- to intra-chromosome contacts: %s %s'%(interintra,'%'))
# In[ ]:
## Correct intra chromosomal contacts
## Remove temp
del temp
## Log if verbose
if verbose:
print("Sorting intra-chromosomal contacts.")
## Sort the within chromcontacts by chromosome and left read postition
intrac = pd.concat([intra[(intra.Chrom1==c)].sort_values('Left1') for c in chrlist])
## Delete the old intra
del intra
# In[ ]:
## Split inter chromosome contacts into left and right pairs
## Log status
if verbose and (inter.shape[0]>0):
print("Gathering pairs of inter-chromosomal contacts.")
## Gather left
left = inter[inter.columns[:3]]
## Check work
assert (left.shape[1] == 3), "ERROR: Missing columns of left pairs.\nThere should be three and there are %s"%left.shape[1]
## Gather right
righ = inter[inter.columns[3:-1]]
## Check work
assert (righ.shape[1] == 3), "ERROR: Missing columns of right pairs.\nThere should be three and there are %s"%righ.shape[1]
## Take the correction index
tocorrect = inter.index.values
## Take the quality of between chromosome contacts
interquality = inter[colname[-1]]
# In[ ]:
## Reorder pairs of inter chromosomal contacts
if verbose and (inter.shape[0]>0):
print("Reordering inter-chromosomal contacts by chromosome.")
## Initilize inter list
inter = []
## Iteratively correct the inter chromosome names
for i in tocorrect:
## Gather chromosome names from
## The left pair and ..
c1 = left.loc[i,colname[0]]
## the right pair of the inter chromosome contact
c2 = righ.loc[i,colname[3]]
## Gather chromosome index of the left read and ..
c1ix = genomesize[(genomesize.Chrom==c1)].index.min()
## the right read of the pair in contact
c2ix = genomesize[(genomesize.Chrom==c2)].index.min()
## If the "Left" chromosome is the first in order make in this order
if (c1ix < c2ix):
newline = left.loc[i].tolist() + righ.loc[i].tolist()
## Else if "right" chromosome is the first in order make in this order
else:
newline = righ.loc[i].tolist() + left.loc[i].tolist()
## assert that the chromosomes may not have the same index
assert (c1ix != c2ix), "ERROR: The chromosomes are not inter-chromosomal contacts! "
## append to inter list
inter.append(newline)
## Make list into dataframe
inter = pd.DataFrame(inter,columns=colname[:-1],index=tocorrect)
## Check that we have the same size dataframe
assert (inter.shape[0] == left.shape[0])
# In[ ]:
## Sort inter pairs by chromosome positon
if verbose and (inter.shape[0]>0):
print("Sorting inter-chromosomal contacts by chromosome.")
## Initilize corrected inter (between) chrom contact list
interc = []
## Gather list of chromosomes with trans contacts
interchrs = [c for c in chrlist if c in inter[colname[0]].tolist()]
for c in interchrs:
## Slice the single chromosome
temp = inter[(inter.Chrom1==c)]
## Gather the inter chromosomes
interchrom = genomesize[(genomesize.Chrom.isin(temp[colname[3]].unique()))].Chrom.tolist()
## Sort the right side of the interchromosomes
tempc = pd.concat([temp[(temp[colname[3]]==ic)].sort_values([colname[1],colname[4]]) for ic in interchrom])
## append to the corrected between chromosome contact list
interc.append(tempc)
## concatonate into a dataframe
if (len(interc)>0):
interc = pd.concat(interc)
## Check our work
assert (inter.shape[0] == interc.shape[0])
## Check the index
checkix(inter,interc)
## Delete memory hogs
del tempc
else:
## Set interc to the empty dataframe made above
interc = inter
## Check work
assert (interc.shape[0] == 0)
# In[ ]:
## Combine both sorted inter and intra by sorted chromosome in chrlist
if verbose:
print("Blocking contacts of %s chromosome(s)."%len(chrlist))
## Initilize list
hic = []
## Set counter
ci = 0
## Iterate thru each chromosome
for c in chrlist:
## Slice intra (within)
temp1 = intrac[(intrac[colname[0]]==c)]
## Slice inter (between)
temp2 = interc[(interc[colname[0]]==c)]
## Print a warning if both intra and inter chrom contacts are zero!
if (temp1.shape[0]==0) and (temp2.shape[0]==0):
print('WARNING: No contacts found for %s'%c)
continue
## If there are no between chrom contacts
if (temp2.shape[0]==0):
## Set new temp to just the within chrom contacts
temp = temp1
## Other wise concatinate them
else:
temp = pd.concat([temp1,temp2])
## append to list
hic.append(temp)
## Count
ci += 1
## Check our count
assert ci == len(chrlist)
## make into a dataframe
hic = pd.concat(hic)
## Check the final shape
assert (hic.shape[0] == len(theindex)), "ERROR: There are missing valid HIC contacts!"
## Check inter chrom contacts last column
checkix(hic[(hic[colname[-1]].isna())],interquality)
## Reassign last column to inter chrom contacts
hic.loc[interquality.index,colname[-1]] = interquality.values
## check our assignment
assert (hic.dropna().shape[0] == hic.shape[0]), "ERROR: There is missing data in the HIC dataframe!"
## Check final index
checkix(hic,pd.DataFrame(index=theindex))
# In[ ]:
## Generate a short file
if verbose:
print("Generating hic short file: %s"%savepath)
## gather colunm names to be held over
convertix = np.array([0,1,3,4,6])
## Make new column names
newcols = ['buffer1'] + hic.columns[:2].tolist() + ['buffer2','buffer3'] + hic.columns[3:5].tolist() + ['buffer4'] + hic.columns[-1:].tolist()
## Check that their are nine of these
assert len(newcols) == 9, "ERROR: The short file columns were not generated correctly."
## Initilize short dataframe
short = pd.DataFrame(columns=newcols,index=hic.index)
## For each old column name
for c in colname:
## If its in the new short dataframe assigne it
if c in newcols:
short[c] = hic[c]
else:
pass
## Assign zeros to buffer columns 1,2, and 3
short[['buffer1','buffer2','buffer3']] = 0
## and a one to buffer column 4
short[['buffer4']] = 1
## Convert all the columns except those with the chromosome name to integers
## Gather columns to be converted
toint = [c for c in short.columns if c not in [colname[0],colname[3]]]
## Convert to integers
for c in toint:
short[c] = short[c].apply(int)
## Check that we didn't lose any records
checkix(short,hic)
## SAve out dataframe
short.to_csv(savepath,sep=outsep,header=False,index=False)
## Print finish
if verbose:
print("Finished :D")
| StarcoderdataPython |
380413 | <gh_stars>0
# Shared package-wide constants.
CONFIGURATION_NAMESPACE = 'qmap'
# It is here and not inside the manager module to avoid circular imports
EXECUTION_ENV_FILE_NAME = 'execution'
EXECUTION_METADATA_FILE_NAME = 'execution'
class QMapError(Exception):
    """Base class for this package errors"""
| StarcoderdataPython |
4937681 | <filename>races/project/controller.py<gh_stars>1-10
from project.core.car_factory import CarFactory
from project.driver import Driver
from project.race import Race
class Controller:
    """Coordinates cars, drivers and races for the racing application."""

    def __init__(self):
        self.cars = []
        self.drivers = []
        self.races = []
        self.car_factory = CarFactory()

    def create_car(self, car_type: str, model: str, speed_limit: int):
        """Build a car through the factory; duplicate models are rejected."""
        for existing in self.cars:
            if existing.model == model:
                raise Exception(f'Car {model} is already created!')
        try:
            new_car = self.car_factory.create_car(car_type, model, speed_limit)
        except RuntimeError:
            # Factory RuntimeErrors are deliberately swallowed; the call
            # then simply returns None.
            return None
        self.cars.append(new_car)
        return f"{new_car.__class__.__name__} {new_car.model} is created."

    def create_driver(self, driver_name: str):
        """Register a new driver; duplicate names are rejected."""
        if any(existing.name == driver_name for existing in self.drivers):
            raise Exception(f'Driver {driver_name} is already created!')
        new_driver = Driver(driver_name)
        self.drivers.append(new_driver)
        return f'Driver {new_driver.name} is created.'

    def create_race(self, race_name: str):
        """Register a new race; duplicate names are rejected."""
        if any(existing.name == race_name for existing in self.races):
            raise Exception(f'Race {race_name} is already created!')
        new_race = Race(race_name)
        self.races.append(new_race)
        return f'Race {new_race.name} is created.'

    def add_car_to_driver(self, driver_name: str, car_type: str):
        """Assign the most recently added free car of *car_type* to the driver."""
        return self.__find_driver_by_name(driver_name).change_car(
            self.__find_last_free_car_by_type(car_type))

    def add_driver_to_race(self, race_name: str, driver_name: str):
        """Register an existing driver for an existing race."""
        return self.__find_race_by_name(race_name).register_driver(
            self.__find_driver_by_name(driver_name))

    def start_race(self, race_name: str):
        """Start the named race."""
        return self.__find_race_by_name(race_name).start()

    def __find_driver_by_name(self, driver_name):
        match = next((d for d in self.drivers if d.name == driver_name), None)
        if match is None:
            raise Exception(f'Driver {driver_name} could not be found!')
        return match

    def __find_last_free_car_by_type(self, car_type):
        # Newest matching free car wins, hence the reverse scan.
        for candidate in reversed(self.cars):
            if not candidate.is_taken and candidate.__class__.__name__ == car_type:
                return candidate
        raise Exception(f'Car {car_type} could not be found!')

    def __find_race_by_name(self, race_name):
        match = next((r for r in self.races if r.name == race_name), None)
        if match is None:
            raise Exception(f'Race {race_name} could not be found!')
        return match
| StarcoderdataPython |
388554 | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
# helper conv() function to set up a convolutional 2D layer with an optional attached batch norm layer
def conv(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=True, batch_norm=True):
    """Creates a 2D convolutional layer (for downscaling width and height of the
    input tensor) with an attached optional batch normalization layer.

    Arguments:
        in_channels: input channels resp. depth of input tensor
        out_channels: output channels resp. depth of output tensor
        kernel_size: kernel size of the convolutional filter (default: 3)
        stride: stride to shift the filter kernel along tensor width and height (default: 1)
        padding: number of rows / columns padded with zeros on the outer rims of the tensor (default: 1)
        bias: whether the convolution learns an additive bias (default: True)
        batch_norm: flag to switch batch normalization on (True) or off (False)

    Returns:
        nn.Sequential containing the Conv2d and, optionally, a BatchNorm2d.
    """
    # initialize list of layers
    layers = []
    # BUG FIX: `bias` was previously passed positionally into Conv2d's
    # `dilation` slot (Conv2d signature: ..., padding, dilation, groups, bias);
    # it must be passed by keyword.
    conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size,
                           stride=stride, padding=padding, bias=bias)
    layers.append(conv_layer)
    if batch_norm:
        # append 2D batch normalization layer matching the conv output depth
        layers.append(nn.BatchNorm2d(out_channels, eps=1e-05, momentum=0.1,
                                     affine=True, track_running_stats=True))
    # return sequential stack of layers
    return nn.Sequential(*layers)
# helper lin() function to set up a linear 1D layer with an optional attached batch norm layer
def lin(in_features, out_features, bias=True, batch_norm=True):
    """Build a fully connected layer with an optional 1D batch-norm layer.

    Arguments:
        in_features: number of input features
        out_features: number of output features
        bias: bias (default: True)
        batch_norm: append a BatchNorm1d layer when True (default: True)

    Returns:
        nn.Sequential wrapping the linear layer and, if requested, the norm layer.
    """
    fc = nn.Linear(in_features, out_features, bias)
    if not batch_norm:
        return nn.Sequential(fc)
    bn = nn.BatchNorm1d(out_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    return nn.Sequential(fc, bn)
class Net(nn.Module):
    """CNN for facial keypoint regression.

    Feature extractor: three conv blocks (conv + batch norm + ReLU + 2x2 max
    pool + dropout), followed by a two-layer fully connected head regressing
    136 values, i.e. 68 keypoints as (x, y) pairs (per the comments below).
    The fc1 input size of 128*7*7 corresponds to a 1-channel 224x224 input
    (224 -> pool 112 -> stride+pool 28 -> stride+pool 7) -- confirm input size
    against the data pipeline.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Base channel depth of the first convolutional block.
        conv_dim = 32
        ## Feature extractor
        # conv1: 1 -> 32 channels, stride 1 (spatial size preserved pre-pool)
        self.conv1 = conv(in_channels=1, out_channels=conv_dim,
                          kernel_size=3, stride=1, padding=1, bias=True, batch_norm=True)
        # conv2: 32 -> 64 channels, stride 2 halves the spatial size
        self.conv2 = conv(in_channels=conv_dim, out_channels=2*conv_dim,
                          kernel_size=3, stride=2, padding=1, bias=True, batch_norm=True)
        # conv3: 64 -> 128 channels, stride 2 halves the spatial size again
        self.conv3 = conv(in_channels=2*conv_dim, out_channels=4*conv_dim,
                          kernel_size=3, stride=2, padding=1, bias=True, batch_norm=True)
        # Shared dropout module applied after every pooled block (p=0.2).
        self.drop = nn.Dropout(p=0.2)
        # Shared 2x2 max-pooling module.
        self.pool = nn.MaxPool2d(2, 2)
        ## Linear classifier head
        # fc1: flattened 128*7*7 features -> 1024, with batch norm
        self.fc1 = lin(in_features=128*7*7, out_features=1024, bias=True, batch_norm=True)
        # fc2: 1024 -> 136 output coordinates (no batch norm)
        self.fc2 = lin(in_features=1024, out_features=136, bias=True, batch_norm=False)

    def forward(self, x):
        """Run the conv blocks then the linear head; returns an (N, 136) tensor."""
        ## Feature extractor: conv -> ReLU -> pool -> dropout, three times.
        x = self.pool(F.relu(self.conv1(x)))
        x = self.drop(x)
        x = self.pool(F.relu(self.conv2(x)))
        x = self.drop(x)
        x = self.pool(F.relu(self.conv3(x)))
        x = self.drop(x)
        ## Classifier
        # Flatten to (N, 128*7*7) for the fully connected head.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.drop(x)
        # No activation on the output layer: raw (x, y) coordinate values.
        x = self.fc2(x)
        return x

    def predict(self, x):
        """Same computation as forward(), additionally collecting intermediates.

        NOTE(review): dropout modules are still applied here; call
        ``model.eval()`` first if deterministic predictions are wanted.

        Returns:
            (key_pts, activations, feature_maps, layer_outputs) where the last
            three are dicts of tensors keyed by layer name.
        """
        activations = {}
        feature_maps = {}
        layer_outputs = {}
        ## Feature extractor
        # conv1: pre-activation, ReLU feature map, pooled output
        a = self.conv1(x)
        activations['conv1'] = a
        h = F.relu(a)
        feature_maps['conv1'] = h
        out = self.pool(h)
        layer_outputs['pool_conv1'] = out
        x = self.drop(out)
        # conv2: same bookkeeping
        a = self.conv2(x)
        activations['conv2'] = a
        h = F.relu(a)
        feature_maps['conv2'] = h
        out = self.pool(h)
        layer_outputs['pool_conv2'] = out
        x = self.drop(out)
        # conv3: same bookkeeping
        a = self.conv3(x)
        activations['conv3'] = a
        h = F.relu(a)
        feature_maps['conv3'] = h
        out = self.pool(h)
        layer_outputs['pool_conv3'] = out
        x = self.drop(out)
        ## Classifier
        x = x.view(x.size(0), -1)
        a = self.fc1(x)
        activations['fc1'] = a
        out = F.relu(a)
        layer_outputs['fc1'] = out
        x = self.drop(out)
        # Final fully connected layer: keypoint coordinates as (x, y) pairs.
        key_pts = self.fc2(x)
        return key_pts, activations, feature_maps, layer_outputs
| StarcoderdataPython |
177571 | # -*- coding: utf-8 -*-
import info
from Package.PerlPackageBase import *
class subinfo(info.infoclass):
    """Craft package metadata for the Perl Module-Runtime distribution."""

    def setDependencies(self):
        # A Perl interpreter is required at runtime.
        self.runtimeDependencies["dev-utils/perl"] = None

    def setTargets(self):
        # Register the downloadable tarball and its unpacked source dir.
        for ver in ["0.016"]:
            self.targets[ver] = f"https://search.cpan.org/CPAN/authors/id/Z/ZE/ZEFRAM/Module-Runtime-{ver}.tar.gz"
            self.targetInstSrc[ver] = f"Module-Runtime-{ver}"
        # NOTE(review): this digest is keyed "2.29" but the only target defined
        # above is "0.016" -- looks like a copy/paste leftover; confirm the
        # intended version and its checksum.
        self.targetDigests["2.29"] = (['68302ec646833547d410be28e09676db75006f4aa58a11f3bdb44ffe99f0f024'], CraftHash.HashAlgorithm.SHA256)
        self.tags = 'Module-Runtime'
        self.defaultTarget = '0.016'
class Package(PerlPackageBase):
    """Standard Perl package build driven entirely by PerlPackageBase."""

    def __init__(self, **args):
        # NOTE(review): **args is accepted but not forwarded to the base class.
        PerlPackageBase.__init__(self)
| StarcoderdataPython |
12826964 | import numpy as np
import pandas as pd
import sklearn.mixture as mix
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.dates import YearLocator, MonthLocator
import seaborn as sns
import missingno as msno
import quandl as qd
# reference:
# http://www.blackarbs.com/blog/introduction-hidden-markov-models-python-networkx-sklearn/2/9/2017

# FRED series used as features alongside SPY log returns.
f1 = 'TEDRATE'  # ted spread
f2 = 'T10Y2Y'   # constant maturity ten year - 2 year
f3 = 'T10Y3M'   # constant maturity 10yr - 3m

start = pd.to_datetime('2002-01-01')
# ``pd.datetime`` was deprecated and removed (pandas >= 2.0);
# ``pd.Timestamp.today()`` is the supported equivalent.
end = pd.Timestamp.today()
# NOTE(review): start/end are never passed to the quandl calls below, so the
# full available history is fetched regardless; pass start_date/end_date to
# qd.get() to honour them.
data_SPY = qd.get('LSE/SPY5')
data_f1 = qd.get('FRED/TEDRATE')
data_f2 = qd.get('FRED/T10Y2Y')
data_f3 = qd.get('FRED/T10Y3M')

# Inner-join all series on date and compute SPY log returns.
data = pd.concat([data_SPY['Price'], data_f1, data_f2, data_f3], axis=1, join='inner')
data.columns = ['SPY', f1, f2, f3]
data['sret'] = np.log(data['SPY'] / data['SPY'].shift(1))
print(' --- Data ---')
print(data.tail())

# Quick visual inspection of missing values.
msno.matrix(data)

col = 'sret'
# ``DataFrame.ix`` was removed in pandas 1.0; ``.loc`` is the equivalent
# label-based accessor for this whole-frame slice.
select = data.loc[:].dropna()
ft_cols = [f1, f2, f3, col]
X = select[ft_cols].values

print('\nFitting to HMM and decoding ...', end='')
# Gaussian mixture with 4 components standing in for 4 hidden market regimes.
model = mix.GaussianMixture(n_components=4,
                            covariance_type='full',
                            n_init=100,
                            random_state=7).fit(X)
# Predict the optimal sequence of internal hidden state
hidden_states = model.predict(X)
print('done!\n')
print('Score: %.2f;\tBIC: %.2f;\tAIC:%.2f;\n' % (model.score(X), model.bic(X), model.aic(X)))

print('Means and vars of each hidden state')
for i in range(model.n_components):
    print('%d th hidden state' % i)
    print('mean = ', model.means_[i])
    print('var = ', np.diag(model.covariances_[i]))
    print()

sns.set(font_scale=1.25)
style_kwds = {'xtick.major.size': 3, 'ytick.major.size': 3,
              'font.family': u'courier prime code', 'legend.frameon': True}
sns.set_style('white', style_kwds)

# One subplot per hidden state, plotting the return series in that regime.
fig, axs = plt.subplots(model.n_components, sharex=True, figsize=(12, 9))
colors = cm.rainbow(np.linspace(0, 1, model.n_components))
for i, (ax, color) in enumerate(zip(axs, colors)):
    # Use fancy indexing to plot only the dates assigned to state i.
    mask = hidden_states == i
    ax.plot_date(select.index.values[mask],
                 select[col].values[mask], '.-', c=color)
    ax.set_title('%d th hidden state' % i, fontsize=16, fontweight='demi')
    # Format the ticks.
    ax.xaxis.set_major_locator(YearLocator())
    ax.xaxis.set_minor_locator(MonthLocator())
sns.despine(offset=10)
plt.tight_layout()
plt.show()
# fig.savefig('Hidden Markov (Mixture) Model_Regime Subplots.png')

sns.set(font_scale=1.5)
states = (pd.DataFrame(hidden_states, columns=['states'], index=select.index)
          .join(select, how='inner')
          .assign(mkt_cret=select.sret.cumsum())
          .reset_index(drop=False)
          .rename(columns={'index': 'Date'}))
print(' --- States ---')
print(states.tail())

sns.set_style('white', style_kwds)
# NOTE(review): only states 0-2 are listed although 4 components were fitted;
# state 3 is dropped from the facet plot -- confirm this is intentional.
order = [0, 1, 2]
# FacetGrid's ``size`` kwarg was renamed ``height`` in seaborn 0.9.
fg = sns.FacetGrid(data=states, hue='states', hue_order=order,
                   palette=colors, aspect=1.31, height=12)
fg.map(plt.scatter, 'Date', 'SPY', alpha=0.8).add_legend()
sns.despine(offset=10)
fg.fig.suptitle('Historical SPY Regimes', fontsize=24, fontweight='demi')
plt.tight_layout()
plt.show()
# fg.savefig('Hidden Markov (Mixture) Model_SPY Regimes.png')
11386738 | <filename>pyblnet/__init__.py
from .blnet_web import BLNETWeb, test_blnet
from .blnet_conn import BLNETDirect
from .blnet import BLNET | StarcoderdataPython |
271247 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import sys
import threading
import time
from multistructlog import create_logger
from xosconfig import Config
log = create_logger(Config().get("logging"))
class Backend:
    """Long-running synchronizer backend.

    Starts the model-policy engine in a background thread when a
    ``model_policies_dir`` is configured, then parks the main thread until
    interrupted.
    """

    def run(self):
        """Launch the policy thread (if configured) and block forever."""
        # Read the configured policies directory. NOTE(review): the original
        # called ``Config("model_policies_dir")``, which constructs a Config
        # *instance* (always truthy) instead of reading the key, so the
        # feature check below could never be False. Reading the value via
        # ``Config().get`` matches the module-level ``Config().get("logging")``
        # usage above -- confirm against the xosconfig API.
        policies_dir = Config().get("model_policies_dir")
        if policies_dir:
            from synchronizers.model_policy import run_policy

            model_policy_thread = threading.Thread(target=run_policy)
            model_policy_thread.start()
        else:
            model_policy_thread = None
            log.info("Skipping model policies thread due to no model_policies dir.")

        # Park the main thread; all real work happens in the policy thread.
        while True:
            try:
                time.sleep(1000)
            except KeyboardInterrupt:
                print("exiting due to keyboard interrupt")
                if model_policy_thread:
                    # NOTE(review): Thread._Thread__stop was removed in
                    # Python 3; this only works on Python 2. Consider making
                    # the worker a daemon thread instead.
                    model_policy_thread._Thread__stop()
                sys.exit(1)
| StarcoderdataPython |
1917651 | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from .views import Login, logout_then_login
# URL routing: auth views, Django admin, the dnd5e app and markdownx endpoints.
urlpatterns = [
    path('login/', Login.as_view(), name='login'),
    path('logout/', logout_then_login, name='logout'),
    path('admin/', admin.site.urls),
    path('dnd5e/', include('dnd5e.urls', namespace='dnd5e')),
    path('markdownx/', include('markdownx.urls')),
]

# In local development only: serve uploaded media and mount django-debug-toolbar.
if settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:
    import debug_toolbar
    from django.conf.urls.static import static

    urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
12838508 | import sys
from boto3.session import Session
from .models import Dataset
from .utils import get_headers
def get_instances_as_table(profile=None, region_name='us-east-1'):
    """Return a Dataset describing every EC2 instance visible to *profile*.

    Each instance's (non-underscore, non-iterable) attributes are flattened
    into a row dict; only keys under the 'meta.data.' prefix plus the added
    'AWSProfile' column are kept.

    :param profile: AWS credentials profile name (None for the default chain)
    :param region_name: AWS region to query (default: 'us-east-1')
    :rtype: Dataset
    """
    # Honour the caller-supplied region; previously ``region_name`` was
    # accepted but silently ignored.
    session = Session(profile_name=profile, region_name=region_name)
    ec2 = session.resource('ec2')
    data = extract_data_from_objects(ec2.instances.all())
    # enrich data with meta info
    for row in data:
        row.update({'AWSProfile': profile})

    def kf(k):
        """Keep everything under the 'meta.data.' header name.
        Keep the AWSProfile keyname around.
        Throw out everything else.
        """
        prefix = 'meta.data.'
        if k.startswith(prefix):
            return k[len(prefix):]
        if k == 'AWSProfile':
            return k
        return None

    data = clean_data(data, key_filter=kf)
    return Dataset(headers=sorted(get_headers(data)), data=data)
def clean_data(data, key_filter=None, value_filter=None):
    """Return a filtered copy of *data* (a list of row dicts).

    ``key_filter`` maps each key to a replacement key, or to None to drop the
    whole key/value pair; ``value_filter`` maps each value. Both default to
    the identity function.
    NOTE: iterates with ``dict.iteritems()``, so this module is Python 2 only.
    """
    if key_filter is None:
        key_filter = lambda x:x
    if value_filter is None:
        value_filter = lambda x:x
    updated_data = []
    for row in data:
        updated_row = {}
        for k,v in row.iteritems():
            k = key_filter(k)
            # A None key means "drop this pair entirely".
            if k is None:
                continue
            v = value_filter(v)
            updated_row[k] = v
        updated_data.append(updated_row)
    return updated_data
def extract_data_from_objects(objs):
    """Flatten each object's attributes into one dict per object.

    Non-underscore attributes are walked recursively via loop(); nested dicts
    and instance objects become dotted key paths, while iterable attributes
    are skipped (loop() currently returns None for them).

    :param objs iter[object]: an iterable of python objects
    :rtype: list[dict]
    """
    # One nested (key, value) structure per object, rooted at the empty key.
    nested_data = [loop('', obj) for obj in objs]
    # Flatten, drop the None placeholders loop() returns for iterables, and
    # strip the leading '.' that the empty root key leaves on every path.
    return [{x.lstrip('.'):y for x, y in strip_nones(flatten(item))} for item in nested_data]
def strip_nones(l): return (x for x in l if x is not None)
def flatten(l):
    """Flatten an arbitrarily nested list, yielding leaf elements.

    Only ``list`` instances are recursed into; everything else (including
    tuples and strings) is yielded as-is.
    NOTE: references ``basestring``, so this function is Python 2 only.
    """
    for el in l:
        if isinstance(el, list) and not isinstance(el, basestring):
            for sub in flatten(el):
                yield sub
        else:
            yield el
def loop(key, val):
    """Recursively expand *val* into nested lists of ('dotted.key', value) tuples.

    Instance objects are first converted to dicts from their non-underscore
    ``__dict__`` entries; dicts recurse with each key appended to the dotted
    path; other iterables are currently skipped (see TODO), making the call
    return None for them -- callers filter those out via strip_nones().
    NOTE: uses ``dict.iteritems()``, so this function is Python 2 only.
    """
    if hasattr(val, '__dict__'): # convert instance object to a dict
        val = {k:v for k,v in val.__dict__.iteritems() if not k.startswith('_')}
    if isinstance(val, dict):
        return [loop(key + '.' + str(k), v) for k, v in val.iteritems()]
    elif hasattr(val, '__iter__'):
        pass #TODO do something with iterables
    else: # it must be a normal, non-container object
        return (key, val)
| StarcoderdataPython |
4800276 | import time
start = time.strftime('%H:%M:%S', time.localtime())
i =0
while True:
if (start != time.strftime('%H:%M:%S', time.localtime())):
print('Ops per second:',i/10**3, '\bk')
break
i+=1
| StarcoderdataPython |
116919 | '''
Finding minimum cost path in 2-D array "array[][]" to reach a position (left, right)
in array[][] from (0, 0).
Total cost of a path to reach (left, right) is sum of all the costs on that
path (including both source and destination).
'''
import sys
# Finding minimum cost path in 2-D array
def minimumCost(array, left, right):
    """Return the minimum total cost of a path from (0, 0) to (left, right).

    A path into cell (r, c) arrives from (r-1, c), (r, c-1) or (r-1, c-1),
    and its cost is the sum of every visited cell, both endpoints included.
    Negative target indices yield sys.maxsize, matching the sentinel used
    internally for invalid sub-paths.

    :param array: 2-D list of non-negative cell costs
    :param left: target row index
    :param right: target column index
    """
    def _best(r, c, memo):
        # Sentinel for stepping off the grid.
        if r < 0 or c < 0:
            return sys.maxsize
        # Base case: the start cell costs just its own value.
        if r == 0 and c == 0:
            return array[0][0]
        # Memoise (r, c): the naive recursion is exponential, this is O(r*c).
        if (r, c) not in memo:
            memo[(r, c)] = array[r][c] + min(_best(r - 1, c - 1, memo),
                                             _best(r - 1, c, memo),
                                             _best(r, c - 1, memo))
        return memo[(r, c)]

    # Fresh memo per call so results never leak between different grids.
    return _best(left, right, {})
# Driver program: reads the grid dimensions, then row*col cell costs (one int
# per line), then the target coordinates, and prints the minimum path cost.
row = int(input())
col = int(input())
array = []
for i in range(row):
    a = []
    for j in range(col):
        a.append(int(input()))
    array.append(a)
left = int(input())
right = int(input())
print(minimumCost(array, left, right))
'''
Input:
row = 3
col = 3
array = {{1, 2, 3},
{4, 5, 6},
{7, 8, 9}}
left = 2
right = 2
Output:
15
Because to reach from (0, 0) to (2, 2)
the cost for minimum path is (0, 0) –> (1, 1) –> (2, 2); 1 + 5 + 9 = 15
'''
| StarcoderdataPython |
3461518 | <filename>api/setup_evaluation.py<gh_stars>1-10
import os
import string
import subprocess
import logging
import json
from pathlib import Path
from collections import OrderedDict
from functools import reduce
import re
from math import floor
from multiprocessing import Pool, cpu_count
from random import seed
import random
from datetime import datetime
from joblib import dump
from progress.bar import Bar
from progress.spinner import Spinner
import numpy as np
from pandas import DataFrame
from pymongo import MongoClient
from joblib import dump
from imblearn.pipeline import Pipeline
from imblearn.combine import SMOTEENN
from thundersvm import SVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import classification_report, balanced_accuracy_score
from harpocrates_server.db import create_db_client, MONGO_URI
from harpocrates_server.service import TRAIN_DATA_DIR, MODELS_DIRECTORY, TRAIN_LABELS
from harpocrates_server.service.data_parsing import (
extract_data,
extract_paths_and_labels,
read_file,
)
from harpocrates_server.service.classification import (
get_model,
CLASSIFIERS,
get_vectorizer,
train_and_store_classifier,
)
from harpocrates_server.service.document import text_contents_from_document_body
from harpocrates_server.controllers.document_controller import (
calculate_text_content_classifications,
classify,
classify_text,
get_document,
)
from harpocrates_server.models.document import Document
from bson.objectid import ObjectId
# Filesystem locations for trained models and the annotation ground truth.
MODEL_DIRECTORY = Path("instance", "models")
ANNOTATIONS_PATH = Path(
    "/home/architect/git_repositories/dissertation/data", "annotations.json"
)
# Necessary for reproducing experimental setup
SEED = 32
def generate_id(length=6):
    """Return a random identifier of uppercase letters and digits.

    Deliberately uses a freshly constructed, unseeded RNG so the id is
    independent of any globally seeded random state.
    """
    local_rng = random.Random()
    alphabet = string.ascii_uppercase + string.digits
    return "".join(local_rng.choice(alphabet) for _ in range(length))
USER_ID = generate_id()
def intersect(*arrays):
    """Return the sorted, unique elements common to every given array."""
    common = arrays[0]
    for other in arrays[1:]:
        common = np.intersect1d(common, other)
    return common
def extract_annotations():
    """Parse the annotations file into {absolute document path: annotation dict}.

    Each line has the form ``<relative path>:<json>``; the path (suffixed with
    .html) is re-rooted under TRAIN_DATA_DIR using its last three components.
    Returns an OrderedDict preserving file order.
    """
    path_annotations = OrderedDict()
    train_data_dir = Path(TRAIN_DATA_DIR)
    with open(ANNOTATIONS_PATH, "r") as annotations_file:
        for line in annotations_file:
            # Split only on the first ':' -- the JSON payload may contain colons.
            relative_path, annotations = line.split(":", 1)
            relative_path += ".html"
            full_path = train_data_dir.joinpath(*Path(relative_path).parts[-3:])
            annotations = json.loads(annotations)
            path_annotations[str(full_path)] = annotations
    return path_annotations
def process_document(odcument, collection, trained_model):
    """Store one document in MongoDB and attach model classifications to it.

    NOTE(review): the first parameter is spelled ``odcument`` while the body
    reads ``document`` -- the argument is effectively unused and the function
    depends on an outer/global ``document`` binding (the loop variable in the
    __main__ block). Confirm and rename.
    """
    db = create_db_client(db_name=USER_ID)
    # Hack: rebind the module-level ``db`` seen by the imported ``classify``
    # view so it writes into this run's per-user database.
    classify.__globals__["db"] = db
    granularity: str = "paragraph"
    # Split the raw body into paragraph-level text contents.
    text_contents = text_contents_from_document_body(
        document["content"], granularity=granularity
    )
    document_object = Document(
        text_contents=text_contents,
        text_split_granularity=granularity,
        name=document["document_number"],
    )
    operation_result = db[collection].insert_one(document_object.to_dict())
    doc_id = operation_result.inserted_id
    # Classify the whole document, then each paragraph via the explanations.
    classification = classify_text(document["content"], trained_model=trained_model)
    classified_text_contents = calculate_text_content_classifications(
        document_object,
        explanations=classification.explanations,
        trained_model=trained_model,
    )
    doc_id = db[collection].update_one(
        {"_id": ObjectId(doc_id)},
        {
            "$set": {
                # Update document wide predicted classification
                "predictedClassification": classification.to_dict(),
                # Update paragraph classifications
                "textContents": [
                    text_content.to_dict() for text_content in classified_text_contents
                ],
            }
        },
    )
# NOTE(review): the original indentation of this block was lost; the nesting
# below (split-loop body through the batch processing, demo document at top
# level) was reconstructed from the data flow -- confirm against the original.
if __name__ == "__main__":
    print(USER_ID)
    # create env file with mongod database name
    with open(".env.sample", "r") as sample_env_file:
        env_content = sample_env_file.read()
    env_content += "\nMONGO_DB_NAME={}".format(USER_ID)
    with open(".env", "w") as env_file:
        env_file.write(env_content)
    # Load the corpus: file paths, labels and raw document text.
    file_paths, train_labels = extract_paths_and_labels()
    train_data = extract_data(file_paths)
    document_numbers = []
    for path in file_paths:
        document_numbers.append(Path(path).stem)
    # verify order of text and labels
    with open(TRAIN_LABELS) as ground_truth_file:
        for i, line in enumerate(ground_truth_file.read().splitlines()):
            path, classification = line.split(" ")
            assert path in file_paths[i]
            assert int(classification) == train_labels[i]
            assert read_file(file_paths[i]) == train_data[i]
    annotations = extract_annotations()
    # One row per document: number, content, sensitivity label, per-tag flags.
    train_data_df = DataFrame([document_numbers, train_data, train_labels]).transpose()
    train_data_df.columns = ["document_number", "content", "sensitive"]
    train_data_df["S40"] = False
    train_data_df["S27"] = False
    # Flag documents carrying S40/S27 annotations.
    # NOTE(review): np.searchsorted assumes file_paths is sorted -- confirm.
    for path, annotation in annotations.items():
        tags = annotation.get("tags")
        if tags:
            index = np.searchsorted(file_paths, path)
            S40 = False
            S27 = False
            for tag in tags:
                if "S40" in tag:
                    train_data_df.at[index, "S40"] = True
                if "S27" in tag:
                    train_data_df.at[index, "S27"] = True
    SEEDS = [7, 8]
    all_test_doc_numbers = []
    # One evaluation batch (and Mongo collection) per seed.
    for i, SEED in enumerate(SEEDS):
        np.random.seed(SEED)
        seed(SEED)
        true_negatives = []
        false_negatives = []
        false_positives = []
        true_positives = []
        vect = TfidfVectorizer(
            norm="l2",
            analyzer="word",
            stop_words="english",
            strip_accents="unicode",
            binary=False,
            max_df=0.75,
            min_df=1,
            lowercase=True,
            use_idf=False,
            smooth_idf=True,
            sublinear_tf=True,
        )
        sampler = SMOTEENN(random_state=SEED)
        clf = SVC(
            kernel="linear",
            C=0.1,
            probability=True,
            decision_function_shape="ovo",
            random_state=SEED,
        )
        # Create the Pipeline: tf-idf -> resampling -> GPU SVM.
        pipeline = Pipeline(
            steps=[("vect", vect), ("sample", sampler), ("clf", clf),], verbose=10,
        )
        # Split and Train (n_splits=1, so this loop runs exactly once).
        splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=SEED)
        # NOTE(review): these reassignments shadow the corpus-level
        # train_data/train_labels loaded above.
        train_data = train_data_df["content"]
        train_labels = train_data_df["S40"]
        for train_index, test_index in splitter.split(train_data, train_labels):
            X_train = np.array(train_data[train_index])
            X_test = np.array(train_data[test_index])
            y_train = train_labels[train_index]
            y_test = train_labels[test_index]
            test_data_df = train_data_df.iloc[test_index]
            pipeline.fit(X_train, y_train)
            # Predict
            predictions = pipeline.predict(X_test)
            print(classification_report(y_test, predictions))
            bac = balanced_accuracy_score(y_test, predictions)
            print("Balanced accuracy", bac)
            # Index filters over the test split (np.where returns index tuples).
            length = np.vectorize(len)
            document_lengths = length(X_test)
            correct = np.where(y_test == predictions)
            misclassified = np.where(y_test != predictions)
            S27 = np.where(np.array(train_data_df["S27"])[test_index] == True)
            S40 = np.where(np.array(train_data_df["S40"])[test_index] == True)
            train_S27 = np.where(np.array(train_data_df["S27"])[train_index] == True)
            train_S40 = np.where(np.array(train_data_df["S40"])[train_index] == True)
            actually_sensitive = np.where(y_test == 1)
            actually_insensitive = np.where(y_test == 0)
            classified_sensitive = np.where(predictions == 1)
            classified_insensitive = np.where(predictions == 0)
            # "Small" documents: under 2000 characters.
            small = np.where(document_lengths < 2000)
            print("Test set small S40 count:", len(intersect(S40, small)))
            print("Train set small S40 count:", len(intersect(train_S40, small)))
            # Confusion-matrix cells restricted to small (and, for the
            # sensitive cells, S40-tagged) documents.
            true_negatives = intersect(classified_insensitive, actually_insensitive, small)
            false_negatives = intersect(
                classified_insensitive, actually_sensitive, S40, small
            )
            false_positives = intersect(classified_sensitive, actually_insensitive, small)
            true_positives = intersect(classified_sensitive, actually_sensitive, S40, small)
            confusion_matrix = """
{seed}\t\tSensitive\t\tNot Sensitive
Sensitive\t\t{true_positives}\t\t{false_positives}
Not Sensitive\t\t{false_negatives}\t\t{true_negatives}
""".format(
                true_positives=len(true_positives),
                false_negatives=len(false_negatives),
                false_positives=len(false_positives),
                true_negatives=len(true_negatives),
                seed=SEED,
            )
            print(confusion_matrix)
            # Assemble the evaluation batch: one TN, one FN, one FP and the
            # first three TPs, then shuffle their presentation order.
            batch1_indices = [
                # first true negative
                true_negatives[0],
                # first false negative
                false_negatives[0],
                # first false positive (tuple index [0,] is equivalent to [0])
                false_positives[0,]
                # first 3 true positives
            ] + [true_positives[n] for n in range(3)]
            # create another random number generator
            # with unfixed seed
            rng = random.Random()
            rng.shuffle(batch1_indices)
            batch_evaluation_setup_df = test_data_df.iloc[batch1_indices, :]
            all_test_doc_numbers += (
                test_data_df["document_number"].iloc[batch1_indices].to_list()
            )
            bar = Bar("Processing test documents", max=batch_evaluation_setup_df.shape[0])
            print(batch_evaluation_setup_df)
            # create, classify and store documents
            for document_index, document in batch_evaluation_setup_df.iterrows():
                bar.next()
                process_document(document, "collection_{}".format(i), pipeline)
    spinner = Spinner("Processing demo document ")
    # NOTE(review): the second argument collapses to a single scalar via
    # [0][0], so this intersects indices with one value -- likely meant [0].
    unused_s40_document_indices = intersect(
        np.where(train_data_df["S40"].to_numpy() == True)[0],
        np.where(train_data_df["document_number"].to_numpy() != all_test_doc_numbers)[
            0
        ][0],
    )
    # sample a sensitive document that is not in the test set
    # NOTE(review): DataFrame does not support tuple indexing -- this line
    # raises at runtime; presumably ``train_data_df.iloc[...]`` was intended.
    test_document = train_data_df[unused_s40_document_indices, :]
    print(test_data_df.iloc[test_document, :])
    # NOTE(review): ``document`` and ``pipeline`` here are leftovers from the
    # last loop iteration above, not the sampled demo document.
    process_document(document, "demo", pipeline)
    spinner.next()
3398836 | <gh_stars>1000+
"""Define tests for the AEMET OpenData init."""
from unittest.mock import patch
import requests_mock
from homeassistant.components.aemet.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
import homeassistant.util.dt as dt_util
from .util import aemet_requests_mock
from tests.common import MockConfigEntry
# Minimal AEMET OpenData config-entry data shared by the tests in this module.
CONFIG = {
    CONF_NAME: "aemet",
    CONF_API_KEY: "foo",
    CONF_LATITUDE: 40.30403754,
    CONF_LONGITUDE: -3.72935236,
}
async def test_unload_entry(hass):
    """Setting up then unloading the AEMET entry moves it LOADED -> NOT_LOADED."""
    # Freeze "now" so the mocked AEMET responses line up with the request time.
    now = dt_util.parse_datetime("2021-01-09 12:00:00+00:00")
    with patch("homeassistant.util.dt.now", return_value=now), patch(
        "homeassistant.util.dt.utcnow", return_value=now
    ), requests_mock.mock() as _m:
        aemet_requests_mock(_m)

        config_entry = MockConfigEntry(
            domain=DOMAIN, unique_id="aemet_unique_id", data=CONFIG
        )
        config_entry.add_to_hass(hass)

        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
        assert config_entry.state is ConfigEntryState.LOADED

        await hass.config_entries.async_unload(config_entry.entry_id)
        await hass.async_block_till_done()
        assert config_entry.state is ConfigEntryState.NOT_LOADED
| StarcoderdataPython |
8171147 | <reponame>mikiec84/ls.joyous<filename>ls/joyous/migrations/0004_auto_20180425_2355.py<gh_stars>10-100
# Generated by Django 2.0.3 on 2018-04-25 11:55
from django.db import migrations
import ls.joyous.models.events
import timezone_field.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.3: adds a ``tz`` TimeZoneField (with a
    # callable default) to the three event page models.

    dependencies = [
        ('joyous', '0003_extrainfopage_extra_title'),
    ]

    operations = [
        migrations.AddField(
            model_name='multidayeventpage',
            name='tz',
            field=timezone_field.fields.TimeZoneField(default=ls.joyous.models.events._get_default_timezone, verbose_name='Time zone'),
        ),
        migrations.AddField(
            model_name='recurringeventpage',
            name='tz',
            field=timezone_field.fields.TimeZoneField(default=ls.joyous.models.events._get_default_timezone, verbose_name='Time zone'),
        ),
        migrations.AddField(
            model_name='simpleeventpage',
            name='tz',
            field=timezone_field.fields.TimeZoneField(default=ls.joyous.models.events._get_default_timezone, verbose_name='Time zone'),
        ),
    ]
| StarcoderdataPython |
3492751 | from typing import List
class Solution:
    def canJump(self, nums: List[int]) -> bool:
        """Greedy check, scanning right to left.

        Maintain the leftmost index known to reach the end ("target"). A
        position wins if it can jump at least as far as the current target;
        the game is winnable iff index 0 ends up being a winning position.
        """
        target = len(nums) - 1
        for idx in range(len(nums) - 2, -1, -1):
            if idx + nums[idx] >= target:
                target = idx
        return target == 0
3450703 | #!/usr/bin/env python
# pydle.py
# Copyright 2015 <NAME>.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python frontend for Pydle class
"""
from tkinter import Tk, Listbox, Menu, TOP, BOTH, X, LEFT, RIGHT, N, W, S, E, FLAT, END
from ttk import Style, Button, Frame
from pydle import pydle
class App(Frame):
    """Main pydle GUI frame: a book listbox plus a column of action buttons.

    NOTE(review): this module mixes the Python 3 ``tkinter`` import with the
    Python 2 style ``from ttk import ...`` -- on Python 3 the latter should be
    ``from tkinter.ttk import ...``; confirm the intended interpreter.
    """

    # UI constants shared by all instances.
    version = "0.1"
    padding = 10
    screenWidth = 800
    screenHeight = 600

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        # Backend handle used by the button callbacks below.
        self.py = pydle()
        self._initUI()

    def _initUI(self):
        """Build the menu bar, the book listbox and the button column."""
        self.parent.title("Pydle v" + self.version)
        self.parent.minsize(width=str(self.screenWidth), height=str(self.screenHeight))
        # self.parent.config(border=0)
        # Styles
        style = Style()
        style.configure("TFrame", background="gray", border=0)
        style.configure("TButton", background="gray", foreground="lightgray", highlightforeground="black", highlightbackground="darkgray", compound=RIGHT, relief=FLAT)
        self.config(style="TFrame")
        self.pack(fill=BOTH, expand=1)
        # Menus: File (Exit) and Help (About).
        mnuBar = Menu(self.parent)
        self.parent.config(menu=mnuBar)
        mnuFile = Menu(mnuBar, background="gray")
        mnuFile.add_command(label="Exit", command=self.onExitMnu)
        mnuBar.add_cascade(label="File", menu=mnuFile)
        mnuHelp = Menu(mnuBar, background="gray")
        mnuHelp.add_command(label="About", command=self.onAboutMnu)
        mnuBar.add_cascade(label="Help", menu=mnuHelp)
        # Frame content: the book list fills the left side...
        frmBooks = Frame(self, style="TFrame")
        frmBooks.pack(side=LEFT, anchor=N+W, fill=BOTH, expand=1, padx=(self.padding, self.padding / 2), pady=self.padding)
        self.lstBooks = Listbox(frmBooks)
        self.lstBooks.config(background="lightgray", foreground="black", borderwidth=0)
        self.lstBooks.pack(fill=BOTH, expand=1)
        # ...and the action buttons stack on the right.
        frmButtons = Frame(self)
        frmButtons.pack(anchor=N+E, padx=(self.padding / 2, self.padding), pady=self.padding)
        btnLoadBooks = Button(frmButtons, text="Load Books", style="TButton", command=self.onLoadBooksBtn)
        btnLoadBooks.pack(side=TOP, fill=X)
        btnGetNotes = Button(frmButtons, text="Get Notes", style="TButton", command=self.onGetNotesBtn)
        btnGetNotes.pack(side=TOP, fill=X)
        btnBackupBook = Button(frmButtons, text="Backup Book", style="TButton", command=self.onBackupBtn)
        btnBackupBook.pack(side=TOP, fill=X)
        btnBackupAllBooks = Button(frmButtons, text="Backup All Books", style="TButton", command=self.onBackupAllBtn)
        btnBackupAllBooks.pack(side=TOP, fill=X)

    def onLoadBooksBtn(self):
        # Populate the listbox with book names fetched from the backend.
        books = self.py.getBooks()
        for book in books:
            self.lstBooks.insert(END, book["name"])

    def onBackupBtn(self):
        # TODO: back up the selected book.
        pass

    def onBackupAllBtn(self):
        # TODO: back up every book.
        pass

    def onGetNotesBtn(self):
        # Append every note returned by the backend to the listbox.
        notes = self.py.getNotes()
        for note in notes:
            self.lstBooks.insert(END, note)

    def onAboutMnu(self):
        # TODO: show an About dialog.
        pass

    def onExitMnu(self):
        self.onExit()

    def onExit(self):
        self.quit()
def main():
    """Create the Tk root window, attach the App frame and run the event loop."""
    root = Tk()
    # root.geometry("300x300+300+300")
    app = App(root)
    root.mainloop()


# NOTE(review): called unconditionally at import time; consider guarding with
# ``if __name__ == "__main__":``.
main()
| StarcoderdataPython |
12824796 | <gh_stars>1-10
from ..utils import action, results_formatter
from functools import partial
import arep
import pytest
import os
# Bind this test module's filename into the shared formatter, then build the
# expected set of (line, column) positions for the Breaking action.
results_formatter = partial(results_formatter, name=os.path.basename(__file__))
all_results = results_formatter({
    (2, 4), (6, 8), (15, 12)
})
@pytest.fixture
def grepper():
    """Build an arep.Grepper over the Breaking.py action fixture file."""
    engine = arep.Grepper(os.path.abspath('tests/data/Action/Breaking.py'))
    return engine
def test_Breaking(grepper, action):
    """Enabling the Breaking constraint yields exactly the expected positions."""
    action.reset()
    action.Breaking.consideration = True
    grepper.constraint_list.append(action)
    assert set(grepper.all_results()) == all_results
| StarcoderdataPython |
4936438 | <filename>usaspending_api/search/v2/urls_search.py<gh_stars>0
from django.conf.urls import url
from usaspending_api.search.v2.views import search
from usaspending_api.search.v2.views import search_elasticsearch as es
from usaspending_api.search.v2.views.new_awards_over_time import NewAwardsOverTimeVisualizationViewSet
from usaspending_api.search.v2.views.spending_by_category import SpendingByCategoryVisualizationViewSet
from usaspending_api.search.v2.views.spending_over_time import SpendingOverTimeVisualizationViewSet
from usaspending_api.search.v2.views.spending_by_geography import SpendingByGeographyVisualizationViewSet
# NOTE: these regexes are unanchored prefixes, so order matters — each
# "*_count" route must appear before its non-count counterpart (e.g.
# spending_by_award_count before spending_by_award), otherwise the shorter
# prefix would match first and shadow the count endpoint.
urlpatterns = [
    url(r'^new_awards_over_time', NewAwardsOverTimeVisualizationViewSet.as_view()),
    url(r'^spending_over_time', SpendingOverTimeVisualizationViewSet.as_view()),
    url(r'^spending_by_category', SpendingByCategoryVisualizationViewSet.as_view()),
    url(r'^spending_by_geography', SpendingByGeographyVisualizationViewSet.as_view()),
    url(r'^spending_by_award_count', search.SpendingByAwardCountVisualizationViewSet.as_view()),
    url(r'^spending_by_award', search.SpendingByAwardVisualizationViewSet.as_view()),
    url(r'^spending_by_transaction_count', es.SpendingByTransactionCountVisualizaitonViewSet.as_view()),
    url(r'^spending_by_transaction', es.SpendingByTransactionVisualizationViewSet.as_view()),
    url(r'^transaction_spending_summary', es.TransactionSummaryVisualizationViewSet.as_view())
]
| StarcoderdataPython |
152917 | from os import path, listdir, mkdir
from merge_db.save_merge import Database
from tqdm import tqdm
if __name__ == "__main__":
    # Source folder that holds the individual .db files to merge.
    work_dir = "/Users/Mathieu/Desktop/"
    source_folder = f"{work_dir}/db2"
    assert path.exists(source_folder), 'Wrong path to db folder, please correct it.'

    # Collect database names (file names without the ".db" suffix).
    db_names = [entry[:-3] for entry in listdir(source_folder) if entry.endswith(".db")]
    assert len(db_names), 'Could not find any db...'

    # Use the first database as a template for the merged schema.
    template_db = Database(folder=source_folder, database_name=db_names[0])
    columns = template_db.get_columns()

    # Destination folder for the merged database.
    merged_folder = f"{work_dir}/merged_db"
    if not path.exists(merged_folder):
        mkdir(merged_folder)

    merged_db = Database(folder=merged_folder, database_name="combinations")

    # Recreate the "data" table from scratch.
    if merged_db.has_table("data"):
        merged_db.remove_table("data")
    merged_db.create_table("data", columns=columns)

    # Copy every row from every source database, with a progress bar.
    for db_name in tqdm(db_names):
        source_db = Database(folder=source_folder, database_name=db_name)
        rows = source_db.read_n_rows(columns=columns)
        merged_db.write_n_rows(columns=columns, array_like=rows)
| StarcoderdataPython |
4868112 | import math
from multiprocessing import Pool
import numpy as np
import gym.spaces.prng as space_prng
from rl_teacher.utils import get_timesteps_per_episode
def _slice_path(path, segment_length, start_pos=0):
# TODO return var
return {
k: np.asarray(v[start_pos:(start_pos + segment_length)])
for k, v in path.items()
if k in ['obs', "actions", 'original_rewards', 'human_obs']}
# randomly get a clip from a trajectory
def sample_segment_from_path(path, segment_length):
    """Returns a segment sampled from a random place in a path. Returns None if the path is too short"""
    path_length = len(path["obs"])
    if path_length < segment_length:
        return None
    # Any start position that leaves room for a full segment is valid.
    start = np.random.randint(0, path_length - segment_length + 1)
    return _slice_path(path, segment_length, start)
def offset_for_stacking(items, offset):
    """Shift *items* right by *offset*: drop the last *offset* entries and pad
    the front with copies of the first entry, keeping the original length."""
    if offset < 1:
        return items
    padding = [items[0]] * offset
    return padding + items[:-offset]
def stack_frames(obs, depth):
    """Stack each frame with its *depth* predecessors along a trailing axis.

    With depth=3 the first item is three copies of frame 0; the second is
    frames (0,0,1); then (0,1,2), (1,2,3), and so on. depth<1 disables
    stacking and simply returns np.array(obs).
    """
    if depth < 1:
        # No stacking requested: just convert to an array.
        return np.array(obs)
    layers = np.array([offset_for_stacking(obs, shift) for shift in range(depth)])
    # Move the stacking axis from the front to the back:
    # (depth, n, *frame_shape) -> (n, *frame_shape, depth)
    axes = list(range(1, layers.ndim)) + [0]
    return np.transpose(layers, axes)
def random_action(env, ob):
    """Pick an action by uniformly sampling the environment's action space."""
    space = env.action_space
    return space.sample()
def null_action(env, ob):
    """Return the "do nothing" action for *env*'s action space."""
    space = env.action_space
    if hasattr(space, 'n'):
        # Discrete space: action index 0 is used as the no-op.
        return 0
    if hasattr(space, 'low') and hasattr(space, 'high'):
        # Box space: return the midpoint of the valid range.
        return (space.low + space.high) / 2.0
    raise NotImplementedError()  # TODO: Handle other action spaces
def do_rollout(env, action_function, stacked_frames):
    """Run one episode in *env*, choosing actions via *action_function*,
    and return the trajectory as a path dict (obs/rewards/actions/human_obs)."""
    obs, rewards, actions, human_obs = [], [], [], []
    step_limit = get_timesteps_per_episode(env)

    ob = env.reset()
    for _ in range(step_limit):
        action = action_function(env, ob)
        obs.append(ob)
        actions.append(action)
        ob, reward, done, info = env.step(action)
        rewards.append(reward)
        human_obs.append(info.get("human_obs"))
        if done:
            break

    return {
        "obs": stack_frames(obs, stacked_frames),
        "original_rewards": np.array(rewards),
        "actions": np.array(actions),
        "human_obs": np.array(human_obs),
    }
def basic_segment_from_null_action(env_id, make_env, clip_length_in_seconds, stacked_frames):
    """Return a clip-length segment from the start of a do-nothing rollout."""
    env = make_env(env_id)
    n_steps = int(clip_length_in_seconds * env.fps)
    return _slice_path(do_rollout(env, null_action, stacked_frames), n_steps)
def basic_segments_from_rand_rollout(
    env_id, make_env, n_desired_segments, clip_length_in_seconds, stacked_frames,
    # These are only for use with multiprocessing
    seed=0, _verbose=True, _multiplier=1
):
    """Collect path segments from random rollouts in a single process."""
    collected = []
    env = make_env(env_id)
    env.seed(seed)
    space_prng.seed(seed)
    segment_length = int(clip_length_in_seconds * env.fps)
    while len(collected) < n_desired_segments:
        path = do_rollout(env, random_action, stacked_frames)
        # Limit samples per path so the chance of drawing the same part twice is low.
        samples_for_path = max(1, int(0.25 * len(path["obs"]) / segment_length))
        for _ in range(samples_for_path):
            segment = sample_segment_from_path(path, segment_length)
            if segment:
                collected.append(segment)

            if _verbose and len(collected) % 10 == 0 and len(collected) > 0:
                print("Collected %s/%s segments" % (len(collected) * _multiplier, n_desired_segments * _multiplier))
    if _verbose:
        print("Successfully collected %s segments" % (len(collected) * _multiplier))
    return collected
def segments_from_rand_rollout(env_id, make_env, n_desired_segments, clip_length_in_seconds, stacked_frames, workers):
    """Collect path segments from random rollouts, optionally across processes."""
    if workers < 2:
        # Not enough workers to justify a pool: run single-process collection.
        return basic_segments_from_rand_rollout(
            env_id, make_env, n_desired_segments, clip_length_in_seconds, stacked_frames)

    per_worker = int(math.ceil(n_desired_segments / workers))
    # One job per worker; each gets a distinct seed, and only worker 0 is verbose.
    jobs = [
        (env_id, make_env, per_worker, clip_length_in_seconds, stacked_frames, worker_id, worker_id == 0, workers)
        for worker_id in range(workers)]
    pool = Pool(processes=workers)
    nested_results = pool.starmap(basic_segments_from_rand_rollout, jobs)
    pool.close()
    flat = [segment for worker_segments in nested_results for segment in worker_segments]
    return flat[:n_desired_segments]
| StarcoderdataPython |
6443656 | from django.test import TestCase
from events.models import Event
from events.models import Edition
# Create your tests here.
class EventModelTests(TestCase):
    """Tests for the Event model's string representation and slug generation."""

    def setUp(self):
        # Two events share a title to exercise duplicate-slug handling.
        Event.objects.create(title='event title 1')
        Event.objects.create(title='event title same title')
        Event.objects.create(title='event title same title')

    def test_instance_get_string_repr(self):
        """str(event) should equal the event's title."""
        event_1 = Event.objects.get(id='1')
        # assertEqual replaces the deprecated assertEquals alias
        # (removed from unittest in Python 3.12).
        self.assertEqual(str(event_1), event_1.title)

    def test_create_duplicate_title_slug(self):
        """A second event with the same title gets a "-1"-suffixed slug."""
        event_12 = Event.objects.get(id='3')
        self.assertEqual(event_12.slug, 'event-title-same-title-1')
class EditionModelTests(TestCase):
    """Tests for the Edition model's string representation and slug generation."""

    def setUp(self):
        # All editions belong to one event; two share a title to test slug dedup.
        event_1 = Event.objects.create(title='edition title 1')
        Edition.objects.create(title='edition title 1', event=event_1)
        Edition.objects.create(title='edition title same title', event=event_1)
        Edition.objects.create(title='edition title same title', event=event_1)

    def test_instance_get_string_repr(self):
        """str(edition) should equal the edition's title."""
        edition_1 = Edition.objects.get(id='1')
        # assertEqual replaces the deprecated assertEquals alias
        # (removed from unittest in Python 3.12).
        self.assertEqual(str(edition_1), edition_1.title)

    def test_create_duplicate_title_slug(self):
        """A second edition with the same title gets a "-1"-suffixed slug."""
        edition_12 = Edition.objects.get(id='3')
        self.assertEqual(edition_12.slug, 'edition-title-same-title-1')

    def test_save_edition_slug(self):
        """Re-saving with a new title should regenerate the slug."""
        edition_1 = Edition.objects.get(id=1)
        edition_1.title = "another edition"
        edition_1.save()
        self.assertEqual(edition_1.slug, 'edition-title-1')
6656765 | from itertools import combinations, count
from typing import Callable, Iterable, List, Tuple
from projecteuler.util.timing import print_time
from util.primes import primes_until
# The ten decimal digits as one-character strings, used for wildcard substitution.
DIGIT_STRINGS = list(map(str, range(10)))
def _get_primes_of_length(digits: int) -> Tuple[List[int], Callable[[int], bool]]:
    """
    Return the primes with a given number of digits
    :param digits: the number of digits the primes should have
    :return: A sorted list of those primes, and an O(1) membership test
    """
    lower, upper = 10 ** (digits - 1), 10 ** digits
    # Sorted list for iteration
    ordered = [p for p in primes_until(upper) if p > lower]
    # Set for fast containment checking (faster even than binary search)
    members = set(ordered)
    return ordered, members.__contains__
def _wildcard_indices(digits: int, prime: int) -> Iterable[Tuple[int, ...]]:
"""
Given a prime, determines the possible locations of "wildcards": places where the number has the same digits, which
when replaced might give rise to more primes
:param digits: The number of digits of the prime
:param prime: The prime we're determining wildcards for
:return: A generator of wildcard position
"""
for wildcards in range(1, digits + 1):
for combination in combinations(range(digits), wildcards):
prime_string = str(prime)
value = prime_string[combination[0]]
same = all(prime_string[index] == value for index in combination)
if same: # Only those whose original positions on the wildcards are the same
yield combination
def _options(prime: int, wilcard_indices: Tuple[int, ...]) -> Iterable[int]:
"""
:param prime: A prime
:param wilcard_indices: A number of positions where replacement can happen
:return: A generator that returns all numbers with the wildcards replaced with the digits 0-9
"""
prime_list = list(str(prime))
for i in DIGIT_STRINGS:
for index in wilcard_indices:
prime_list[index] = i
yield int(''.join(prime_list))
def problem_0051(family_size: int) -> int:
    """Find the smallest prime whose digit-replacement family contains at
    least *family_size* primes (Project Euler problem 51)."""
    for digits in count(2):
        candidates, is_prime = _get_primes_of_length(digits)
        for prime in candidates:
            for indices in _wildcard_indices(digits, prime):
                family = [opt for opt in _options(prime, indices) if is_prime(opt)]
                if len(family) >= family_size:
                    # Options are generated in ascending digit order, so the
                    # first family member is the smallest.
                    return family[0]
if __name__ == '__main__':
    with print_time():
        # Smallest prime in a replacement family of eight primes.
        FAMILY_SIZE = 8
        print(problem_0051(FAMILY_SIZE))
    # Expected: 121313
| StarcoderdataPython |
4987394 | from dataclasses import dataclass, field
from enum import Enum
from typing import List, Tuple, Set
from dataclasses_json import DataClassJsonMixin
from cloudrail.knowledge.utils.utils import hash_list
@dataclass
class PolicyEvaluation(DataClassJsonMixin):
    """Allowed/denied action sets produced by evaluating policies, split by
    source: resource policies, identity policies, and (when one applies)
    the permission boundary."""
    resource_allowed_actions: Set[str] = field(default_factory=set)
    resource_denied_actions: Set[str] = field(default_factory=set)
    identity_allowed_actions: Set[str] = field(default_factory=set)
    identity_denied_actions: Set[str] = field(default_factory=set)
    # True when a permission boundary constrains the evaluated identity.
    permission_boundary_applied: bool = False
    permission_boundary_allowed_actions: Set[str] = field(default_factory=set)
    permission_boundary_denied_actions: Set[str] = field(default_factory=set)
class ConnectionDirectionType(Enum):
    """Direction of a connection relative to the resource."""
    INBOUND = 'inbound'
    OUTBOUND = 'outbound'
class ConnectionType(Enum):
    """Whether a connection is to a known resource (private) or open to the public."""
    PRIVATE = 'private'
    PUBLIC = 'public'
class ConnectionProperty:
    """Marker base class for the payload attached to a connection."""
    pass
class PolicyConnectionProperty(ConnectionProperty):
    """Connection payload carrying the policy evaluations associated with it."""

    def __init__(self, policy_evaluation: List[PolicyEvaluation]):
        self.policy_evaluation = policy_evaluation
class PortConnectionProperty(ConnectionProperty):
    """Connection payload describing port ranges reachable from a CIDR block
    over a given IP protocol."""

    def __init__(self, ports: List[Tuple[int, int]], cidr_block: str, ip_protocol_type: str):
        self.ports: List[Tuple[int, int]] = ports  # todo - should be only tuple
        self.cidr_block: str = cidr_block
        self.ip_protocol_type: str = ip_protocol_type

    def __eq__(self, o: object) -> bool:
        # Return NotImplemented (not False) for foreign types so Python can
        # try the reflected comparison; `==` still ultimately evaluates False.
        if not isinstance(o, PortConnectionProperty):
            return NotImplemented
        # Intentionally compare only the first two elements of each port
        # entry, preserving the original (start, end) range semantics.
        return (len(self.ports) == len(o.ports)
                and all(a[0] == b[0] and a[1] == b[1] for a, b in zip(self.ports, o.ports))
                and self.cidr_block == o.cidr_block
                and self.ip_protocol_type == o.ip_protocol_type)

    def __hash__(self) -> int:
        return hash_list([hash_list(self.ports or []), self.cidr_block, self.ip_protocol_type])
@dataclass
class ConnectionDetail:
    """Base record pairing a connection payload with its direction.

    ``connection_type`` is excluded from ``__init__``; concrete subclasses
    pin it via a class attribute (PRIVATE / PUBLIC).
    """
    connection_type: ConnectionType = field(init=False)
    connection_property: ConnectionProperty
    connection_direction_type: ConnectionDirectionType
class ConnectionInstance:
    """Tracks the inbound and outbound connections discovered for a resource."""

    def __init__(self):
        self.inbound_connections: Set[ConnectionDetail] = set()
        self.outbound_connections: Set[ConnectionDetail] = set()

    def add_private_inbound_conn(self, conn: ConnectionProperty, target_instance: 'ConnectionInstance') -> None:
        self.inbound_connections.add(
            PrivateConnectionDetail(conn, ConnectionDirectionType.INBOUND, target_instance))

    def add_public_inbound_conn(self, conn: ConnectionProperty) -> None:
        self.inbound_connections.add(
            PublicConnectionDetail(conn, ConnectionDirectionType.INBOUND))

    def add_private_outbound_conn(self, conn: ConnectionProperty, target_instance) -> None:
        self.outbound_connections.add(
            PrivateConnectionDetail(conn, ConnectionDirectionType.OUTBOUND, target_instance))

    def add_public_outbound_conn(self, conn: ConnectionProperty) -> None:
        self.outbound_connections.add(
            PublicConnectionDetail(conn, ConnectionDirectionType.OUTBOUND))

    def is_inbound_public(self) -> bool:
        return any(c for c in self.inbound_connections if c.connection_type == ConnectionType.PUBLIC)

    def is_outbound_public(self) -> bool:
        return any(c for c in self.outbound_connections if c.connection_type == ConnectionType.PUBLIC)
@dataclass
class PrivateConnectionDetail(ConnectionDetail):
    """Connection to a specific known instance (not publicly exposed)."""
    target_instance: ConnectionInstance
    connection_type = ConnectionType.PRIVATE

    def __hash__(self) -> int:
        # Hash over the same fields that define the connection's identity,
        # so details de-duplicate correctly inside ConnectionInstance's sets.
        return hash((self.connection_type,
                     self.connection_direction_type,
                     self.connection_property,
                     self.target_instance))
@dataclass
class PublicConnectionDetail(ConnectionDetail):
    """Connection exposed publicly (no specific target instance)."""
    connection_type = ConnectionType.PUBLIC

    def __hash__(self) -> int:
        # No target_instance field here, unlike PrivateConnectionDetail.
        return hash((self.connection_type, self.connection_direction_type, self.connection_property))
| StarcoderdataPython |
9778674 | import base64
import hashlib
from typing import List
import cbor2
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import NameOID
from webauthn.helpers.cose import COSEAlgorithmIdentifier
from webauthn.helpers import (
base64url_to_bytes,
validate_certificate_chain,
verify_safetynet_timestamp,
verify_signature,
)
from webauthn.helpers.exceptions import (
InvalidCertificateChain,
InvalidRegistrationResponse,
)
from webauthn.helpers.known_root_certs import globalsign_r2, globalsign_root_ca
from webauthn.helpers.structs import AttestationStatement, WebAuthnBaseModel
class SafetyNetJWSHeader(WebAuthnBaseModel):
    """Properties in the Header of a SafetyNet JWS"""

    alg: str  # JWS signing algorithm; verification requires "RS256"
    x5c: List[str]  # certificate chain, base64url-encoded DER; entry 0 is treated as the leaf
class SafetyNetJWSPayload(WebAuthnBaseModel):
    """Properties in the Payload of a SafetyNet JWS

    Values below correspond to camelCased properties in the JWS itself. This class
    handles converting the properties to Pythonic snake_case.
    """

    nonce: str  # base64 of SHA-256(authData || clientDataHash), checked during verification
    timestamp_ms: int
    apk_package_name: str
    apk_digest_sha256: str
    cts_profile_match: bool  # device-integrity signal; must be True to pass verification
    apk_certificate_digest_sha256: List[str]
    basic_integrity: bool
def verify_android_safetynet(
    *,
    attestation_statement: AttestationStatement,
    attestation_object: bytes,
    client_data_json: bytes,
    pem_root_certs_bytes: List[bytes],
    verify_timestamp_ms: bool = True,
) -> bool:
    """Verify an "android-safetynet" attestation statement

    See https://www.w3.org/TR/webauthn-2/#sctn-android-safetynet-attestation

    Raises InvalidRegistrationResponse (with a "(SafetyNet)" suffix) on any
    verification failure; returns True on success.

    Notes:
    - `verify_timestamp_ms` is a kind of escape hatch specifically for enabling
      testing of this method. Without this we can't use static responses in unit
      tests because they'll always evaluate as expired. This flag can be removed
      from this method if we ever figure out how to dynamically create
      safetynet-formatted responses that can be immediately tested.
    """
    if not attestation_statement.ver:
        # As of this writing, there is only one format of the SafetyNet response and
        # ver is reserved for future use (so for now just make sure it's present)
        raise InvalidRegistrationResponse(
            "Attestation statement was missing version (SafetyNet)"
        )

    if not attestation_statement.response:
        raise InvalidRegistrationResponse(
            "Attestation statement was missing response (SafetyNet)"
        )

    # Begin peeling apart the JWS in the attestation statement response
    jws = attestation_statement.response.decode("ascii")
    jws_parts = jws.split(".")
    if len(jws_parts) != 3:
        raise InvalidRegistrationResponse(
            "Response JWS did not have three parts (SafetyNet)"
        )

    header = SafetyNetJWSHeader.parse_raw(base64url_to_bytes(jws_parts[0]))
    payload = SafetyNetJWSPayload.parse_raw(base64url_to_bytes(jws_parts[1]))
    signature_bytes_str: str = jws_parts[2]

    # Verify that the nonce attribute in the payload of response is identical to the
    # Base64 encoding of the SHA-256 hash of the concatenation of authenticatorData and
    # clientDataHash.

    # Extract attStmt bytes from attestation_object
    attestation_dict = cbor2.loads(attestation_object)
    authenticator_data_bytes = attestation_dict["authData"]

    # Generate a hash of client_data_json
    client_data_hash = hashlib.sha256()
    client_data_hash.update(client_data_json)
    client_data_hash_bytes = client_data_hash.digest()

    nonce_data = b"".join(
        [
            authenticator_data_bytes,
            client_data_hash_bytes,
        ]
    )
    # Start with a sha256 hash
    nonce_data_hash = hashlib.sha256()
    nonce_data_hash.update(nonce_data)
    nonce_data_hash_bytes = nonce_data_hash.digest()
    # Encode to base64
    nonce_data_hash_bytes = base64.b64encode(nonce_data_hash_bytes)
    # Finish by decoding to string
    nonce_data_str = nonce_data_hash_bytes.decode("utf-8")

    if payload.nonce != nonce_data_str:
        raise InvalidRegistrationResponse(
            "Payload nonce was not expected value (SafetyNet)"
        )

    # Verify that the SafetyNet response actually came from the SafetyNet service
    # by following the steps in the SafetyNet online documentation.
    x5c = [base64url_to_bytes(cert) for cert in header.x5c]

    if not payload.cts_profile_match:
        raise InvalidRegistrationResponse(
            "Could not verify device integrity (SafetyNet)"
        )

    if verify_timestamp_ms:
        try:
            verify_safetynet_timestamp(payload.timestamp_ms)
        except ValueError as err:
            raise InvalidRegistrationResponse(f"{err} (SafetyNet)")

    # Verify that the leaf certificate was issued to the hostname attest.android.com
    attestation_cert = x509.load_der_x509_certificate(x5c[0], default_backend())
    cert_common_name = attestation_cert.subject.get_attributes_for_oid(
        NameOID.COMMON_NAME,
    )[0]
    if cert_common_name.value != "attest.android.com":
        raise InvalidRegistrationResponse(
            'Certificate common name was not "attest.android.com" (SafetyNet)'
        )

    # Validate certificate chain
    try:
        # Include known root certificates for this attestation format with whatever
        # other certs were provided
        pem_root_certs_bytes.append(globalsign_r2)
        pem_root_certs_bytes.append(globalsign_root_ca)

        validate_certificate_chain(
            x5c=x5c,
            pem_root_certs_bytes=pem_root_certs_bytes,
        )
    except InvalidCertificateChain as err:
        raise InvalidRegistrationResponse(f"{err} (SafetyNet)")

    # Verify signature
    verification_data = f"{jws_parts[0]}.{jws_parts[1]}".encode("utf-8")
    signature_bytes = base64url_to_bytes(signature_bytes_str)

    if header.alg != "RS256":
        # BUGFIX: this message previously read "(SafetyNet" with an
        # unbalanced parenthesis.
        raise InvalidRegistrationResponse(
            f"JWS header alg was not RS256: {header.alg} (SafetyNet)"
        )

    # Get cert public key bytes
    attestation_cert_pub_key = attestation_cert.public_key()

    try:
        verify_signature(
            public_key=attestation_cert_pub_key,
            signature_alg=COSEAlgorithmIdentifier.RSASSA_PKCS1_v1_5_SHA_256,
            signature=signature_bytes,
            data=verification_data,
        )
    except InvalidSignature:
        # BUGFIX: this message previously said "(Packed)" — a copy/paste from
        # the packed attestation verifier; this is the SafetyNet path.
        raise InvalidRegistrationResponse(
            "Could not verify attestation statement signature (SafetyNet)"
        )

    return True
| StarcoderdataPython |
9641620 | input = """
c num blocks = 1
c num vars = 100
c minblockids[0] = 1
c maxblockids[0] = 100
p cnf 100 465
-29 57 -100 0
-75 -16 66 0
72 73 93 0
63 -4 -61 0
-47 21 58 0
58 14 89 0
-26 81 50 0
-57 44 -56 0
31 93 -38 0
93 -57 99 0
-94 22 21 0
-45 71 75 0
-98 60 -34 0
-90 -37 87 0
73 1 -41 0
31 -90 89 0
-42 -39 82 0
-47 10 6 0
-37 -22 90 0
73 -86 -44 0
-69 3 79 0
-62 96 -38 0
90 95 -66 0
99 32 -75 0
45 -95 22 0
-33 -9 -88 0
71 -79 6 0
-16 100 76 0
10 -96 -79 0
8 -93 88 0
49 45 13 0
72 27 4 0
24 -4 38 0
-48 51 44 0
-48 72 -25 0
67 -13 -62 0
17 -27 -43 0
-6 50 -63 0
-91 1 -24 0
-100 -91 -42 0
-16 -91 -48 0
83 36 -68 0
34 57 -5 0
6 66 74 0
59 45 1 0
-47 -85 35 0
20 -11 42 0
-83 9 -94 0
79 -41 5 0
10 76 -22 0
29 99 5 0
-79 -86 -33 0
-75 -50 90 0
78 -63 -45 0
4 -62 -65 0
-35 -14 87 0
17 -53 -27 0
64 22 -39 0
-70 -23 59 0
89 -56 -47 0
-70 63 -42 0
-50 -48 32 0
94 -62 66 0
-74 -87 90 0
-79 1 53 0
64 65 -2 0
45 -15 -94 0
-56 31 32 0
60 27 39 0
-88 91 -44 0
26 99 -49 0
-57 -92 -61 0
26 -87 -3 0
-93 -76 70 0
-89 38 -20 0
-26 9 -68 0
-53 -86 43 0
97 -64 67 0
73 -88 94 0
-83 34 37 0
75 -3 -82 0
-85 45 2 0
-89 -11 17 0
-85 -17 -60 0
45 -23 6 0
-39 25 23 0
96 1 67 0
-48 84 -24 0
-3 -75 27 0
70 18 44 0
41 49 39 0
-25 45 -46 0
-22 -19 -39 0
2 55 -91 0
40 35 -50 0
34 86 -95 0
29 -98 62 0
51 44 -88 0
-12 -67 -75 0
-98 31 -78 0
25 -99 73 0
47 -42 -35 0
-91 2 6 0
-24 -2 -88 0
-78 100 -47 0
76 -71 -19 0
-54 18 -44 0
10 -95 -70 0
-19 -54 -80 0
-45 -80 35 0
-97 99 -2 0
-27 9 -67 0
38 -39 41 0
16 23 -62 0
59 -74 57 0
61 -44 37 0
18 -17 -47 0
-14 -45 -9 0
-10 15 56 0
-97 74 81 0
-27 60 25 0
-98 -24 -48 0
-3 -65 14 0
72 89 -86 0
27 -99 -34 0
28 67 96 0
-51 -15 78 0
-66 61 -79 0
1 -71 -16 0
-12 -36 29 0
-34 87 71 0
-28 1 29 0
13 -31 39 0
44 78 -31 0
-7 78 1 0
-95 97 -16 0
-57 -3 -40 0
92 -29 -89 0
-20 56 5 0
-92 23 -85 0
100 -57 -25 0
22 -27 32 0
-38 83 -67 0
37 90 -82 0
-46 -65 -42 0
-67 -81 -79 0
-48 -72 25 0
-90 -92 38 0
-20 53 89 0
71 46 -88 0
76 47 -14 0
98 15 -31 0
-41 -82 100 0
-21 -45 54 0
99 9 -94 0
-70 -65 -37 0
3 -78 -14 0
2 81 55 0
-54 39 10 0
84 -67 -93 0
85 94 -65 0
32 43 -49 0
46 -9 11 0
-96 37 73 0
22 -68 79 0
-5 61 -97 0
-13 -34 -87 0
95 37 78 0
22 52 66 0
-74 59 -52 0
-91 -85 -70 0
12 92 -44 0
7 -56 -10 0
-6 46 14 0
-53 35 76 0
-67 5 -13 0
-76 88 -14 0
-4 -31 -46 0
50 88 1 0
4 -7 -20 0
-7 77 54 0
100 72 -84 0
-100 10 -99 0
57 66 -58 0
-47 -10 2 0
-88 50 -68 0
34 -18 -21 0
-29 36 62 0
-69 29 75 0
-64 28 57 0
-100 97 -60 0
31 -100 -12 0
-82 -81 -98 0
-2 81 -58 0
62 74 14 0
85 -70 -2 0
66 -14 13 0
77 -46 -75 0
32 -30 -43 0
-82 64 -100 0
-87 32 51 0
-22 -12 -18 0
36 30 -59 0
-79 -67 -82 0
-34 92 55 0
92 -2 35 0
24 74 -61 0
-63 41 21 0
-97 43 49 0
89 -45 -93 0
-2 89 33 0
78 -79 -100 0
90 8 82 0
-95 20 -84 0
-100 -2 62 0
52 -23 91 0
41 -61 4 0
22 -13 -12 0
-14 -58 -25 0
20 11 -18 0
32 -12 -14 0
-47 82 78 0
-48 -40 -97 0
-24 79 -43 0
57 47 97 0
-43 54 94 0
13 50 34 0
-96 -58 -11 0
-13 -95 -25 0
31 49 -23 0
-75 37 92 0
-60 -10 -22 0
-100 -8 68 0
-96 25 -75 0
25 76 -67 0
-96 -69 -86 0
79 78 -55 0
-21 -85 -78 0
-59 -81 -29 0
-96 1 54 0
3 16 -27 0
14 -16 95 0
38 57 -84 0
78 -40 25 0
-45 -79 -100 0
37 93 -70 0
-16 51 23 0
87 74 44 0
96 -39 60 0
95 -36 -73 0
84 56 -96 0
3 29 96 0
43 -75 -13 0
-68 -70 54 0
31 43 64 0
98 -1 -10 0
-42 -26 59 0
88 -35 68 0
-77 -44 -69 0
-96 -68 -14 0
90 46 69 0
68 -47 44 0
-27 -24 -21 0
33 17 -32 0
54 47 -25 0
94 13 64 0
-86 -23 43 0
-53 -16 54 0
-58 -10 -35 0
83 -2 -80 0
22 94 46 0
3 -24 90 0
-21 -82 -29 0
93 -100 -68 0
11 95 92 0
-21 59 99 0
-56 -82 -84 0
15 13 -75 0
51 -68 83 0
63 -4 32 0
57 14 -8 0
-67 -1 3 0
-83 -44 -62 0
-23 9 14 0
4 -85 -61 0
63 -46 -98 0
19 -69 38 0
82 -46 -71 0
-13 -69 -31 0
68 -11 -64 0
13 -77 -12 0
-72 65 83 0
-19 83 -56 0
99 -24 -14 0
-85 -13 68 0
-27 19 32 0
41 -16 73 0
98 99 -55 0
11 -68 36 0
67 -32 61 0
80 -49 42 0
-80 -45 -62 0
93 24 -22 0
68 -18 69 0
19 78 71 0
-85 17 19 0
95 30 40 0
-10 -38 70 0
49 -11 74 0
59 -20 53 0
-79 -67 -12 0
59 68 44 0
66 -69 19 0
21 -82 7 0
7 12 -62 0
-63 -30 -1 0
-94 68 59 0
-15 10 66 0
98 31 -77 0
56 -67 68 0
81 54 -62 0
55 -88 17 0
-45 -32 -57 0
-36 -43 48 0
61 88 8 0
-57 98 73 0
30 -67 78 0
-67 -65 -99 0
24 -31 64 0
84 12 -88 0
-10 -16 -8 0
91 -3 -81 0
69 94 -73 0
-46 -65 -77 0
-15 89 -41 0
-69 -31 -87 0
88 32 67 0
92 -73 86 0
79 72 -52 0
17 92 51 0
-72 60 -25 0
48 -28 -44 0
-75 89 -72 0
54 93 -96 0
96 -8 33 0
-50 48 19 0
14 -72 -97 0
-57 -17 -53 0
92 10 82 0
-21 -34 -8 0
-77 -58 -50 0
50 16 -81 0
93 39 -22 0
-78 29 90 0
-95 -56 100 0
19 99 10 0
32 -89 53 0
74 68 65 0
33 6 -37 0
-58 -38 61 0
46 -15 -89 0
-88 -26 74 0
22 -61 -16 0
-12 78 -68 0
20 86 -72 0
-86 -3 -12 0
-82 87 38 0
72 -32 -53 0
-80 72 -41 0
-31 81 33 0
2 -90 98 0
-10 77 -1 0
-58 -19 -63 0
52 -65 -66 0
-4 56 -76 0
-21 63 -18 0
85 -95 80 0
-34 71 -53 0
-57 48 82 0
50 49 11 0
73 87 -3 0
-68 -20 57 0
-88 47 52 0
-42 52 -92 0
-4 -33 -19 0
-63 23 99 0
-5 74 -17 0
-89 37 42 0
6 -7 71 0
-90 -35 75 0
29 -71 38 0
-70 12 -23 0
-4 -28 -79 0
-62 -92 -96 0
68 -87 -13 0
39 13 -99 0
44 52 -32 0
22 41 -5 0
-74 -46 75 0
-34 75 -41 0
-30 22 45 0
85 -27 60 0
-58 -84 100 0
46 17 51 0
-98 -36 -19 0
-28 61 -84 0
75 -49 97 0
33 -89 15 0
61 -27 -29 0
-96 2 -89 0
-19 -4 -10 0
9 -4 -12 0
-94 -42 -2 0
91 80 -54 0
47 48 -76 0
-95 -3 69 0
-62 49 58 0
-27 -39 -17 0
94 -85 -54 0
-77 -35 -6 0
-58 2 96 0
-34 28 -72 0
-45 -60 99 0
-23 88 97 0
-96 -87 -53 0
8 -98 -35 0
-86 -53 -84 0
86 30 97 0
12 44 82 0
66 -20 57 0
71 -89 -67 0
-59 -99 4 0
-79 18 -84 0
17 14 91 0
-50 15 -58 0
-95 -51 50 0
-91 -9 31 0
81 79 -23 0
-7 -34 -67 0
-54 -16 -66 0
-2 32 -25 0
-29 59 -10 0
-3 89 -56 0
-71 28 32 0
-88 -55 30 0
41 29 94 0
-16 11 34 0
93 -51 59 0
27 -74 -98 0
-73 27 38 0
-63 37 -39 0
-32 -58 -65 0
62 -46 49 0
23 -85 -82 0
73 -6 -5 0
45 55 13 0
26 -9 88 0
59 -14 12 0
39 67 -47 0
-65 -69 -85 0
94 -29 -88 0
49 -56 59 0
55 33 34 0
73 -75 42 0
48 -36 11 0
"""
# Expected solver verdict for the DIMACS CNF instance held in `input` above.
output = "UNSAT"
| StarcoderdataPython |
8160035 | <reponame>rudecs/jumpscale_core7
from JumpScale import j
def cb():
    # Lazy factory: import HashTool only when j.tools.hash is first accessed.
    from .HashTool import HashTool
    return HashTool()


# Register the factory so the tool is available as j.tools.hash.
j.base.loader.makeAvailable(j, 'tools')
j.tools._register('hash', cb)
| StarcoderdataPython |
8044750 | <reponame>albailey/config
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
class KubeCluster(base.Resource):
    """REST resource representing a Kubernetes cluster."""

    def __repr__(self):
        return "<kube_cluster %s>" % self._info
class KubeClusterManager(base.Manager):
    """API manager for Kubernetes cluster resources."""

    resource_class = KubeCluster

    @staticmethod
    def _path(name=None):
        # Collection endpoint, or a single cluster's endpoint when a name is given.
        collection = '/v1/kube_clusters'
        return '%s/%s' % (collection, name) if name else collection

    def list(self):
        """Retrieve the list of kubernetes clusters known to the system."""
        return self._list(self._path(), 'kube_clusters')

    def get(self, name):
        """Retrieve the details of a given kubernetes cluster

        :param name: kubernetes cluster name
        """
        try:
            return self._list(self._path(name))[0]
        except IndexError:
            # The API returned no matching cluster.
            return None
| StarcoderdataPython |
275754 | '''Simple window with some custom values'''
import sys
from PyQt4 import QtGui
# A single QApplication must exist before any widgets are created.
app = QtGui.QApplication(sys.argv)

w = QtGui.QWidget()
w.resize(300, 200)  # width x height in pixels
w.move(100, 100)  # window position from the screen's top-left corner
w.setWindowTitle('Simple window')
w.show()

# Enter the Qt event loop and propagate its exit code to the shell.
sys.exit(app.exec_())
| StarcoderdataPython |
11306471 | <reponame>afeinstein20/animal_colors
import numpy as np
import matplotlib.pyplot as plt
__all__ = ['Sensitivity']
class Sensitivity(object):
    """Approximate photoreceptor sensitivity curves for a chosen animal.

    Each curve is modeled as a (sum of) Gaussian(s) over a fixed 300-700 nm
    wavelength grid; the resulting per-channel map is stored in ``mapped``
    and a band-limited version in ``total_map``.
    """

    def __init__(self, animal):
        """
        Sets the sensitivity scaling for different animals.
        Sensitivity scalings are approximated as Gaussians.

        Parameters
        ----------
        animal : str
            The name of the animal you want to imitate. Current
            options are: human, blue tit, turkey, honeybee, pigeon,
            and house fly.

        Raises
        ------
        ValueError
            If `animal` is not one of the implemented options.
        """
        self.animal = animal
        # Wavelength grid [nm] the sensitivity curves are evaluated on.
        self.wave_x = np.linspace(300, 700, 1000)
        # Cutoffs [nm] separating the red / green / blue contribution bands.
        self.red_lim = 650
        self.blue_lim = 500

        key = animal.lower()  # hoisted: lower() was previously recomputed per branch
        if key == 'human':
            self.human()
        elif key == 'pigeon':
            self.pigeon()
        elif key == 'honeybee':
            self.honeybee()
        elif key == 'blue tit':
            self.bluetit()
        elif key == 'turkey':
            self.turkey()
        elif key == 'house fly':
            self.housefly()
        else:
            raise ValueError('Animal not implemented yet.')

        self.set_contributions()

    def pdf(self, x, mu, std):
        """
        Evaluate a normalized Gaussian density.

        Parameters
        ----------
        x : float or np.ndarray
            Points at which to evaluate the density.
        mu : float
            Mean value.
        std : float
            Standard deviation.

        Returns
        -------
        float or np.ndarray
            Gaussian density evaluated at `x`.
        """
        fact = np.sqrt(2 * np.pi * std**2)
        exp = np.exp(-0.5 * ( (x-mu) / std)**2)
        return 1.0/fact * exp

    def set_contributions(self):
        """
        Restrict each RGB channel of the sensitivity map to its wavelength
        band (red beyond `red_lim`, blue below `blue_lim`, green between)
        and store the result in ``self.total_map``.
        """
        # NOTE: a normalized row-sum ("tot") was previously computed here but
        # never used; that dead code has been removed.
        reset = np.zeros(self.mapped.shape)

        r = np.where(self.wave_x >= self.red_lim)[0]
        b = np.where(self.wave_x <= self.blue_lim)[0]
        g = np.where((self.wave_x < self.red_lim) &
                     (self.wave_x > self.blue_lim))[0]

        reset[:, 0][r] = self.mapped[:, 0][r]
        reset[:, 1][g] = self.mapped[:, 1][g]
        reset[:, 2][b] = self.mapped[:, 2][b]

        self.total_map = reset

    def plot(self):
        """
        Plot the per-cone sensitivity functions.
        """
        for i in range(self.mapped.shape[1]):
            plt.plot(self.wave_x, self.mapped[:, i], lw=4, label='Cone {}'.format(i))
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),
                   loc='lower left',
                   ncol=self.mapped.shape[1], mode="expand",
                   borderaxespad=0.)
        plt.xlabel('wavelength [nm]', fontsize=16)
        plt.ylabel('sensitivity', fontsize=16)
        plt.show()

    def human(self):
        """
        Creates sensitivity distribution for humans.
        """
        blue = self.pdf(self.wave_x, 420.0, 40.0)
        blue /= np.nanmax(blue)

        red = self.pdf(self.wave_x, 590, 50)
        red /= np.nanmax(red)

        green = self.pdf(self.wave_x, 550, 50)
        green /= np.nanmax(green)

        self.mapped = np.array([red, green, blue]).T

    def pigeon(self):
        """
        Creates sensitivity distribution for pigeons
        (blue channel includes an ultraviolet component).
        """
        blue = self.pdf(self.wave_x, 490.0, 20.0)
        blue /= np.nanmax(blue)
        ultra = self.pdf(self.wave_x, 400, 40)
        ultra /= np.nanmax(ultra)
        blue = (blue + ultra) / np.nanmax(blue + ultra)

        green = self.pdf(self.wave_x, 550, 20)
        green /= np.nanmax(green)

        red = self.pdf(self.wave_x, 630, 20)
        red /= np.nanmax(red)

        self.mapped = np.array([red, green, blue]).T

    def honeybee(self):
        """
        Creates sensitivity distribution for honeybees
        (red and green channels each get a boosted short-wavelength lobe).
        """
        blue = self.pdf(self.wave_x, 350.0, 30.0)
        blue /= np.nanmax(blue)

        red = self.pdf(self.wave_x, 550, 40)
        red /= np.nanmax(red)
        red_lower = self.pdf(self.wave_x, 400, 60.) * 30
        red = (red + red_lower) / np.nanmax(red + red_lower)

        green = self.pdf(self.wave_x, 450, 30)
        green /= np.nanmax(green)
        green_lower = self.pdf(self.wave_x, 370, 30) * 30
        green = (green + green_lower) / np.nanmax(green + green_lower)

        self.mapped = np.array([red, green, blue]).T

    def bluetit(self):
        """
        Creates sensitivity distribution for the blue tit
        (blue channel includes an ultraviolet component).
        """
        red = self.pdf(self.wave_x, 580, 40)
        red /= np.nanmax(red)

        green = self.pdf(self.wave_x, 500, 40)
        green /= np.nanmax(green)

        blue = self.pdf(self.wave_x, 420, 30)
        blue /= np.nanmax(blue)
        ultra = self.pdf(self.wave_x, 340, 30)
        ultra /= np.nanmax(ultra)
        blue = (blue + ultra) / np.nanmax(blue + ultra)

        self.mapped = np.array([red, green, blue]).T

    def turkey(self):
        """
        Creates sensitivity distribution for the turkey
        (blue channel includes an ultraviolet component).
        """
        red = self.pdf(self.wave_x, 590, 40)
        red /= np.nanmax(red)

        green = self.pdf(self.wave_x, 530, 40)
        green /= np.nanmax(green)

        blue = self.pdf(self.wave_x, 470, 30)
        blue /= np.nanmax(blue)
        ultra = self.pdf(self.wave_x, 410, 30)
        ultra /= np.nanmax(ultra)
        blue = (blue + ultra) / np.nanmax(blue + ultra)

        self.mapped = np.array([red, green, blue]).T

    def housefly(self):
        """
        Creates sensitivity distribution for the house fly
        (green channel gets a half-weight short-wavelength lobe).
        """
        red = self.pdf(self.wave_x, 590, 20)
        red /= np.nanmax(red)

        green = self.pdf(self.wave_x, 500, 40)
        green /= np.nanmax(green)
        subgreen = self.pdf(self.wave_x, 410, 60)
        subgreen /= (np.nanmax(subgreen) * 2)
        green = (green + subgreen) / np.nanmax(green + subgreen)

        blue = self.pdf(self.wave_x, 360, 30)
        blue /= np.nanmax(blue)

        self.mapped = np.array([red, green, blue]).T
| StarcoderdataPython |
9611952 | <reponame>JacobGrig/ML-volatility<filename>ml_volatility/ml_volatility/model/model.py
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import statsmodels.api as sm
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from scipy.optimize import fmin_slsqp
class BaseModel:
    """Loads the realized-variance dataset and derives the series shared by
    all volatility models in this module.

    Derived attributes:
      rv_df  -- full CSV as a DataFrame (expects 'Symbol', 'rv5', 'medrv' columns).
      rv_vec -- realized volatility: sqrt of the 5-minute realized variance of
                .SPX; 1e-30 keeps later log() calls finite.
      C_vec  -- continuous-component proxy (median realized variance).
      J_vec  -- jump component: rv_vec**2 - C_vec.
                NOTE(review): nothing here guarantees J_vec > -1, so
                j_vec = log(J_vec + 1) could be NaN -- confirm with the data.
      c_vec / j_vec -- log-transformed continuous / jump components.
    """
    def __init__(self, data_path):
        # data_path: directory (pathlib.Path-like) containing data.csv
        self.data_path = data_path
        self.rv_df = pd.read_csv(data_path / "data.csv")
        self.rv_vec = np.sqrt(
            self.rv_df.loc[self.rv_df["Symbol"] == ".SPX"]["rv5"].values
        ) + 1e-30
        self.C_vec = self.rv_df.loc[self.rv_df["Symbol"] == ".SPX"]["medrv"].values
        self.J_vec = self.rv_vec ** 2 - self.C_vec
        self.c_vec = np.log(self.C_vec)
        self.j_vec = np.log(self.J_vec + 1)
class MemModel(BaseModel):
    """Multiplicative Error Model (MEM) for realized volatility.

    Conditional mean recursion psi_t = w + a*rv_{t-1} + b*psi_{t-1};
    (w, a, b) are estimated by minimizing an exponential-error
    log-likelihood with SLSQP under the stationarity constraint a + b <= 1.
    """
    def __init__(self, data_path):
        super().__init__(data_path)
    def __prepare_data(self):
        """Scales the series, splits 70/30 into train/test and initializes
        the optimizer start point, bounds and the conditional-mean buffer."""
        # scale up to keep likelihood terms away from numerical underflow
        self.rv_vec *= 1000
        train_test_index = int(0.7 * self.rv_vec.size)
        self.rv_train_vec = self.rv_vec[:train_test_index]
        self.rv_test_vec = self.rv_vec[train_test_index:]
        self.init_mean = self.rv_train_vec.mean()
        self.init_var = self.rv_train_vec.var(ddof=1)
        # start point (w, a, b); box bounds keep the recursion non-negative
        self.start_vec = np.array([self.init_mean * 0.01, 0.02, 0.9])
        self.bound_vec = np.array([(0.0, 2 * self.init_mean), (0.0, 1.0), (0.0, 1.0)])
        self.train_size = self.rv_train_vec.size
        self.test_size = self.rv_test_vec.size
        # psi_vec holds the conditional means; filled in by __log_like
        self.psi_vec = np.ones(shape=self.train_size) * self.init_mean
        return self
    def __log_like(self, par_vec, ret_format=False):
        """Likelihood objective for SLSQP; fills self.psi_vec as a side effect.

        Returns the scalar objective, or (scalar, per-observation terms,
        copy of psi_vec) when ret_format is True.
        """
        # NOTE(review): epsilon_vec is computed but never used afterwards
        epsilon_vec = np.ones(shape=self.train_size)
        for t in range(1, self.train_size):
            epsilon_vec[t - 1] = self.rv_train_vec[t - 1] / self.psi_vec[t - 1]
            self.psi_vec[t] = par_vec.dot(
                [1, self.rv_train_vec[t - 1], self.psi_vec[t - 1]]
            )
        # per-observation terms: rv/psi + log(psi)
        log_like_vec = self.rv_train_vec / self.psi_vec + np.log(self.psi_vec)
        if not ret_format:
            return np.sum(log_like_vec)
        else:
            return np.sum(log_like_vec), log_like_vec, np.copy(self.psi_vec)
    def __optimize(self):
        """Runs SLSQP; f_ieqcons >= 0 enforces the constraint a + b <= 1."""
        self.estimate_vec = fmin_slsqp(
            self.__log_like,
            self.start_vec,
            f_ieqcons=lambda par_vec, ret_format=False: np.array(
                [1 - par_vec[1] - par_vec[2]]
            ),
            bounds=self.bound_vec,
        )
        return self
    def __predict(self):
        """Rolling one-step-ahead forecasts of psi over the test sample,
        seeded with the last training observation."""
        n_test = self.rv_test_vec.size
        psi_vec = np.zeros(n_test + 1)
        psi_vec[0] = self.rv_train_vec[-1]
        for i_pred in np.arange(n_test):
            psi_vec[i_pred + 1] = self.estimate_vec.dot(
                [1, self.rv_test_vec[i_pred], psi_vec[i_pred]]
            )
        self.rv_pred_vec = psi_vec[1:]
        return self
    def __error(self):
        """MSE of the forecasts in levels and in logs, after undoing the
        x1000 scaling applied in __prepare_data."""
        self.rv_log_test_vec = np.log(self.rv_test_vec / 1000 + 1e-10)
        self.rv_log_pred_vec = np.log(self.rv_pred_vec / 1000 + 1e-10)
        abs_error = np.mean((self.rv_test_vec / 1000 - self.rv_pred_vec / 1000) ** 2)
        log_error = np.mean((self.rv_log_test_vec - self.rv_log_pred_vec) ** 2)
        return (
            abs_error,
            log_error,
            self.rv_test_vec / 1000,
            self.rv_pred_vec / 1000,
            self.rv_log_test_vec,
            self.rv_log_pred_vec,
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class HarModel(BaseModel):
    """HAR (Heterogeneous AutoRegressive) model on log realized volatility,
    fitted by hand-rolled gradient descent with a selectable loss:
    'mse', 'linex' (asymmetric exponential) or 'als' (asymmetric LS).

    Features: intercept plus daily, weekly (5-day) and monthly (22-day)
    averages of the lagged log-RV series.
    """
    def __init__(self, data_path, loss="mse", alpha=0.5):
        super().__init__(data_path)
        self.MONTH = 22
        self.WEEK = 5
        self.DAY = 1
        self.loss = loss
        # asymmetry parameter for the linex / als losses
        self.alpha = alpha
        self.learning_rate = 0.01
        self.tol = 1e-6
    def __prepare_data(self):
        """Builds the HAR design matrix and performs a 70/30 train/test split."""
        self.rv_vec = np.log(self.rv_vec + 1e-10)
        # trailing 22-day mean of log-RV, lagged one step via [:-1]
        self.rv_month_vec = (
            np.convolve(self.rv_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.sample_size = self.rv_month_vec.size
        # trailing 5-day mean, aligned to the monthly feature
        self.rv_week_vec = (
            np.convolve(self.rv_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.rv_day_vec = self.rv_vec[-self.sample_size - 1: -1]
        self.feat_mat = np.stack(
            [
                np.ones(shape=self.sample_size),
                self.rv_day_vec,
                self.rv_week_vec,
                self.rv_month_vec,
            ]
        ).T
        self.target_vec = self.rv_vec[-self.sample_size:]
        train_test_index = int(0.7 * self.sample_size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_train_vec = self.target_vec[:train_test_index]
        self.target_test_vec = self.target_vec[train_test_index:]
        self.init_mean = self.feat_train_mat.mean(axis=0)
        self.init_var = self.feat_train_mat.var(axis=0, ddof=1)
        # start: small intercept, daily weight dominant
        self.start_vec = np.array([self.init_mean[0] * 0.01, 0.9, 0.1, 0.1])
        self.weight_vec = self.start_vec
        return self
    def __gradient(self):
        """Returns (gradient, current loss value) at self.weight_vec.

        NOTE(review): the reported loss uses np.sum for 'mse'/'als' but
        np.mean for 'linex' -- values are not on the same scale."""
        target_est_vec = self.feat_train_mat @ self.weight_vec
        delta_vec = np.reshape(
            self.target_train_vec.flatten() - target_est_vec, newshape=(-1, 1)
        )
        if self.loss == "mse":
            grad_vec = (
                -2 * self.feat_train_mat.T @ delta_vec / self.feat_train_mat.shape[0]
            )
            error = np.sum(delta_vec ** 2)
        elif self.loss == "linex":
            # gradient of mean(exp(a*d) - a*d - 1) w.r.t. the weights
            grad_vec = (
                self.feat_train_mat.T
                @ (
                    np.ones(shape=(self.feat_train_mat.shape[0], 1)) * self.alpha
                    - self.alpha * np.exp(self.alpha * delta_vec)
                )
                / self.feat_train_mat.shape[0]
            )
            error = np.mean(np.exp(self.alpha * delta_vec) - self.alpha * delta_vec - 1)
        elif self.loss == "als":
            # asymmetric LS: weight alpha for over-, 1-alpha for under-prediction
            grad_vec = -(
                2
                * self.feat_train_mat.T
                @ (delta_vec * np.abs(self.alpha - np.int64(np.less(delta_vec, 0))))
                / self.feat_train_mat.shape[0]
            )
            error = np.sum(
                delta_vec ** 2 * np.abs(self.alpha - np.int64(np.less(delta_vec, 0)))
            )
        else:
            # unknown loss name: caller will fail on the None gradient
            grad_vec = None
            error = None
        return grad_vec, error
    def __optimize(self):
        """Plain gradient descent until the weight update is below self.tol."""
        iteration = 0
        while True:
            iteration += 1
            grad_vec, delta = self.__gradient()
            if iteration % 1000 == 0:
                print(f"Iteration: {iteration}, loss: {delta}")
            grad_vec = grad_vec.flatten()
            weight_vec = self.weight_vec - self.learning_rate * grad_vec
            if np.sum(np.abs(weight_vec - self.weight_vec)) < self.tol:
                self.estimate_vec = weight_vec
                return self
            self.weight_vec = weight_vec
    def __predict(self):
        """Linear forecasts on the held-out design matrix."""
        self.target_pred_vec = (self.feat_test_mat @ self.weight_vec).flatten()
        return self
    def __error(self):
        """Loss-consistent error in levels (exp of log-RV) and in logs."""
        delta_vec = np.exp(self.target_test_vec) - np.exp(self.target_pred_vec)
        delta_log_vec = self.target_test_vec - self.target_pred_vec
        if self.loss == "mse":
            abs_error = np.mean(delta_vec ** 2)
            log_error = np.mean(delta_log_vec ** 2)
        elif self.loss == "linex":
            abs_error = np.mean(
                np.exp(self.alpha * delta_vec) - self.alpha * delta_vec - 1
            )
            log_error = np.mean(
                np.exp(self.alpha * delta_log_vec) - self.alpha * delta_log_vec - 1
            )
        elif self.loss == "als":
            abs_error = np.mean(
                delta_vec ** 2 * np.abs(self.alpha - np.int64(np.less(delta_vec, 0)))
            )
            log_error = np.mean(
                delta_log_vec ** 2
                * np.abs(self.alpha - np.int64(np.less(delta_log_vec, 0)))
            )
        else:
            abs_error = None
            log_error = None
        return (
            abs_error,
            log_error,
            np.exp(self.target_test_vec),
            np.exp(self.target_pred_vec),
            self.target_test_vec,
            self.target_pred_vec,
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class LSTM(nn.Module):
    """Single-layer LSTM with a linear read-out head.

    forward() returns the prediction for the LAST element of the input
    sequence only.  The (h, c) state is kept in `hidden_cell` and is
    expected to be reset by the caller between independent sequences.
    """
    def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        # persistent (h_0, c_0), both all-zero initially
        self.hidden_cell = (
            torch.zeros(1, 1, hidden_layer_size),
            torch.zeros(1, 1, hidden_layer_size),
        )
    def forward(self, input_seq):
        seq_len = len(input_seq)
        # reshape to (seq_len, batch=1, input_size) as nn.LSTM expects
        lstm_out, self.hidden_cell = self.lstm(
            input_seq.view(seq_len, 1, -1), self.hidden_cell
        )
        predictions = self.linear(lstm_out.view(seq_len, -1))
        return predictions[-1]
class LstmModel(BaseModel):
    """LSTM forecaster for log realized volatility with a selectable loss:
    'mse', 'linex' (asymmetric exponential) or 'als' (asymmetric LS)."""
    def __init__(self, data_path, loss="mse", alpha=0.5):
        def linex_loss(pred_vec, target_vec):
            # LinEx loss: exponential penalty on one side, linear on the other
            delta_vec = target_vec - pred_vec
            return torch.sum(
                torch.exp(self.alpha * delta_vec) - self.alpha * delta_vec - 1
            )
        def als_loss(pred_vec, target_vec):
            # asymmetric least squares: weight alpha / (1 - alpha) by sign
            delta_vec = target_vec - pred_vec
            return torch.mean(
                delta_vec ** 2
                * torch.abs(
                    self.alpha - torch.less(delta_vec, 0).type(torch.DoubleTensor)
                )
            )
        super().__init__(data_path)
        self.loss = loss
        self.alpha = alpha
        self.learning_rate = 0.01
        self.tol = 1e-5
        self.depth = 10  # look-back window length
        self.n_epochs = 15
        if self.loss == "mse":
            self.loss_function = nn.MSELoss()
        elif self.loss == "linex":
            self.loss_function = linex_loss
        elif self.loss == "als":
            self.loss_function = als_loss
        else:
            # unknown loss name: fall back to MSE
            self.loss_function = nn.MSELoss()
    @staticmethod
    def __create_inout_sequences(input_vec, window_size):
        """Builds (window, next value) training pairs from a 1-D tensor."""
        inout_list = []
        input_size = np.array(input_vec.size())[0]
        for i in np.arange(input_size - window_size):
            train_seq = input_vec[i: i + window_size]
            train_label = input_vec[i + window_size: i + window_size + 1]
            inout_list.append((train_seq, train_label))
        return inout_list
    def __prepare_data(self):
        """Log-transforms the series, splits 70/30 and scales to [-1, 1]
        (the scaler is fitted on the training part only)."""
        self.rv_vec = np.log(self.rv_vec + 1e-10)
        train_test_index = int(0.7 * self.rv_vec.size)
        self.rv_train_vec = self.rv_vec[:train_test_index]
        self.rv_test_vec = self.rv_vec[train_test_index:]
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
        self.rv_train_scaled_vec = self.scaler.fit_transform(
            self.rv_train_vec.reshape(-1, 1)
        )
        self.rv_test_scaled_vec = self.scaler.transform(self.rv_test_vec.reshape(-1, 1))
        self.rv_train_scaled_vec = torch.FloatTensor(self.rv_train_scaled_vec).view(-1)
        self.train_list = self.__create_inout_sequences(
            self.rv_train_scaled_vec, self.depth
        )
        return self
    def __optimize(self):
        """Trains the LSTM with Adam, resetting the hidden state per window."""
        self.model = LSTM()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        single_loss = 0
        for i_epoch in np.arange(self.n_epochs):
            for seq_vec, label in self.train_list:
                optimizer.zero_grad()
                self.model.hidden_cell = (
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                )
                pred = self.model(seq_vec)
                single_loss = self.loss_function(pred, label)
                single_loss.backward()
                optimizer.step()
            print(f"epoch: {i_epoch:3}, loss: {single_loss.item():10.8f}")
        return self
    def __predict(self):
        """Rolling one-step-ahead forecasts over the test sample, then
        inverse-scales the predictions back to log-RV units."""
        self.model.eval()
        self.rv_test_list = (
            self.rv_train_scaled_vec[-self.depth:].tolist()
            + self.rv_test_scaled_vec.flatten().tolist()
        )
        self.rv_pred_vec = []
        for i_elem in np.arange(self.rv_test_scaled_vec.size):
            seq_vec = torch.FloatTensor(self.rv_test_list[i_elem: i_elem + self.depth])
            with torch.no_grad():
                # BUGFIX: was `self.model.hidden`, an attribute forward() never
                # reads; the state was therefore never reset between windows.
                self.model.hidden_cell = (
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                )
                self.rv_pred_vec.append(self.model(seq_vec).item())
        self.rv_pred_vec = self.scaler.inverse_transform(
            np.array(self.rv_pred_vec).reshape(-1, 1)
        )
        return self
    def __error(self):
        """Loss-consistent error metrics in levels and in logs."""
        delta_vec = np.exp(self.rv_test_vec) - np.exp(self.rv_pred_vec)
        delta_log_vec = self.rv_test_vec - self.rv_pred_vec
        if self.loss == "mse":
            abs_error = np.mean(delta_vec ** 2)
            log_error = np.mean(delta_log_vec ** 2)
        elif self.loss == "linex":
            abs_error = np.mean(
                np.exp(self.alpha * delta_vec) - self.alpha * delta_vec - 1
            )
            log_error = np.mean(
                np.exp(self.alpha * delta_log_vec) - self.alpha * delta_log_vec - 1
            )
        elif self.loss == "als":
            abs_error = np.mean(
                delta_vec ** 2 * np.abs(self.alpha - np.int64(np.less(delta_vec, 0)))
            )
            log_error = np.mean(
                delta_log_vec ** 2
                * np.abs(self.alpha - np.int64(np.less(delta_log_vec, 0)))
            )
        else:
            abs_error = None
            log_error = None
        return (
            abs_error,
            log_error,
            np.exp(self.rv_test_vec),
            np.exp(self.rv_pred_vec),
            self.rv_test_vec,
            self.rv_pred_vec,
        )
    def estimate(self):
        """Full pipeline: prepare -> train -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class LstmModModel(BaseModel):
    """LSTM forecaster for log realized VARIANCE (log rv^2), MSE loss only."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.learning_rate = 0.01
        self.tol = 1e-5
        self.depth = 10  # look-back window length
        self.n_epochs = 5
        self.loss_function = nn.MSELoss()
    @staticmethod
    def __create_inout_sequences(input_vec, window_size):
        """Builds (window, next value) training pairs from a 1-D tensor."""
        inout_list = []
        input_size = np.array(input_vec.size())[0]
        for i in np.arange(input_size - window_size):
            train_seq = input_vec[i: i + window_size]
            train_label = input_vec[i + window_size: i + window_size + 1]
            inout_list.append((train_seq, train_label))
        return inout_list
    def __prepare_data(self):
        """Log-transforms rv^2, splits 70/30 and scales to [-1, 1]
        (the scaler is fitted on the training part only)."""
        self.rv_vec = np.log(self.rv_vec**2)
        train_test_index = int(0.7 * self.rv_vec.size)
        self.rv_train_vec = self.rv_vec[:train_test_index]
        self.rv_test_vec = self.rv_vec[train_test_index:]
        self.scaler = MinMaxScaler(feature_range=(-1, 1))
        self.rv_train_scaled_vec = self.scaler.fit_transform(
            self.rv_train_vec.reshape(-1, 1)
        )
        self.rv_test_scaled_vec = self.scaler.transform(self.rv_test_vec.reshape(-1, 1))
        self.rv_train_scaled_vec = torch.FloatTensor(self.rv_train_scaled_vec).view(-1)
        self.train_list = self.__create_inout_sequences(
            self.rv_train_scaled_vec, self.depth
        )
        return self
    def __optimize(self):
        """Trains the LSTM with Adam, resetting the hidden state per window."""
        self.model = LSTM()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        single_loss = 0
        for i_epoch in np.arange(self.n_epochs):
            for seq_vec, label in self.train_list:
                optimizer.zero_grad()
                self.model.hidden_cell = (
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                )
                pred = self.model(seq_vec)
                single_loss = self.loss_function(pred, label)
                single_loss.backward()
                optimizer.step()
            print(f"epoch: {i_epoch:3}, loss: {single_loss.item():10.8f}")
        return self
    def __predict(self):
        """Rolling one-step-ahead forecasts over the test sample, then
        inverse-scales the predictions back to log-variance units."""
        self.model.eval()
        self.rv_test_list = (
            self.rv_train_scaled_vec[-self.depth:].tolist()
            + self.rv_test_scaled_vec.flatten().tolist()
        )
        self.rv_pred_vec = []
        for i_elem in np.arange(self.rv_test_scaled_vec.size):
            seq_vec = torch.FloatTensor(self.rv_test_list[i_elem: i_elem + self.depth])
            with torch.no_grad():
                # BUGFIX: was `self.model.hidden`, an attribute forward() never
                # reads; the state was therefore never reset between windows.
                self.model.hidden_cell = (
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                    torch.zeros(1, 1, self.model.hidden_layer_size),
                )
                self.rv_pred_vec.append(self.model(seq_vec).item())
        self.rv_pred_vec = self.scaler.inverse_transform(
            np.array(self.rv_pred_vec).reshape(-1, 1)
        )
        return self
    def __error(self):
        """MSE in log-variance units and in variance levels."""
        delta_rv_vec = self.rv_test_vec - self.rv_pred_vec
        delta_RV_vec = np.exp(self.rv_test_vec) - np.exp(self.rv_pred_vec)
        rv_error = np.mean(delta_rv_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        return (
            rv_error,
            RV_error,
            self.rv_test_vec,
            self.rv_pred_vec,
            np.exp(self.rv_test_vec),
            np.exp(self.rv_pred_vec),
            None,
            None
        )
    def estimate(self):
        """Full pipeline: prepare -> train -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class LstmModelWithJumps(BaseModel):
    """Two-LSTM forecaster: one network for the log continuous component
    (c_vec) and one for the log jump component (j_vec); both see the same
    concatenated (c-window + j-window) input sequence."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.learning_rate = 0.0003
        self.tol = 1e-5
        self.weight_decay = 0.03  # L2 regularization for the continuous net
        self.depth = 10  # look-back window length per component
        self.n_epochs = 5
        self.loss_function = nn.MSELoss()
    @staticmethod
    def __create_inout_sequences(input_c_vec, input_j_vec, window_size):
        """Builds ((c-window + j-window), label) pairs; the same joint input
        sequence is paired with the continuous and the jump label."""
        inout_c_list = []
        inout_j_list = []
        input_size = np.array(input_c_vec.size())[0]
        for i in np.arange(input_size - window_size):
            train_seq = torch.FloatTensor(
                list(input_c_vec[i: i + window_size])
                + list(input_j_vec[i: i + window_size])
            ).view(-1)
            train_c_label = input_c_vec[i + window_size: i + window_size + 1]
            train_j_label = input_j_vec[i + window_size: i + window_size + 1]
            inout_c_list.append((train_seq, train_c_label))
            inout_j_list.append((train_seq, train_j_label))
        return inout_c_list, inout_j_list
    def __prepare_data(self):
        """Splits both components 70/30 and scales each to [-1, 1] with its
        own scaler fitted on the training part only."""
        train_test_index = int(0.7 * self.rv_vec.size)
        self.c_train_vec = self.c_vec[:train_test_index]
        self.target_c_test_vec = self.c_vec[train_test_index:]
        self.j_train_vec = self.j_vec[:train_test_index]
        self.target_j_test_vec = self.j_vec[train_test_index:]
        self.scaler_c = MinMaxScaler(feature_range=(-1, 1))
        self.scaler_j = MinMaxScaler(feature_range=(-1, 1))
        self.c_train_scaled_vec = self.scaler_c.fit_transform(
            self.c_train_vec.reshape(-1, 1)
        )
        self.j_train_scaled_vec = self.scaler_j.fit_transform(
            self.j_train_vec.reshape(-1, 1)
        )
        self.c_test_scaled_vec = self.scaler_c.transform(
            self.target_c_test_vec.reshape(-1, 1)
        )
        self.j_test_scaled_vec = self.scaler_j.transform(
            self.target_j_test_vec.reshape(-1, 1)
        )
        self.c_train_scaled_vec = torch.FloatTensor(self.c_train_scaled_vec).view(-1)
        self.j_train_scaled_vec = torch.FloatTensor(self.j_train_scaled_vec).view(-1)
        self.c_train_list, self.j_train_list = self.__create_inout_sequences(
            self.c_train_scaled_vec, self.j_train_scaled_vec, self.depth
        )
        return self
    def __optimize(self):
        """Trains both LSTMs with Adam; the jump net uses a third of the
        continuous net's weight decay."""
        self.model_c = LSTM()
        self.model_j = LSTM()
        optimizer_c = torch.optim.Adam(
            self.model_c.parameters(), lr=self.learning_rate,
            weight_decay=self.weight_decay)
        optimizer_j = torch.optim.Adam(
            self.model_j.parameters(), lr=self.learning_rate,
            weight_decay=self.weight_decay / 3)
        single_loss = 0
        for i_epoch in np.arange(self.n_epochs):
            for seq_vec, label in self.c_train_list:
                optimizer_c.zero_grad()
                self.model_c.hidden_cell = (
                    torch.zeros(1, 1, self.model_c.hidden_layer_size),
                    torch.zeros(1, 1, self.model_c.hidden_layer_size),
                )
                pred = self.model_c(seq_vec)
                single_loss = self.loss_function(pred, label)
                single_loss.backward()
                optimizer_c.step()
            print(
                f"epoch: {i_epoch:3}, loss: {single_loss.item():10.8f}, continuous part"
            )
        single_loss = 0
        for i_epoch in np.arange(self.n_epochs):
            for seq_vec, label in self.j_train_list:
                optimizer_j.zero_grad()
                self.model_j.hidden_cell = (
                    torch.zeros(1, 1, self.model_j.hidden_layer_size),
                    torch.zeros(1, 1, self.model_j.hidden_layer_size),
                )
                pred = self.model_j(seq_vec)
                single_loss = self.loss_function(pred, label)
                single_loss.backward()
                optimizer_j.step()
            print(f"epoch: {i_epoch:3}, loss: {single_loss.item():10.8f}, jump part")
        return self
    def __predict(self):
        """Rolling one-step-ahead forecasts for both components, then
        inverse-scales each back to log units.

        NOTE(review): unlike training, the prediction windows feed each net
        only its own component's history (no concatenation) -- confirm this
        asymmetry is intended."""
        self.model_c.eval()
        self.model_j.eval()
        self.c_test_list = (
            self.c_train_scaled_vec[-self.depth:].tolist()
            + self.c_test_scaled_vec.flatten().tolist()
        )
        self.target_c_pred_vec = []
        for i_elem in np.arange(self.c_test_scaled_vec.size):
            seq_vec = torch.FloatTensor(self.c_test_list[i_elem: i_elem + self.depth])
            with torch.no_grad():
                # BUGFIX: was `self.model_c.hidden`, which forward() never
                # reads; the state was therefore never reset between windows.
                self.model_c.hidden_cell = (
                    torch.zeros(1, 1, self.model_c.hidden_layer_size),
                    torch.zeros(1, 1, self.model_c.hidden_layer_size),
                )
                self.target_c_pred_vec.append(self.model_c(seq_vec).item())
        self.target_c_pred_vec = self.scaler_c.inverse_transform(
            np.array(self.target_c_pred_vec).reshape(-1, 1)
        )
        self.j_test_list = (
            self.j_train_scaled_vec[-self.depth:].tolist()
            + self.j_test_scaled_vec.flatten().tolist()
        )
        self.target_j_pred_vec = []
        for i_elem in np.arange(self.j_test_scaled_vec.size):
            seq_vec = torch.FloatTensor(self.j_test_list[i_elem: i_elem + self.depth])
            with torch.no_grad():
                # BUGFIX: same `hidden` -> `hidden_cell` fix for the jump net.
                self.model_j.hidden_cell = (
                    torch.zeros(1, 1, self.model_j.hidden_layer_size),
                    torch.zeros(1, 1, self.model_j.hidden_layer_size),
                )
                self.target_j_pred_vec.append(self.model_j(seq_vec).item())
        self.target_j_pred_vec = self.scaler_j.inverse_transform(
            np.array(self.target_j_pred_vec).reshape(-1, 1)
        )
        return self
    def __error(self):
        """Per-component and reconstructed-RV error metrics, in logs and levels."""
        delta_c_vec = self.target_c_test_vec - self.target_c_pred_vec
        delta_j_vec = self.target_j_test_vec - self.target_j_pred_vec
        delta_C_vec = np.exp(self.target_c_test_vec) - np.exp(self.target_c_pred_vec)
        delta_J_vec = np.exp(self.target_j_test_vec) - np.exp(self.target_j_pred_vec)
        delta_RV_vec = delta_C_vec + delta_J_vec
        # reconstruct log realized variance from the two components
        self.rv_pred_vec = np.log(np.exp(self.target_c_pred_vec) + np.exp(self.target_j_pred_vec) - 1)
        self.rv_test_vec = np.log(np.exp(self.target_c_test_vec) + np.exp(self.target_j_test_vec) - 1)
        delta_rv_vec = self.rv_test_vec - self.rv_pred_vec
        c_error = np.mean(delta_c_vec ** 2)
        j_error = np.mean(delta_j_vec ** 2)
        C_error = np.mean(delta_C_vec ** 2)
        J_error = np.mean(delta_J_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        rv_error = np.mean(delta_rv_vec ** 2)
        return (
            c_error,
            j_error,
            C_error,
            J_error,
            RV_error,
            rv_error,
            self.target_c_test_vec,
            self.target_c_pred_vec,
            self.target_j_test_vec,
            self.target_j_pred_vec,
            np.exp(self.target_c_test_vec),
            np.exp(self.target_c_pred_vec),
            np.exp(self.target_j_test_vec) - 1,
            np.exp(self.target_j_pred_vec) - 1,
            self.rv_test_vec,
            self.rv_pred_vec,
            None,
            None,
            None
        )
    def estimate(self):
        """Full pipeline: prepare -> train -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class RandForestModelWithJumps(BaseModel):
    """Random-forest forecaster with separate models for the log continuous
    (c_vec) and log jump (j_vec) components; both use the concatenated
    (c-window + j-window) features."""
    def __init__(self, data_path):
        super().__init__(data_path)
        # NOTE(review): learning_rate / tol / n_epochs are unused here
        # (random forests have no gradient training loop)
        self.learning_rate = 0.01
        self.tol = 1e-5
        self.depth = 10
        self.n_epochs = 150
    @staticmethod
    def __split_sequence(seq_c_vec, seq_j_vec, n_steps):
        """Windows both series into (features, next c, next j) samples;
        each feature row is the c-window followed by the j-window."""
        data_mat, target_c_vec, target_j_vec = list(), list(), list()
        for idx in np.arange(seq_c_vec.size):
            end_ix = idx + n_steps
            if end_ix > len(seq_c_vec) - 1:
                break
            seq_cx, seq_cy, seq_jx, seq_jy = (
                seq_c_vec[idx:end_ix],
                seq_c_vec[end_ix],
                seq_j_vec[idx:end_ix],
                seq_j_vec[end_ix],
            )
            data_mat.append(list(seq_cx) + list(seq_jx))
            target_c_vec.append(seq_cy)
            target_j_vec.append(seq_jy)
        return np.array(data_mat), np.array(target_c_vec), np.array(target_j_vec)
    def __prepare_data(self):
        """Builds the windowed dataset and splits it 70/30 into train/test."""
        self.feat_mat, self.target_c_vec, self.target_j_vec = self.__split_sequence(
            self.c_vec, self.j_vec, self.depth
        )
        train_test_index = int(0.7 * self.target_c_vec.size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_c_train_vec = self.target_c_vec[:train_test_index]
        self.target_c_test_vec = self.target_c_vec[train_test_index:]
        self.target_j_train_vec = self.target_j_vec[:train_test_index]
        self.target_j_test_vec = self.target_j_vec[train_test_index:]
        return self
    def __optimize(self):
        """Fits one RandomForestRegressor per component (fixed seeds)."""
        self.model_c = RandomForestRegressor(random_state=0)
        self.model_j = RandomForestRegressor(random_state=42)
        self.model_c.fit(self.feat_train_mat, self.target_c_train_vec)
        self.model_j.fit(self.feat_train_mat, self.target_j_train_vec)
        return self
    def __predict(self):
        """Predicts both components on the held-out feature matrix."""
        self.target_c_pred_vec = self.model_c.predict(self.feat_test_mat)
        self.target_j_pred_vec = self.model_j.predict(self.feat_test_mat)
        return self
    def __error(self):
        """Per-component and reconstructed-RV error metrics; also returns
        the fitted models and the test feature matrix."""
        delta_c_vec = self.target_c_test_vec - self.target_c_pred_vec
        delta_j_vec = self.target_j_test_vec - self.target_j_pred_vec
        delta_C_vec = np.exp(self.target_c_test_vec) - np.exp(self.target_c_pred_vec)
        delta_J_vec = np.exp(self.target_j_test_vec) - np.exp(self.target_j_pred_vec)
        delta_RV_vec = delta_C_vec + delta_J_vec
        # reconstruct log realized variance from the two components
        self.rv_pred_vec = np.log(np.exp(self.target_c_pred_vec) + np.exp(self.target_j_pred_vec) - 1)
        self.rv_test_vec = np.log(np.exp(self.target_c_test_vec) + np.exp(self.target_j_test_vec) - 1)
        delta_rv_vec = self.rv_test_vec - self.rv_pred_vec
        c_error = np.mean(delta_c_vec ** 2)
        j_error = np.mean(delta_j_vec ** 2)
        C_error = np.mean(delta_C_vec ** 2)
        J_error = np.mean(delta_J_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        rv_error = np.mean(delta_rv_vec ** 2)
        return (
            c_error,
            j_error,
            C_error,
            J_error,
            RV_error,
            rv_error,
            self.target_c_test_vec,
            self.target_c_pred_vec,
            self.target_j_test_vec,
            self.target_j_pred_vec,
            np.exp(self.target_c_test_vec),
            np.exp(self.target_c_pred_vec),
            np.exp(self.target_j_test_vec) - 1,
            np.exp(self.target_j_pred_vec) - 1,
            self.rv_test_vec,
            self.rv_pred_vec,
            self.model_c,
            self.model_j,
            self.feat_test_mat,
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class HarModelWithJumps(BaseModel):
    """HAR-CJ model fitted by hand-rolled gradient descent: separate weight
    vectors for the log continuous and log jump targets, both regressed on
    the shared daily/weekly/monthly features of BOTH components."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.MONTH = 22
        self.WEEK = 5
        self.DAY = 1
        self.learning_rate = 0.0001
        self.tol = 1e-6
    def __prepare_data(self):
        """Builds the 7-column HAR-CJ design matrix (intercept + c and j at
        daily/weekly/monthly horizons) and splits 70/30."""
        # trailing 22-day mean of log-C, lagged one step via [:-1]
        self.c_month_vec = (
            np.convolve(self.c_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.sample_size = self.c_month_vec.size
        self.c_week_vec = (
            np.convolve(self.c_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.c_day_vec = self.c_vec[-self.sample_size - 1: -1]
        self.j_month_vec = (
            np.convolve(self.j_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.j_week_vec = (
            np.convolve(self.j_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.j_day_vec = self.j_vec[-self.sample_size - 1: -1]
        self.feat_mat = np.stack(
            [
                np.ones(shape=self.sample_size),
                self.c_day_vec,
                self.c_week_vec,
                self.c_month_vec,
                self.j_day_vec,
                self.j_week_vec,
                self.j_month_vec,
            ]
        ).T
        self.target_c_vec = self.c_vec[-self.sample_size:]
        self.target_j_vec = self.j_vec[-self.sample_size:]
        train_test_index = int(0.7 * self.sample_size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_c_train_vec = self.target_c_vec[:train_test_index]
        self.target_c_test_vec = self.target_c_vec[train_test_index:]
        self.target_j_train_vec = self.target_j_vec[:train_test_index]
        self.target_j_test_vec = self.target_j_vec[train_test_index:]
        self.init_mean = self.feat_train_mat.mean(axis=0)
        self.init_var = self.feat_train_mat.var(axis=0, ddof=1)
        # shared starting point for both weight vectors
        self.start_vec = np.array([self.init_mean[0] * 0.01, 0.9, 0.1, 0.1, 0.9, 0.1, 0.1])
        self.weight_c_vec = self.start_vec
        self.weight_j_vec = self.start_vec
    ​    return self
    def __gradient(self, is_cont=True):
        """MSE gradient and loss for either the continuous (is_cont=True)
        or the jump target at the current weights."""
        target_est_vec = self.feat_train_mat @ (
            self.weight_c_vec if is_cont else self.weight_j_vec
        )
        delta_vec = np.reshape(
            (self.target_c_train_vec.flatten() if is_cont else self.target_j_train_vec.flatten())
            - target_est_vec,
            newshape=(-1, 1),
        )
        grad_vec = -2 * self.feat_train_mat.T @ delta_vec / self.feat_train_mat.shape[0]
        error = np.sum(delta_vec ** 2)
        return grad_vec, error
    def __optimize(self):
        """Two sequential descent loops.

        The jump loop deliberately uses a 10x larger step and a 10000x
        tighter stopping tolerance than the continuous loop."""
        iteration = 0
        while True:
            iteration += 1
            grad_vec, delta = self.__gradient(is_cont=True)
            if iteration % 1000 == 0:
                print(f"Iteration: {iteration}, loss: {delta}, continuous part")
            grad_vec = grad_vec.flatten()
            weight_c_vec = self.weight_c_vec - self.learning_rate * grad_vec
            if np.sum(np.abs(weight_c_vec - self.weight_c_vec)) < self.tol:
                self.estimate_c_vec = weight_c_vec
                break
            self.weight_c_vec = weight_c_vec
        iteration = 0
        while True:
            iteration += 1
            grad_vec, delta = self.__gradient(is_cont=False)
            if iteration % 1000 == 0:
                print(f"Iteration: {iteration}, loss: {delta}, jump part")
            grad_vec = grad_vec.flatten()
            weight_j_vec = self.weight_j_vec - self.learning_rate * 10 * grad_vec
            if np.sum(np.abs(weight_j_vec - self.weight_j_vec)) < self.tol / 10000:
                self.estimate_j_vec = weight_j_vec
                return self
            self.weight_j_vec = weight_j_vec
    def __predict(self):
        """Linear forecasts for both components on the held-out features."""
        self.target_c_pred_vec = (self.feat_test_mat @ self.weight_c_vec).flatten()
        self.target_j_pred_vec = (self.feat_test_mat @ self.weight_j_vec).flatten()
        return self
    def __error(self):
        """Per-component and combined error metrics, in logs and levels."""
        delta_c_vec = self.target_c_test_vec - self.target_c_pred_vec
        delta_j_vec = self.target_j_test_vec - self.target_j_pred_vec
        delta_C_vec = np.exp(self.target_c_test_vec) - np.exp(self.target_c_pred_vec)
        delta_J_vec = np.exp(self.target_j_test_vec) - np.exp(self.target_j_pred_vec)
        delta_rv_vec = delta_C_vec + delta_J_vec
        c_error = np.mean(delta_c_vec ** 2)
        j_error = np.mean(delta_j_vec ** 2)
        C_error = np.mean(delta_C_vec ** 2)
        J_error = np.mean(delta_J_vec ** 2)
        rv_error = np.mean(delta_rv_vec ** 2)
        return (
            c_error,
            j_error,
            C_error,
            J_error,
            rv_error,
            self.target_c_test_vec,
            self.target_c_pred_vec,
            self.target_j_test_vec,
            self.target_j_pred_vec,
            np.exp(self.target_c_test_vec),
            np.exp(self.target_c_pred_vec),
            np.exp(self.target_j_test_vec) - 1,
            np.exp(self.target_j_pred_vec) - 1,
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class HarModelOLS(BaseModel):
    """HAR model on log realized variance (log rv^2), estimated by OLS
    (statsmodels) instead of hand-rolled gradient descent."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.MONTH = 22
        self.WEEK = 5
        self.DAY = 1
    def __prepare_data(self):
        """Builds the HAR design matrix on log rv^2 and splits 70/30."""
        # trailing 22-day mean of log-variance, lagged one step via [:-1]
        self.rv_month_vec = (
            np.convolve(np.log(self.rv_vec**2), np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.sample_size = self.rv_month_vec.size
        self.rv_week_vec = (
            np.convolve(np.log(self.rv_vec**2), np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.rv_day_vec = np.log(self.rv_vec[-self.sample_size - 1: -1] ** 2)
        self.feat_mat = np.stack(
            [
                np.ones(shape=self.sample_size),
                self.rv_day_vec,
                self.rv_week_vec,
                self.rv_month_vec,
            ]
        ).T
        self.target_vec = np.log(self.rv_vec[-self.sample_size:]**2)
        train_test_index = int(0.7 * self.sample_size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_train_vec = self.target_vec[:train_test_index]
        self.target_test_vec = self.target_vec[train_test_index:]
        return self
    def __optimize(self):
        """Fits OLS on the training sample."""
        self.model = sm.OLS(self.target_train_vec, self.feat_train_mat).fit()
        return self
    def __predict(self):
        """OLS forecasts on the held-out design matrix."""
        self.target_pred_vec = self.model.predict(self.feat_test_mat)
        return self
    def __error(self):
        """MSE in log-variance units and in variance levels; also returns
        the fitted statsmodels results object."""
        delta_rv_vec = self.target_test_vec - self.target_pred_vec
        delta_RV_vec = np.exp(self.target_test_vec) - np.exp(self.target_pred_vec)
        rv_error = np.mean(delta_rv_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        return (
            rv_error,
            RV_error,
            self.target_test_vec,
            self.target_pred_vec,
            np.exp(self.target_test_vec),
            np.exp(self.target_pred_vec),
            self.model,
            None
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class HarCJModelOLS(BaseModel):
    """HAR-CJ model estimated by OLS: log realized variance regressed on
    the daily/weekly/monthly features of BOTH the continuous (c) and
    jump (j) components."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.MONTH = 22
        self.WEEK = 5
        self.DAY = 1
    def __prepare_data(self):
        """Builds the 7-column HAR-CJ design matrix and splits 70/30."""
        # trailing 22-day mean of log-C, lagged one step via [:-1]
        self.c_month_vec = (
            np.convolve(self.c_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.sample_size = self.c_month_vec.size
        self.c_week_vec = (
            np.convolve(self.c_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.c_day_vec = self.c_vec[-self.sample_size - 1: -1]
        self.j_month_vec = (
            np.convolve(self.j_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.j_week_vec = (
            np.convolve(self.j_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.j_day_vec = self.j_vec[-self.sample_size - 1: -1]
        self.feat_mat = np.stack(
            [
                np.ones(shape=self.sample_size),
                self.c_day_vec,
                self.c_week_vec,
                self.c_month_vec,
                self.j_day_vec,
                self.j_week_vec,
                self.j_month_vec,
            ]
        ).T
        self.target_vec = np.log(self.rv_vec[-self.sample_size:]**2)
        train_test_index = int(0.7 * self.sample_size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_train_vec = self.target_vec[:train_test_index]
        self.target_test_vec = self.target_vec[train_test_index:]
        return self
    def __optimize(self):
        """Fits OLS on the training sample."""
        self.model = sm.OLS(self.target_train_vec, self.feat_train_mat).fit()
        return self
    def __predict(self):
        """OLS forecasts on the held-out design matrix."""
        self.target_pred_vec = self.model.predict(self.feat_test_mat)
        return self
    def __error(self):
        """MSE in log-variance units and in variance levels; also returns
        the fitted statsmodels results object."""
        delta_rv_vec = self.target_test_vec - self.target_pred_vec
        delta_RV_vec = np.exp(self.target_test_vec) - np.exp(self.target_pred_vec)
        rv_error = np.mean(delta_rv_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        return (
            rv_error,
            RV_error,
            self.target_test_vec,
            self.target_pred_vec,
            np.exp(self.target_test_vec),
            np.exp(self.target_pred_vec),
            self.model,
            None
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class HarCJModModelOLS(BaseModel):
    """Modified HAR-CJ: two separate OLS regressions (one per component,
    continuous and jump) on the shared 7-column HAR-CJ design matrix;
    realized variance is then reconstructed from the two forecasts."""
    def __init__(self, data_path):
        super().__init__(data_path)
        self.MONTH = 22
        self.WEEK = 5
        self.DAY = 1
    def __prepare_data(self):
        """Builds the 7-column HAR-CJ design matrix and per-component
        targets, then splits 70/30."""
        # trailing 22-day mean of log-C, lagged one step via [:-1]
        self.c_month_vec = (
            np.convolve(self.c_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.sample_size = self.c_month_vec.size
        self.c_week_vec = (
            np.convolve(self.c_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.c_day_vec = self.c_vec[-self.sample_size - 1: -1]
        self.j_month_vec = (
            np.convolve(self.j_vec, np.ones(self.MONTH, dtype=int), "valid")[:-1]
            / self.MONTH
        )
        self.j_week_vec = (
            np.convolve(self.j_vec, np.ones(self.WEEK, dtype=int), "valid")[
                -self.sample_size - 1: -1
            ]
            / self.WEEK
        )
        self.j_day_vec = self.j_vec[-self.sample_size - 1: -1]
        self.feat_mat = np.stack(
            [
                np.ones(shape=self.sample_size),
                self.c_day_vec,
                self.c_week_vec,
                self.c_month_vec,
                self.j_day_vec,
                self.j_week_vec,
                self.j_month_vec,
            ]
        ).T
        self.target_c_vec = self.c_vec[-self.sample_size:]
        self.target_j_vec = self.j_vec[-self.sample_size:]
        train_test_index = int(0.7 * self.sample_size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_c_train_vec = self.target_c_vec[:train_test_index]
        self.target_c_test_vec = self.target_c_vec[train_test_index:]
        self.target_j_train_vec = self.target_j_vec[:train_test_index]
        self.target_j_test_vec = self.target_j_vec[train_test_index:]
        return self
    def __optimize(self):
        """Fits one OLS model per component on the shared features."""
        self.model_c = sm.OLS(self.target_c_train_vec, self.feat_train_mat).fit()
        self.model_j = sm.OLS(self.target_j_train_vec, self.feat_train_mat).fit()
        return self
    def __predict(self):
        """OLS forecasts for both components on the held-out features."""
        self.target_c_pred_vec = self.model_c.predict(self.feat_test_mat)
        self.target_j_pred_vec = self.model_j.predict(self.feat_test_mat)
        return self
    def __error(self):
        """Per-component and reconstructed-RV error metrics.

        The reconstruction clamps the jump forecast at zero via np.maximum
        (unlike the LSTM/forest siblings) -- presumably to keep the log
        argument positive; confirm this asymmetry is intended."""
        delta_c_vec = self.target_c_test_vec - self.target_c_pred_vec
        delta_j_vec = self.target_j_test_vec - self.target_j_pred_vec
        delta_C_vec = np.exp(self.target_c_test_vec) - np.exp(self.target_c_pred_vec)
        delta_J_vec = np.exp(self.target_j_test_vec) - np.exp(self.target_j_pred_vec)
        delta_RV_vec = delta_C_vec + delta_J_vec
        self.rv_pred_vec = np.log(np.exp(self.target_c_pred_vec) + np.exp(np.maximum(self.target_j_pred_vec, 0)) - 1)
        self.rv_test_vec = np.log(np.exp(self.target_c_test_vec) + np.exp(np.maximum(self.target_j_test_vec, 0)) - 1)
        delta_rv_vec = self.rv_test_vec - self.rv_pred_vec
        c_error = np.mean(delta_c_vec ** 2)
        j_error = np.mean(delta_j_vec ** 2)
        C_error = np.mean(delta_C_vec ** 2)
        J_error = np.mean(delta_J_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        rv_error = np.mean(delta_rv_vec ** 2)
        return (
            c_error,
            j_error,
            C_error,
            J_error,
            RV_error,
            rv_error,
            self.target_c_test_vec,
            self.target_c_pred_vec,
            self.target_j_test_vec,
            self.target_j_pred_vec,
            np.exp(self.target_c_test_vec),
            np.exp(self.target_c_pred_vec),
            np.exp(self.target_j_test_vec) - 1,
            np.exp(self.target_j_pred_vec) - 1,
            self.rv_test_vec,
            self.rv_pred_vec,
            self.model_c,
            self.model_j,
            None
        )
    def estimate(self):
        """Full pipeline: prepare -> fit -> forecast -> error metrics."""
        return self.__prepare_data().__optimize().__predict().__error()
class RandForestModel(BaseModel):
    """
    Random-forest autoregression: predicts log(rv**2) from its own previous
    `depth` values using sklearn's RandomForestRegressor.
    """
    def __init__(self, data_path):
        super().__init__(data_path)
        # NOTE(review): learning_rate, tol and n_epochs are not read anywhere
        # in this class -- possibly leftovers from another model; confirm
        # before removing.
        self.learning_rate = 0.01
        self.tol = 1e-5
        self.depth = 10
        self.n_epochs = 150
    @staticmethod
    def __split_sequence(seq_vec, n_steps):
        """Turn a 1-D series into (window of n_steps, next value) pairs."""
        data_mat, target_vec = list(), list()
        for idx in np.arange(seq_vec.size):
            end_ix = idx + n_steps
            # Stop once the window would run past the last usable target.
            if end_ix > len(seq_vec) - 1:
                break
            seq_x, seq_y = seq_vec[idx:end_ix], seq_vec[end_ix]
            data_mat.append(seq_x)
            target_vec.append(seq_y)
        return np.array(data_mat), np.array(target_vec)
    def __prepare_data(self):
        """Build supervised windows over log(rv**2) and split 70/30 in time."""
        self.feat_mat, self.target_vec = self.__split_sequence(np.log(self.rv_vec**2), self.depth)
        train_test_index = int(0.7 * self.target_vec.size)
        self.feat_train_mat = self.feat_mat[:train_test_index]
        self.feat_test_mat = self.feat_mat[train_test_index:]
        self.target_train_vec = self.target_vec[:train_test_index]
        self.target_test_vec = self.target_vec[train_test_index:]
        return self
    def __optimize(self):
        # Fixed seed for reproducible forests.
        self.model = RandomForestRegressor(random_state=0)
        self.model.fit(self.feat_train_mat, self.target_train_vec)
        return self
    def __predict(self):
        self.target_pred_vec = self.model.predict(self.feat_test_mat)
        return self
    def __error(self):
        """Score predictions: MSE in log space and in levels via exp()."""
        delta_rv_vec = self.target_test_vec - self.target_pred_vec
        delta_RV_vec = np.exp(self.target_test_vec) - np.exp(self.target_pred_vec)
        rv_error = np.mean(delta_rv_vec ** 2)
        RV_error = np.mean(delta_RV_vec ** 2)
        return (
            rv_error,
            RV_error,
            self.target_test_vec,
            self.target_pred_vec,
            np.exp(self.target_test_vec),
            np.exp(self.target_pred_vec),
            self.model,
            self.feat_test_mat,
        )
    def estimate(self):
        """Run the full prepare -> fit -> predict -> score pipeline."""
        return self.__prepare_data().__optimize().__predict().__error()
if __name__ == "__main__":
    # Import-only module: the model classes above are driven from elsewhere;
    # there is deliberately no CLI entry point.
    pass
| StarcoderdataPython |
# filename: app.py
#!/usr/bin/env python3
"""
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
with the License. A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions
and limitations under the License.
"""
from aws_cdk import (
aws_ec2 as ec2,
core,
)
from redshift_benchmark.lib.cdkVPCStack import VPCStack
from redshift_benchmark.lib.cdkRedshiftStack import RedshiftStack
from redshift_benchmark.lib.cdkInitialAssets import S3Assets
from redshift_benchmark.redshiftBenchmarkStack import RedshiftBenchmarkStack
# CDK application root; all stacks below are children of this app.
app = core.App()
#################### Upload scripts to S3 that could be inferred by following tasks ######################
asset = S3Assets(app, "repository",local_directory="scripts")
############ Set up VPC and redshift cluster, redshift cluster will reside in public subnet ##############
vpc_stack = VPCStack(app,"vpc-stack")
redshift_stack = RedshiftStack(app,"redshift-stack",vpc_stack)
# Use glue workflow and jobs to conduct benchmark tasks include parallel query execution and concurrent query execution
benchmark_workflow = RedshiftBenchmarkStack(app,"benchmark-workflow"
    ,dbname=redshift_stack.get_cluster.db_name
    ,host=redshift_stack.get_cluster.attr_endpoint_address
    ,port=redshift_stack.get_cluster.attr_endpoint_port
    ,username=redshift_stack.get_cluster.master_username
    ,password=redshift_stack.get_cluster.master_user_password
    ,s3_bucket=asset.get_bucket
    ,rs_role_arn=redshift_stack.get_role_arn
    )
# Emit the CloudFormation templates for every stack defined above.
app.synth()
| StarcoderdataPython |
import os, glob, gzip, sys
from subprocess import call
from requests_html import HTMLSession
def check_existing(save_loc, acc):
    """
    Check for single- or paired-end reads in a given `save_loc` for a
    particular `acc`ession.

    Returns "paired" if only paired-end reads are found, "unpaired" if only
    single-end reads are found, "both" if single- and paired-end reads are
    found, and False if nothing matching that accession was found (or the
    location does not exist yet).  (The docstring previously said "single";
    the function has always returned "unpaired".)
    """
    # An empty (or missing) save_loc means "look in the current directory".
    loc_to_search = save_loc if save_loc else os.getcwd()
    try:
        existing = [f for f in os.listdir(loc_to_search) if f.endswith('fastq.gz')]
    except FileNotFoundError:
        # Location has not been created yet -> nothing downloaded.
        return False
    paired = False
    unpaired = False
    for f in existing:
        # Substring (not equality) match, so prefixed variants also count.
        if acc + '.fastq.gz' in f:
            unpaired = True
        if (acc + '_1.fastq.gz' in f) or (acc + '_2.fastq.gz' in f):
            paired = True
    if unpaired and paired:
        return "both"
    elif paired:
        return "paired"
    elif unpaired:
        return "unpaired"
    else:
        return False
def gzip_files(paths, tool="gzip", threads=1):
    """
    Compress the files at one or more `paths` using the specified `tool`
    ("gzip" or "pigz"; `threads` only applies to pigz).

    Paths that do not point to an existing file are silently skipped.
    Returns the command-line tool's return code; exits the program with
    status 1 if `tool` is not recognized.
    """
    # Accept a single path as well as a list of paths.
    if not isinstance(paths, list):
        paths = [paths]
    validated_paths = [p for p in paths if os.path.isfile(p)]
    # Argument-list form (no shell) so paths containing spaces or shell
    # metacharacters are passed through safely.
    if tool == "gzip":
        retcode = call(["gzip", "-f"] + validated_paths)
    elif tool == "pigz":
        retcode = call(["pigz", "-f", "-p", str(threads)] + validated_paths)
    else:
        print("Unrecognized tool " + tool + " specified: cannot compress ", validated_paths)
        sys.exit(1)
    return retcode
def fetch_file(url, outfile, retries = 0):
    """
    Fetch a remote file from `url` into `outfile` with wget, re-trying up to
    `retries` additional times if wget exits non-zero.

    Returns the return code of the last attempt (0 on success).  The
    `retries` parameter was previously accepted but silently ignored.
    """
    wget_cmd = ["wget", "-O", outfile, url]
    retcode = call(wget_cmd)
    attempts_left = retries
    while retcode != 0 and attempts_left > 0:
        attempts_left -= 1
        retcode = call(wget_cmd)
    return retcode
def build_paths(acc, loc, paired, ext = ".fastq"):
    """
    Build output paths for files belonging to accession `acc` in directory
    `loc`.  Paired data yields two paths (suffixes "_1"/"_2"); unpaired data
    yields one.  Any `ext` may be supplied.  Returns a list of 1 or 2 paths.
    """
    suffixes = ["_1", "_2"] if paired else [""]
    return [os.path.join(loc, "{}{}{}".format(acc, sfx, ext)) for sfx in suffixes]
def check_filetype(fp):
    """
    Classify the file at `fp` by content: gzipped or not, and FASTA, FASTQ,
    or neither, based on the first character of its first line.

    Returns 'fasta', 'fastq', 'fasta.gz', 'fastq.gz', or '' if the filetype
    is not recognized.  Raises FileNotFoundError if `fp` does not exist
    (previously this crashed with an unrelated NameError).
    """
    try:
        # Try gzip first; the `with` closes the handle (the original
        # leaked the gzip file object on the success path).
        with gzip.open(fp) as f:
            first = f.readline().decode("ascii", errors="replace")
        gz = ".gz"
    except (OSError, EOFError):
        # Not gzip data (bad magic, truncated, or empty file -- an empty
        # file raises EOFError, which was previously uncaught).
        with open(fp, 'r', errors="replace") as f:
            first = f.readline()
        gz = ""
    if len(first) == 0:
        return ""
    if first[0] == ">":
        return "fasta" + gz
    elif first[0] == "@":
        return "fastq" + gz
    else:
        return ""
def fasta_to_fastq(fp_fa, fp_fq, zipped, dummy_char = "I"):
    """
    Convert the FASTA file at `fp_fa` (gzip-compressed when `zipped` is
    True) into a FASTQ file at `fp_fq`, assigning every base the quality
    character `dummy_char`.

    Raises an Exception if `dummy_char` is not exactly one character.
    """
    if len(dummy_char) != 1:
        raise Exception("FASTQ dummy quality char must be only one char.")
    if zipped:
        # BUG FIX: the original called the gzip *module* (`gzip(fp_fa)`),
        # which raises TypeError.  Open in text mode so lines are str.
        f = gzip.open(fp_fa, "rt")
    else:
        f = open(fp_fa)
    seq = None  # None until the first header is seen (was a -1 sentinel,
                # which crashed `len(seq)` on empty input)
    with f, open(fp_fq, 'w') as fq:
        for line in f:
            if line.startswith('>'):
                # Flush the previous record before starting a new one.
                if seq is not None:
                    fq.write(seq + '\n')
                    fq.write('+\n')
                    fq.write(dummy_char * len(seq) + '\n')
                fq.write('@' + line[1:])
                seq = ''
            else:
                seq += line.strip()
        # Flush the final record, if any sequence was collected.
        if seq:
            fq.write(seq + '\n')
            fq.write('+\n')
            fq.write(dummy_char * len(seq) + '\n')
| StarcoderdataPython |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
from bpy.types import Operator
from bpy.props import (
EnumProperty,
IntProperty,
)
class MeshMirrorUV(Operator):
    """Copy mirror UV coordinates on the X axis based on a mirrored mesh"""
    bl_idname = "mesh.faces_mirror_uv"
    bl_label = "Copy Mirrored UV Coords"
    bl_options = {'REGISTER', 'UNDO'}
    direction: EnumProperty(
        name="Axis Direction",
        items=(
            ('POSITIVE', "Positive", ""),
            ('NEGATIVE', "Negative", ""),
        ),
    )
    precision: IntProperty(
        name="Precision",
        description=("Tolerance for finding vertex duplicates"),
        min=1, max=16,
        soft_min=1, soft_max=16,
        default=3,
    )
    # Returns has_active_UV_layer, double_warn.
    def do_mesh_mirror_UV(self, mesh, DIR):
        """Copy UVs between X-mirrored polygons of `mesh`.

        DIR False (0): polys on the +X side are updated from their -X
        mirrors; DIR True (1): the reverse.  Returns (has_active_UV_layer,
        number of duplicate-vertex collisions detected).
        """
        precision = self.precision
        double_warn = 0
        if not mesh.uv_layers.active:
            # has_active_UV_layer, double_warn
            return False, 0
        # mirror lookups: vertex coords (rounded to `precision`) on the
        # +X / -X halves of the mesh, mapped to their vertex index.
        mirror_gt = {}
        mirror_lt = {}
        vcos = (v.co.to_tuple(precision) for v in mesh.vertices)
        for i, co in enumerate(vcos):
            if co[0] >= 0.0:
                # `co in dict` adds 1 when two verts round to the same spot.
                double_warn += co in mirror_gt
                mirror_gt[co] = i
            if co[0] <= 0.0:
                double_warn += co in mirror_lt
                mirror_lt[co] = i
        # vmap: vertex index -> index of its X-mirrored counterpart.
        vmap = {}
        for mirror_a, mirror_b in ((mirror_gt, mirror_lt),
                                   (mirror_lt, mirror_gt)):
            for co, i in mirror_a.items():
                nco = (-co[0], co[1], co[2])
                j = mirror_b.get(nco)
                if j is not None:
                    vmap[i] = j
        polys = mesh.polygons
        loops = mesh.loops
        uv_loops = mesh.uv_layers.active.data
        nbr_polys = len(polys)
        mirror_pm = {}
        pmap = {}
        puvs = [None] * nbr_polys
        puvs_cpy = [None] * nbr_polys
        puvsel = [None] * nbr_polys
        pcents = [None] * nbr_polys
        vidxs = [None] * nbr_polys
        for i, p in enumerate(polys):
            lstart = lend = p.loop_start
            lend += p.loop_total
            puvs[i] = tuple(uv.uv for uv in uv_loops[lstart:lend])
            # Unmodified snapshot to read from while writing into puvs.
            puvs_cpy[i] = tuple(uv.copy() for uv in puvs[i])
            # True only when every UV loop of the poly is selected.
            puvsel[i] = (False not in
                         (uv.select for uv in uv_loops[lstart:lend]))
            # Vert idx of the poly.
            vidxs[i] = tuple(l.vertex_index for l in loops[lstart:lend])
            pcents[i] = p.center
            # Preparing next step finding matching polys.
            mirror_pm[tuple(sorted(vidxs[i]))] = i
        for i in range(nbr_polys):
            # Find matching mirror poly.
            tvidxs = [vmap.get(j) for j in vidxs[i]]
            if None not in tvidxs:
                tvidxs.sort()
                j = mirror_pm.get(tuple(tvidxs))
                if j is not None:
                    pmap[i] = j
        for i, j in pmap.items():
            # Both polys must be fully selected, and the destination must
            # lie on the side being written (chosen by DIR).
            if not puvsel[i] or not puvsel[j]:
                continue
            elif DIR == 0 and pcents[i][0] < 0.0:
                continue
            elif DIR == 1 and pcents[i][0] > 0.0:
                continue
            # copy UVs
            uv1 = puvs[i]
            uv2 = puvs_cpy[j]
            # get the correct rotation
            v1 = vidxs[j]
            v2 = tuple(vmap[k] for k in vidxs[i])
            if len(v1) == len(v2):
                for k in range(len(v1)):
                    k_map = v1.index(v2[k])
                    # Mirror U around the 0.5 seam; V is copied unchanged.
                    uv1[k].xy = - (uv2[k_map].x - 0.5) + 0.5, uv2[k_map].y
        # has_active_UV_layer, double_warn
        return True, double_warn
    @classmethod
    def poll(cls, context):
        obj = context.view_layer.objects.active
        return (obj and obj.type == 'MESH')
    def execute(self, context):
        """Run the mirror copy on every selected local mesh and report
        missing-UV-layer and duplicate-vertex problems."""
        DIR = (self.direction == 'NEGATIVE')
        total_no_active_UV = 0
        total_duplicates = 0
        meshes_with_duplicates = 0
        ob = context.view_layer.objects.active
        is_editmode = (ob.mode == 'EDIT')
        if is_editmode:
            # Leave edit mode so mesh data access below is up to date.
            bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
        meshes = [ob.data for ob in context.view_layer.objects.selected
                  if ob.type == 'MESH' and ob.data.library is None]
        # Tags make sure each (possibly object-shared) mesh runs only once.
        for mesh in meshes:
            mesh.tag = False
        for mesh in meshes:
            if mesh.tag:
                continue
            mesh.tag = True
            has_active_UV_layer, double_warn = self.do_mesh_mirror_UV(mesh, DIR)
            if not has_active_UV_layer:
                total_no_active_UV = total_no_active_UV + 1
            elif double_warn:
                total_duplicates += double_warn
                meshes_with_duplicates = meshes_with_duplicates + 1
        if is_editmode:
            bpy.ops.object.mode_set(mode='EDIT', toggle=False)
        if total_duplicates and total_no_active_UV:
            self.report({'WARNING'}, "%d %s with no active UV layer. "
                        "%d duplicates found in %d %s, mirror may be incomplete."
                        % (total_no_active_UV,
                           "mesh" if total_no_active_UV == 1 else "meshes",
                           total_duplicates,
                           meshes_with_duplicates,
                           "mesh" if meshes_with_duplicates == 1 else "meshes"))
        elif total_no_active_UV:
            self.report({'WARNING'}, "%d %s with no active UV layer."
                        % (total_no_active_UV,
                           "mesh" if total_no_active_UV == 1 else "meshes"))
        elif total_duplicates:
            self.report({'WARNING'}, "%d duplicates found in %d %s,"
                        " mirror may be incomplete."
                        % (total_duplicates,
                           meshes_with_duplicates,
                           "mesh" if meshes_with_duplicates == 1 else "meshes"))
        return {'FINISHED'}
class MeshSelectNext(Operator):
    """Select the next element (using selection order)"""
    bl_idname = "mesh.select_next_item"
    bl_label = "Select Next Element"
    bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only meaningful while a mesh is being edited.
        return context.mode == 'EDIT_MESH'
    def execute(self, context):
        import bmesh
        from .bmesh import find_adjacent
        mesh = context.active_object.data
        edit_bm = bmesh.from_edit_mesh(mesh)
        if find_adjacent.select_next(edit_bm, self.report):
            edit_bm.select_flush_mode()
            bmesh.update_edit_mesh(mesh, False)
        return {'FINISHED'}
class MeshSelectPrev(Operator):
    """Select the previous element (using selection order)"""
    bl_idname = "mesh.select_prev_item"
    bl_label = "Select Previous Element"
    bl_options = {'REGISTER', 'UNDO'}
    @classmethod
    def poll(cls, context):
        # Only meaningful while a mesh is being edited.
        return context.mode == 'EDIT_MESH'
    def execute(self, context):
        import bmesh
        from .bmesh import find_adjacent
        mesh = context.active_object.data
        edit_bm = bmesh.from_edit_mesh(mesh)
        if find_adjacent.select_prev(edit_bm, self.report):
            edit_bm.select_flush_mode()
            bmesh.update_edit_mesh(mesh, False)
        return {'FINISHED'}
# Operators provided by this module -- presumably consumed by a register()
# call elsewhere in the add-on (not visible in this file section).
classes = (
    MeshMirrorUV,
    MeshSelectNext,
    MeshSelectPrev,
)
| StarcoderdataPython |
from django.db import models
import datetime
# Create your models here.
# Selectable years: 1980 through the current year, as (value, label) pairs.
YEAR_CHOICES = [(year, year) for year in range(1980, datetime.datetime.now().year + 1)]
class Publisher(models.Model):
    """A book publisher, keyed by its (unique) name."""
    name = models.CharField('Name', max_length=30, primary_key=True)
    city = models.CharField('City', max_length=30)
    country = models.CharField('Country', max_length=30)
    president = models.CharField('President', max_length=30)
    yearFounded = models.IntegerField('Year', choices=YEAR_CHOICES)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # BUG FIX: `reverse` was never imported at module level, so calling
        # this method raised NameError.  Import locally to keep the fix
        # self-contained; fall back for old Django versions.
        try:
            from django.urls import reverse
        except ImportError:  # Django < 1.10
            from django.core.urlresolvers import reverse
        return reverse('publisher_edit', kwargs={'pk': self.pk})
class Author(models.Model):
    """An author, keyed by an externally assigned author number."""
    authorNumber = models.CharField('Author Number', max_length=30, primary_key=True)
    name = models.CharField('Name', max_length=30)
    bornYear = models.IntegerField('Born Year', choices=YEAR_CHOICES)
    # diedYear is optional: living authors leave it blank.
    diedYear = models.IntegerField('Died Year', choices=YEAR_CHOICES, null=True, blank=True)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # BUG FIX: `reverse` was never imported at module level, so calling
        # this method raised NameError.  Import locally to keep the fix
        # self-contained; fall back for old Django versions.
        try:
            from django.urls import reverse
        except ImportError:  # Django < 1.10
            from django.core.urlresolvers import reverse
        return reverse('author_edit', kwargs={'pk': self.pk})
class Book(models.Model):
    """A published book with its publisher and one or more authors."""
    bookNumber = models.CharField('Book Number', max_length=30, primary_key=True)
    name = models.CharField('Name', max_length=50)
    publicationYear = models.IntegerField('Publication Year', choices=YEAR_CHOICES)
    pages = models.IntegerField('Pages')
    publication = models.ForeignKey(to=Publisher,on_delete=models.CASCADE,null=False,blank=False)
    author = models.ManyToManyField(Author)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # BUG FIX: `reverse` was never imported at module level, so calling
        # this method raised NameError.  Import locally to keep the fix
        # self-contained; fall back for old Django versions.
        try:
            from django.urls import reverse
        except ImportError:  # Django < 1.10
            from django.core.urlresolvers import reverse
        return reverse('book_edit', kwargs={'pk': self.pk})
class Customer(models.Model):
    """A customer with a mailing address, keyed by customer number."""
    customerNumber = models.CharField('Customer Number', max_length=30, primary_key=True)
    name = models.CharField('Name', max_length=30)
    street = models.CharField('Street', max_length=30)
    city = models.CharField('City', max_length=30)
    state = models.CharField('State', max_length=30)
    country = models.CharField('Country', max_length=30)

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # BUG FIX: `reverse` was never imported at module level, so calling
        # this method raised NameError.  Import locally to keep the fix
        # self-contained; fall back for old Django versions.
        try:
            from django.urls import reverse
        except ImportError:  # Django < 1.10
            from django.core.urlresolvers import reverse
        return reverse('customer_edit', kwargs={'pk': self.pk})
class Sale(models.Model):
    """A sale of `quantity` copies of one book to one customer on a date."""
    customer = models.ForeignKey(to=Customer,on_delete=models.CASCADE,null=False,blank=False)
    book = models.ForeignKey(to=Book, on_delete=models.CASCADE,null=False,blank=False)
    date = models.DateField('Date')
    price = models.DecimalField('Price', decimal_places=2, max_digits=8)
    quantity = models.PositiveIntegerField('Quantity')

    class Meta:
        ordering = ['date']
        # Each customer can have at most one sale row per book.
        unique_together = (("customer", "book"),)

    def __str__(self):
        return str(self.id)

    def get_absolute_url(self):
        # BUG FIX: `reverse` was never imported at module level, so calling
        # this method raised NameError.  Import locally to keep the fix
        # self-contained; fall back for old Django versions.
        try:
            from django.urls import reverse
        except ImportError:  # Django < 1.10
            from django.core.urlresolvers import reverse
        return reverse('sale_edit', kwargs={'pk': self.pk})
# repository: avara1986/avara
from django.conf.urls import patterns, include, url
from django.contrib import admin
from avara import settings
from avara.routers import router
# Populate the admin site with all registered ModelAdmin classes.
admin.autodiscover()
# NOTE(review): `patterns()` was removed in Django 1.10, so this module
# targets an older Django release -- confirm before upgrading.
urlpatterns = patterns('',
    url(r'^_ah/', include('djangae.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/v1/', include(router.urls)),
    url(r'^', include('website.urls')),
)
# Serve collected static files directly (typically only for development).
urlpatterns += patterns('',
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.STATIC_ROOT}),
)
| StarcoderdataPython |
8154588 | <gh_stars>1-10
## ____ _ ____
## / ___|__ _ ___| |_ _ _ ___ / ___|__ _ _ __ _ _ ___ _ __
## | | / _` |/ __| __| | | / __| | | / _` | '_ \| | | |/ _ \| '_ \
## | |__| (_| | (__| |_| |_| \__ \ | |__| (_| | | | | |_| | (_) | | | |
## \____\__,_|\___|\__|\__,_|___/ \____\__,_|_| |_|\__, |\___/|_| |_|
## |___/
## ___ ___ _ _ _____ ___ _ _ _ _ ___ ___
## / __/ _ \| \| |_ _|_ _| \| | | | | __| \
## | (_| (_) | .` | | | | || .` | |_| | _|| |) |
## \___\___/|_|\_| |_| |___|_|\_|\___/|___|___/
##
## A P-ROC Project by <NAME>, Copyright 2012-2013
## Built on the PyProcGame Framework from <NAME> and <NAME>
## Original Cactus Canyon software by <NAME>
##
##
## The idea here is to have a really high priority layer that can but in over the top
## of the regular display for things that are important
##
from procgame import dmd
import ep
import random
class Interrupter(ep.EP_Mode):
"""Cactus Canyon Interrupter Jones"""
    def __init__(self, game, priority):
        """Set up the high-priority overlay mode used for alert displays."""
        super(Interrupter, self).__init__(game, priority)
        self.myID = "Interrupter Jones"
        # Rotates one slot per idle prompt; the True slot triggers a razz quote
        # (so roughly every fifth prompt).
        self.rotator = [True,False,False,False,False]
        # "Off", "Left" or "Right": which flipper opened the status display.
        self.statusDisplay = "Off"
        self.page = 0
        self.playing = False
        self.hush = False
        self.knockerStrength = self.game.user_settings['Machine (Standard)']['Real Knocker Strength']
        self.idle = False
        # Shuffled rotation bookkeeping for the "shoot again" voice clips.
        self.keys_index = {'shoot_again':list(range(len(self.game.sound.sounds[self.game.assets.quote_shootAgain])))}
        self.counts_index = {'shoot_again':0}
        random.shuffle(self.keys_index['shoot_again'])
    def display_player_number(self,idle=False):
        """Flash the current player number; with idle=True, repeat every 15s."""
        # if the skillshot display is busy, we don't trample on it
        if not self.game.skill_shot.busy:
            # for when the ball is sitting in the shooter lane with nothing going on
            myNumber = ("ONE","TWO","THREE","FOUR")
            # get the current player
            p = self.game.current_player_index
            # set up the text
            textString = "PLAYER> " + myNumber[p]
            textLayer = ep.EP_TextLayer(128/2, 7, self.game.assets.font_12px_az_outline, "center", opaque=False)
            textLayer.composite_op = "blacksrc"
            textLayer.set_text(textString)
            # 0.3s on / 0.3s off blink script.
            script = [{'seconds':0.3,'layer':textLayer},{'seconds':0.3,'layer':None}]
            display = dmd.ScriptedLayer(128,32,script)
            display.composite_op = "blacksrc"
            # turn the display on
            self.layer = display
            # every fifth time razz them
            if self.rotator[0]:
                self.game.base.play_quote(self.game.assets.quote_dontJustStandThere)
            # then stick the current value on the end
            foo = self.rotator.pop(0)
            self.rotator.append(foo)
            ## then shift 0 to the end
            self.delay(name="Display",delay=1.5,handler=self.clear_layer)
            # with an idle call, set a repeat
            if idle:
                self.idle = True
                self.delay(name="idle",delay=15,handler=self.display_player_number,param=True)
def cancel_idle(self):
self.idle = False
self.cancel_delayed("idle")
    def abort_player_number(self):
        """Tear down the idle player-number display if it is running."""
        if self.idle:
            self.cancel_delayed("Display")
            self.cancel_delayed("idle")
            self.idle = False
            self.layer = None
    def score_overlay(self,points,multiplier,textColor):
        """Briefly show a shot's point value (and combo multiplier, if any)."""
        # points is the shot value, multiplier is the active combo multiplier
        textLayer = ep.EP_TextLayer(128/2, 24, self.game.assets.font_6px_az_inverse, "center", opaque=False)
        string = "< " + str(ep.format_score(points))
        if multiplier > 1:
            string = string + " X " + str(multiplier)
        string = string + " >"
        textLayer.set_text(string,color=textColor)
        self.layer = textLayer
        self.delay("Display",delay=1.5,handler=self.clear_layer)
    def tilt_danger(self,status):
        """Show a tilt warning; `status` is the current warning count."""
        self.cancel_delayed("Display")
        # if it puts us at 2, time for second warning
        if status == 2:
            #print "DANGER DANGER"
            # double warning
            line1 = ep.EP_TextLayer(128/2, 1, self.game.assets.font_dangerFont, "center", opaque=False).set_text("D A N G E R",color=ep.RED)
            line1.composite_op = "blacksrc"
            line2 = ep.EP_TextLayer(128/2, 16, self.game.assets.font_dangerFont, "center", opaque=False).set_text("D A N G E R",color=ep.RED)
            line2.composite_op = "blacksrc"
            combined = dmd.GroupedLayer(128,32,[line1,line2])
            combined.composite_op = "blacksrc"
            self.layer = combined
            # play a sound (twice, the second 0.5s later; the return value
            # assigned to myWait is not used afterwards)
            myWait = self.play_tilt_sound()
            self.delay(delay=0.5,handler=self.play_tilt_sound)
            self.delay("Display",delay=1,handler=self.clear_layer)
        # otherwise this must be the first warning
        else:
            #print "Display"
            #add a display layer and add a delayed removal of it.
            if status > 2:
                string = "DANGER X " + str(status)
            else:
                # NOTE(review): "<NAME>" looks like dataset-anonymization
                # residue -- the original was probably a danger/warning
                # string.  Runtime text, so left untouched here; confirm
                # against upstream Cactus Canyon Continued sources.
                string = "<NAME>"
            line1 = ep.EP_TextLayer(128/2, 10, self.game.assets.font_dangerFont, "center", opaque=False).set_text(string,color=ep.RED)
            line1.composite_op = "blacksrc"
            self.layer = line1
            #play sound
            self.play_tilt_sound()
            self.delay("Display",delay=1,handler=self.clear_layer)
    def tilt_display(self,slam=False):
        """Show the TILT (or SLAM TILT) screen; a slam tilt resets the game."""
        self.cancel_delayed("Display")
        if slam:
            # kill all delays
            # NOTE(review): `mode.__delayed` is name-mangled here to
            # `mode._Interrupter__delayed`, so this likely does NOT clear the
            # modes' own delayed-call lists -- confirm the intended attribute.
            for mode in self.game.modes:
                mode.__delayed = []
            self.game.mute = True
            self.stop_music()
            self.game.sound.play(self.game.assets.sfx_slam)
            # slam display goes here
            tiltLayer = dmd.FrameLayer(opaque=True, frame=self.game.assets.dmd_slammed.frames[0])
            textLayer = self.game.showcase.make_string(2,3,0,x=64,y=0,align="center",isOpaque=False,text="S L A M",isTransparent=True,condensed=False)
            # Display the tilt graphic
            self.layer = dmd.GroupedLayer(128,32,[tiltLayer,textLayer])
            self.delay(delay=1.8,handler=self.game.sound.play,param=self.game.assets.quote_dejected)
            # Full machine reset after the slam show.
            self.delay(delay=3.5,handler=self.game.reset)
        else:
            # if in rectify party mode show rectify instead of tilt
            if self.game.party_setting == 'Rectify':
                displayString = "RECTIFY"
                tiltSound = self.game.assets.sfx_spinDown
                soundDelay = 0
            else:
                displayString = "TILT"
                tiltSound = self.game.assets.quote_tilt
                soundDelay = 1.5
            # build a tilt graphic
            tiltLayer = ep.EP_TextLayer(128/2, 7, self.game.assets.font_20px_az, "center", opaque=True).set_text(displayString,color=ep.RED)
            # Display the tilt graphic
            self.layer = tiltLayer
            # play the tilt quote
            self.delay(delay=soundDelay,handler=self.game.sound.play,param=tiltSound)
    def tilted(self):
        """Deliberate no-op: this mode takes no action on tilt beyond logging."""
        self.game.logger.debug("Interrupter Passing Tilt")
        pass
    def play_tilt_sound(self):
        """Play the tilt-danger sound effect (delay-handler friendly wrapper)."""
        self.game.sound.play(self.game.assets.sfx_tiltDanger)
    def ball_saved(self):
        """Show the 'ball saved' animation and quote (suppressed in multiballs)."""
        # don't show in certain situations
        if self.game.drunk_multiball.running or \
                self.game.moonlight.running or \
                self.game.marshall_multiball.running or \
                self.game.gm_multiball.running or \
                self.game.stampede.running or \
                self.game.cva.running or \
                self.game.last_call.running:
            return
        # otherwise, party on
        # play a quote
        self.game.base.priority_quote(self.game.assets.quote_dontMove)
        # show some display
        anim = self.game.assets.dmd_ballSaved
        # Animation length in seconds at ~12 fps.
        myWait = len(anim.frames) / 12.0
        # set the animation
        animLayer = ep.EP_AnimatedLayer(anim)
        animLayer.hold = True
        animLayer.frame_time = 5
        animLayer.opaque = True
        # add listener frames
        animLayer.add_frame_listener(2,self.game.sound.play,param=self.game.assets.sfx_ballSaved)
        self.cancel_delayed("Display")
        self.layer = animLayer
        self.delay(delay=myWait + 0.5,handler=self.clear_layer)
    def closing_song(self,duration):
        """After game end, optionally play attract-mode music and a light show,
        then re-enable the start button once `duration` has elapsed."""
        attractMusic = 'Yes' == self.game.user_settings['Gameplay (Feature)']['Attract Mode Music']
        if attractMusic:
            #print "Playing Closing Song"
            self.delay(delay=duration+1,handler=self.music_on,param=self.game.assets.music_goldmineMultiball)
            # and set a delay to fade it out after 2 minutes
            self.delay("Attract Fade",delay=60,handler=self.game.sound.fadeout_music,param=2000)
            # new line to reset the volume after fade because it may affect new game
            self.delay("Attract Fade",delay=62.5,handler=self.reset_volume)
            # play a flasher lampshow
            self.game.GI_lampctrl.play_show(self.game.assets.lamp_flashers, repeat=False)
        # set a 2 second delay to allow the start button to work again
        #print "Setting delay for start button"
        self.delay(delay=duration+2,handler=self.enable_start)
    def enable_start(self):
        """Clear the end-of-game busy flag so the start button works again."""
        #print "Game start enabled again"
        self.game.endBusy = False
    def reset_volume(self):
        """Restore the configured volume (used after the attract-music fade)."""
        self.game.sound.set_volume(self.game.volume_to_set)
    def showdown_hit(self,points):
        """Flash the 'BAD GUY SHOT' banner with the awarded point value."""
        pointString = str(ep.format_score(points))
        textLine1 = ep.EP_TextLayer(128/2, 2, self.game.assets.font_9px_AZ_outline, "center", opaque=False).set_text("<BAD> <GUY> <SHOT!>",color=ep.ORANGE)
        textLine2 = ep.EP_TextLayer(128/2, 14, self.game.assets.font_12px_az_outline, "center", opaque=False)
        textLine2.composite_op = "blacksrc"
        textLine2.set_text(pointString,blink_frames=8,color=ep.RED)
        combined = dmd.GroupedLayer(128,32,[textLine1,textLine2])
        combined.composite_op = "blacksrc"
        self.layer = combined
        self.delay(name="Display",delay=1.5,handler=self.clear_layer)
    def ball_added(self):
        """Flash the 'BALL ADDED' banner briefly."""
        textLine = dmd.TextLayer(64, 12, self.game.assets.font_9px_AZ_outline, "center", opaque=False).set_text("<BALL> <ADDED>",blink_frames=8)
        textLine.composite_op = "blacksrc"
        self.layer = textLine
        self.delay(name="Display",delay=1.5,handler=self.clear_layer)
    def ball_save_activated(self):
        """Flash the 'BALL SAVER ACTIVATED' banner briefly."""
        textLine1 = dmd.TextLayer(128/2, 2, self.game.assets.font_9px_AZ_outline, "center", opaque=False).set_text("<BALL> <SAVER>")
        textLine2 = ep.EP_TextLayer(128/2, 14, self.game.assets.font_12px_az_outline, "center", opaque=False)
        textLine2.composite_op = "blacksrc"
        textLine2.set_text("ACTIVATED",blink_frames=8,color=ep.GREEN)
        combined = dmd.GroupedLayer(128,32,[textLine1,textLine2])
        combined.composite_op = "blacksrc"
        self.layer = combined
        self.delay(name="Display",delay=1.5,handler=self.clear_layer)
    def dude_escaped(self,amount):
        """Show the 'escaped' warning; `amount` is escapes left before losing."""
        backdrop = dmd.FrameLayer(opaque=True, frame=self.game.assets.dmd_escaped.frames[0])
        backdrop.composite_op = "blacksrc"
        if amount <= 0:
            textString = "THEY GOT AWAY - YOU LOSE"
        else:
            textString = str(amount) + " MORE AND YOU LOSE"
        textLine2 = dmd.TextLayer(128/2, 18, self.game.assets.font_5px_AZ, "center", opaque=False).set_text(textString,blink_frames=8)
        textLine2.composite_op = "blacksrc"
        combined = dmd.GroupedLayer(128,32,[backdrop,textLine2])
        combined.composite_op = "blacksrc"
        self.layer = combined
        self.delay(name="Display",delay=1,handler=self.clear_layer)
    ## Status section, for the HALIBUT
    # hold a flipper for 5 seconds to start - but only turn it on if it's not already on
    def sw_flipperLwR_active_for_5s(self,sw):
        """Holding the right flipper for 5s opens the status display."""
        if self.statusDisplay == "Off":
            self.status_on('Right')
    def sw_flipperLwL_active_for_5s(self,sw):
        """Holding the left flipper for 5s opens the status display."""
        if self.statusDisplay == "Off":
            self.status_on('Left')
    # releasing the flipper you started with cancels the status
    def sw_flipperLwR_inactive(self,sw):
        if self.statusDisplay == "Right":
            self.status_off()
    def sw_flipperLwL_inactive(self,sw):
        if self.statusDisplay == "Left":
            self.status_off()
    # tapping a flipper should skip slides - if the other flipper has the status active
    def sw_flipperLwL_active(self,sw):
        if self.statusDisplay == "Right":
            self.status()
    def sw_flipperLwR_active(self,sw):
        if self.statusDisplay == "Left":
            self.status()
    def status_on(self,side):
        """Start the status display, remembering which flipper (`side`) holds it."""
        # Only available while the combos mode is loaded (i.e. during play).
        if self.game.combos in self.game.modes:
            self.statusDisplay = side
            #print "STATUS GOES HERE"
            # start the status display
            self.status()
        else:
            pass
    def status_off(self):
        """Shut down the status display and reset its page counter."""
        self.statusDisplay = "Off"
        #print "STATUS ENDING"
        self.cancel_delayed("Display")
        # clear the layer
        self.layer = None
        # reset the page to 0
        self.page = 0
    def status(self):
        """Advance to (and render) the next status page; re-arms itself every 3s."""
        # cancel the delay, in case we got pushed early
        self.cancel_delayed("Display")
        # hide the replay page if replays are disabled
        max_page = 7
        # by bumping up the max page by one if replays are enabled
        if self.game.replays:
            max_page += 1
        # first, tick up the page
        self.page += 1
        # roll back around if we get over the number of pages
        if self.page > max_page:
            self.page = 1
        # then show some junk based on what page we're on
        # Page 1: title card.
        if self.page == 1:
            textLine1 = ep.EP_TextLayer(128/2, 1, self.game.assets.font_12px_az, "center", opaque=True).set_text("CURRENT",color=ep.YELLOW)
            textLine2 = ep.EP_TextLayer(128/2, 16, self.game.assets.font_12px_az, "center", opaque=False).set_text("STATUS",color=ep.YELLOW)
            textLine2.composite_op = "blacksrc"
            combined = dmd.GroupedLayer(128,32,[textLine1,textLine2])
            self.layer = combined
        # bonus information
        if self.page == 2:
            multiplier = self.game.show_tracking('bonusX')
            textString2 = str(multiplier) + "X MULTIPLIER"
            bonus = self.game.show_tracking('bonus')
            textString3 = "BONUS: " + ep.format_score(bonus)
            # default three line display
            self.tld("BONUS INFO:", textString2, textString3, color2=ep.ORANGE,color3=ep.ORANGE)
        if self.page == 3:
            # Multiball/Mine information
            locked = self.game.show_tracking('ballsLocked')
            if locked == 1:
                textString2 = str(locked) + " BALL LOCKED"
            else:
                textString2 = str(locked) + " BALLS LOCKED"
            shots = self.game.show_tracking('mineShotsTotal')
            textString3 = str(shots) + " MINE SHOTS TOTAL"
            # stock three line display
            self.tld("MINE STATUS:", textString2, textString3, color2=ep.ORANGE,color3=ep.ORANGE)
        # drunk multiball status
        if self.page == 4:
            # hits left to light drunk multiball
            left = self.game.user_settings['Gameplay (Feature)']['Beer Mug Hits For Multiball'] - self.game.show_tracking('beerMugHits')
            if left <= 0:
                textString2 = "DRUNK MULTIBALL"
                textString3 = "IS LIT"
            else:
                textString2 = str(left) + " MORE HITS"
                textString3 = "FOR MULTIBALL"
            # default three line display
            self.tld("BEER MUG:",textString2,textString3, color2=ep.ORANGE,color3=ep.ORANGE)
        # circle back and clear the layer
        # CVA Information & Tumbleweeds
        if self.page == 5:
            left = self.game.show_tracking('tumbleweedShots') - self.game.show_tracking('tumbleweedHits')
            if left <= 0:
                textString2 = "COWBOYS V ALIENS"
                textString3 = "IS LIT"
            else:
                textString2 = str(left) + " MORE WEEDS FOR"
                textString3 = "COWBOYS V ALIENS"
            self.tld("TUMBLEWEEDS:",textString2,textString3,color2=ep.ORANGE,color3=ep.ORANGE)
        # combos information
        if self.page == 6:
            # combos to light badge
            needed = self.game.user_settings['Gameplay (Feature)']['Combos for Star']
            # combos so far
            have = self.game.show_tracking('combos')
            left = needed - have
            if left <= 0:
                textString2 = str(have) + " COMBOS"
                textString3 = "BADGE IS LIT!"
            else:
                textString2 = str(have) + " COMBOS"
                textString3 = str(left) + " MORE FOR BADGE"
            self.tld("COMBO SHOTS:",textString2,textString3, color2=ep.ORANGE,color3=ep.ORANGE)
        # Kills so far
        if self.page == 7:
            # quickdraws so far
            quickdrawKills = self.game.show_tracking('quickdrawsWon')
            # gunfights
            gunfightKills = self.game.show_tracking('gunfightsWon')
            textString2 = "QUICKDRAWS: " + str(quickdrawKills)
            textString3 = "GUNFIGHTS: " + str(gunfightKills)
            self.tld("GUN BATTLE WINS:",textString2,textString3, color2=ep.ORANGE,color3=ep.ORANGE)
        # replay score
        if self.page == 8:
            self.layer = self.replay_score_page()
        # Auto-advance to the next page in 3 seconds.
        self.delay(name="Display",delay=3,handler=self.status)
    def tld(self,textString1,textString2,textString3,color1=ep.WHITE,color2=ep.WHITE,color3=ep.WHITE):
        """Build a standard three-line text display and set it as the active layer.

        The three strings are stacked at y = 1, 11 and 21 in the 7px font;
        each line's color can be overridden independently (defaults white).
        """
        textLine1 = ep.EP_TextLayer(128/2, 1, self.game.assets.font_7px_az, "center", opaque=False).set_text(textString1,color=color1)
        textLine2 = ep.EP_TextLayer(128/2, 11, self.game.assets.font_7px_az, "center", opaque=False).set_text(textString2,color=color2)
        textLine3 = ep.EP_TextLayer(128/2, 21, self.game.assets.font_7px_az, "center", opaque=False).set_text(textString3,color=color3)
        combined = dmd.GroupedLayer(128,32,[textLine1,textLine2,textLine3])
        self.layer = combined
    def shoot_again(self,step=1):
        """Three-step extra-ball intro sequence; each step schedules the next.

        Step 1 shows the first animation frame with intro audio, step 2 runs
        the full animation (a frame listener kicks off ball_starting), and
        step 3 overlays the green "SHOOT AGAIN" text with a quote.
        """
        # shown when starting an extra ball
        if step == 1:
            imageLayer = dmd.FrameLayer(opaque=True, frame=self.game.assets.dmd_shootAgain.frames[0])
            self.game.base.play_quote(self.game.assets.quote_deepLaugh)
            self.game.sound.play(self.game.assets.sfx_incoming)
            self.layer = imageLayer
            self.delay(delay = 2,handler=self.shoot_again, param=2)
        if step == 2:
            anim = self.game.assets.dmd_shootAgain
            # math out the wait
            myWait = len(anim.frames) / 10.0
            # set the animation
            animLayer = ep.EP_AnimatedLayer(anim)
            animLayer.hold=True
            animLayer.frame_time = 6
            animLayer.opaque = True
            animLayer.add_frame_listener(2,self.game.sound.play,param=self.game.assets.sfx_lowBoom)
            # this flag tells the player intro quote to not play
            self.hush = True
            animLayer.add_frame_listener(4,self.game.ball_starting)
            self.layer = animLayer
            self.delay(delay=myWait,handler=self.shoot_again,param=3)
        if step == 3:
            imageLayer = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_shootAgain.frames[7])
            self.play_ordered_quote(self.game.assets.quote_shootAgain,'shoot_again')
            textLine1 = ep.EP_TextLayer(80,5, self.game.assets.font_9px_az, "center", opaque= False).set_text("SHOOT",color=ep.GREEN)
            textLine2 = ep.EP_TextLayer(80,15, self.game.assets.font_9px_az, "center", opaque= False).set_text("AGAIN",color=ep.GREEN)
            combined = dmd.GroupedLayer(128,32,[imageLayer,textLine1,textLine2])
            self.layer = combined
            self.delay(delay = 1.5,handler=self.clear_layer)
def train_disabled(self):
line1 = dmd.TextLayer(128/2, 3, self.game.assets.font_9px_az, "center", opaque=False).set_text("TRAIN DISABLED")
line2 = dmd.TextLayer(128/2, 15, self.game.assets.font_9px_az, "center", opaque=False).set_text("CHECK ENCODER SWITCH")
self.layer = dmd.GroupedLayer(128,32,[line1,line2])
self.game.base.repeat_ding(3)
self.delay(delay=2,handler=self.clear_layer)
def restarting(self):
line1 = dmd.TextLayer(128/2, 3, self.game.assets.font_9px_az, "center", opaque=False).set_text("NEW")
line2 = dmd.TextLayer(128/2, 15, self.game.assets.font_9px_az, "center", opaque=False).set_text("GAME")
self.layer = dmd.GroupedLayer(128,32,[line1,line2])
self.game.base.repeat_ding(3)
self.delay(delay=2,handler=self.clear_layer)
    def add_player(self):
        """Briefly flash the score display when a player is added."""
        # show the score layer for a second
        self.layer = self.game.score_display.layer
        self.delay(delay = 1,handler=self.clear_layer)
    def show_player_scores(self):
        """Show the score display for two seconds, restarting the clear timer."""
        self.layer = self.game.score_display.layer
        self.cancel_delayed("clear score")
        self.delay("clear score", delay=2, handler=self.clear_layer)
    # this for low priority modes to throw a display over something else that is running
    def cut_in(self,layer,timer):
        """Display *layer* for *timer* seconds, replacing any running cut-in."""
        # cancel any already running cut in
        self.cancel_delayed("Cut In")
        # set the layer to the one given
        self.layer = layer
        # set the timer for clearing
        self.delay("Cut In",delay=timer,handler=self.clear_layer)
    # this throws a message if the coin door is opened
    def sw_coinDoorClosed_inactive(self,sw):
        """Switch handler: warn that high voltage is off while the coin door is open."""
        line1 = dmd.TextLayer(128/2, 3, self.game.assets.font_7px_az, "center", opaque=True).set_text("COIN DOOR OPEN")
        line2 = dmd.TextLayer(128/2, 15, self.game.assets.font_7px_az, "center", opaque=False).set_text("HIGH VOLTAGE DISABLED")
        self.layer = dmd.GroupedLayer(128,32,[line1,line2])
        self.game.base.repeat_ding(3)
        self.delay(delay=3,handler=self.clear_layer)
    # Jets increased display
    def bumpers_increased(self,value):
        """Flash a pulsing 'JET BUMPERS VALUE INCREASED' display showing *value*."""
        backdrop = dmd.FrameLayer(opaque=True,frame=self.game.assets.dmd_singleCactusBorder.frames[0])
        topLine = dmd.TextLayer(60,1,self.game.assets.font_5px_AZ, "center", opaque=False).set_text("JET BUMPERS VALUE")
        increasedLine1 = dmd.TextLayer(60,8,self.game.assets.font_12px_az, "center", opaque=False).set_text("INCREASED")
        increasedLine2 = dmd.TextLayer(60,8,self.game.assets.font_15px_az_outline, "center", opaque=False)
        increasedLine1.composite_op = "blacksrc"
        increasedLine2.composite_op = "blacksrc"
        increasedLine2.set_text("INCREASED")
        pointsLine = dmd.TextLayer(60,18,self.game.assets.font_12px_az_outline,"center",opaque=False)
        pointsLine.composite_op = "blacksrc"
        pointsLine.set_text(str(ep.format_score(value)))
        script = []
        # alternating the two fonts for "INCREASED" makes the word pulse
        layer1 = dmd.GroupedLayer(128,32,[backdrop,topLine,increasedLine1,pointsLine])
        layer2 = dmd.GroupedLayer(128,32,[backdrop,topLine,pointsLine,increasedLine2])
        script.append({'seconds':0.3,'layer':layer1})
        script.append({'seconds':0.3,'layer':layer2})
        self.game.base.play_quote(self.game.assets.quote_yippie)
        self.layer = dmd.ScriptedLayer(128,32,script)
        self.delay("Display",delay=2,handler=self.clear_layer)
    # mad cow display
    def mad_cow(self,step=1):
        """Two-step 'MAD COW' award display over a looping cow animation.

        Step 1 blinks the MAD COW text with a random cow sound, step 2
        shows the 50,000 point value, then the layer is cleared.
        """
        backdrop = ep.EP_AnimatedLayer(self.game.assets.dmd_cows)
        backdrop.hold = False
        backdrop.repeat = True
        backdrop.frame_time = 6
        backdrop.opaque = True
        if step == 1:
            noises = [self.game.assets.sfx_cow1,self.game.assets.sfx_cow2]
            sound = random.choice(noises)
            self.game.sound.play(sound)
            textLine1 = dmd.TextLayer(64,1,self.game.assets.font_12px_az_outline, "center", opaque=False)
            textLine2 = dmd.TextLayer(64,16,self.game.assets.font_12px_az_outline, "center", opaque=False)
            textLine1.composite_op = "blacksrc"
            textLine2.composite_op = "blacksrc"
            textLine1.set_text("MAD",blink_frames=15)
            textLine2.set_text("COW",blink_frames=15)
            combined = dmd.GroupedLayer(128,32,[backdrop,textLine1,textLine2])
            self.layer = combined
            self.delay("Display",delay=1.5,handler=self.mad_cow,param=2)
        elif step == 2:
            textLine1 = dmd.TextLayer(64,9,self.game.assets.font_12px_az_outline, "center",opaque=False)
            textLine1.composite_op = "blacksrc"
            textLine1.set_text(str(ep.format_score(50000)))
            combined = dmd.GroupedLayer(128,32,[backdrop,textLine1])
            self.layer = combined
            self.delay("Display",delay=1.5,handler=self.clear_layer)
        else:
            pass
    # volume controls
    # Outside of the service mode, up/down control audio volume.
    def sw_down_active(self, sw):
        """DOWN button outside service mode: lower, persist and display the volume."""
        #print "Volume Down"
        if self.game.new_service not in self.game.modes:
            # set the volume down one
            volume = self.game.volume_down()
            # save the value
            #print "New volume: " + str(volume)
            self.game.user_settings['Sound']['Initial volume']= volume
            self.game.save_settings()
            # if we're not in a game, turn on some music and throw a display
            self.volume_display(volume)
        return True
    def sw_up_active(self, sw):
        """UP button outside service mode: raise, persist and display the volume."""
        #print "Volume Up"
        if self.game.new_service not in self.game.modes:
            # set the volume up one
            volume = self.game.volume_up()
            #print "New volume: " + str(volume)
            self.game.user_settings['Sound']['Initial volume'] = volume
            self.game.save_settings()
            self.volume_display(volume)
        return True
    def volume_display(self,volume):
        """Show a volume bar for the new *volume* level, with preview music if idle."""
        # cancel any previous delay
        self.cancel_delayed("Volume")
        # start a song if one isn't already playing
        if not self.playing and self.game.base not in self.game.modes:
            self.playing = True
            self.game.sound.play_music(self.game.assets.music_shooterLaneGroove,loops=-1)
        # throw some display action
        topLine = dmd.TextLayer(64,3,self.game.assets.font_7px_az, "center", opaque=True)
        string = "VOLUME: " + str(volume)
        topLine.set_text(string)
        volumeLine = dmd.TextLayer(64,13,self.game.assets.font_13px_score, "center", opaque=False)
        volumeString = ""
        # "A"/"B"/"C" presumably map to filled/empty/end-cap bar glyphs in
        # the score font - TODO confirm against the font asset
        while len(volumeString) < volume:
            volumeString += "A"
        while len(volumeString) < 10:
            volumeString += "B"
        volumeString += "C"
        volumeLine.set_text(volumeString)
        self.layer = dmd.GroupedLayer(128,32,[topLine,volumeLine])
        # set a delay to cancel
        self.delay("Volume",delay = 2,handler=self.clear_volume_display)
    def clear_volume_display(self):
        """Stop the preview music (when no game is running) and clear the layer."""
        # turn the music off
        if self.game.base not in self.game.modes:
            self.stop_music()
        # turn off the playing flag
        self.playing = False
        # clear the layer
        self.clear_layer()
def switch_warning(self,switches):
script = []
switchCount = len(switches)
# set up the text layer
textString = "< CHECK SWITCHES >"
textLayer = dmd.TextLayer(128/2, 24, self.game.assets.font_6px_az_inverse, "center", opaque=False).set_text(textString)
textLayer.composite_op = 'blacksrc'
script.append({'seconds':1.8,'layer':textLayer})
# then loop through the bad switches
for i in range(0,switchCount,1):
name = switches[i]['switchName']
count = switches[i]['count']
textString = "< " + name + " >"
textLayer = dmd.TextLayer(128/2, 24, self.game.assets.font_6px_az_inverse, "center", opaque=False).set_text(textString)
textLayer.composite_op = 'blacksrc'
script.append({'seconds':1.8,'layer':textLayer})
display = dmd.ScriptedLayer(128,32,script)
display.composite_op = "blacksrc"
self.layer = display
    # Allow service mode to be entered during a game.
    def sw_enter_active(self, sw):
        """Tear down every running mode and toy, then enter the service mode."""
        #print "ENTERING NEW SERVICE MODE"
        # clear the interrupter layer - just in case
        self.clear_layer()
        # if attract mode is running, stop the lampshow
        if self.game.attract_mode in self.game.modes:
            # kill the lampshow
            self.game.lampctrl.stop_show()
            self.game.attract_mode.unload()
            self.game.lamp_control.disable_all_lamps()
        # stop the music
        self.stop_music()
        # stop the train
        self.game.train.stop()
        # stop the mine
        self.game.mountain.stop()
        # drop the bad guys
        self.game.bad_guys.slay()
        # kill the gunfight pins
        self.game.coils.rightGunFightPost.disable()
        self.game.coils.leftGunFightPost.disable()
        # remove all the active modes
        modequeue_copy = list(self.game.modes)
        for mode in modequeue_copy:
            mode.unload()
        # then add the service mode
        self.game.modes.add(self.game.new_service)
        self.unload()
        return True
    # knocker
    def knock(self,value,realOnly = False):
        """Fire the knocker *value* times, one knock every half second.

        Uses the real coil when the game enables it; otherwise plays a
        sound effect instead, unless *realOnly* is set (then fake knocks
        are skipped entirely).
        """
        if self.game.useKnocker:
            self.game.coils.knocker.pulse(self.knockerStrength)
            #print "Fired knocker!"
        else:
            if realOnly:
                pass
            else:
                self.game.sound.play(self.game.assets.sfx_knocker)
        value -= 1
        # if there's more than one, come back
        if value > 0:
            self.delay(delay=0.5,handler=self.knock,param=value)
    # replay score display
    def replay_score_display(self):
        """Show the replay-score hint once per game (suppressed on extra balls)."""
        # if the player hasn't already been shown the replay hint - show it
        if not self.game.show_tracking('replay_hint'):
            # set the hint tracking to true to prevent showing on extra balls
            self.game.set_tracking('replay_hint', True)
            self.layer = self.replay_score_page()
            self.delay(delay=1.5,handler=self.clear_layer)
    def replay_score_page(self):
        """Build and return a layer comparing the replay score to the player's score."""
        replay_text = ep.format_score(self.game.user_settings['Machine (Standard)']['Replay Score'])
        score_text = ep.format_score(self.game.current_player().score)
        textLine1 = ep.EP_TextLayer(64, 1, self.game.assets.font_5px_bold_AZ, "center", opaque=True).set_text("REPLAY SCORE:",color=ep.ORANGE)
        textLine2 = ep.EP_TextLayer(64, 7, self.game.assets.font_7px_az, "center", opaque=False).set_text(replay_text,color=ep.GREEN)
        textLine3 = ep.EP_TextLayer(64, 17, self.game.assets.font_5px_bold_AZ, "center", opaque=False).set_text("YOUR SCORE:",color=ep.ORANGE)
        textLine4 = ep.EP_TextLayer(64, 23, self.game.assets.font_7px_az, "center", opaque=False).set_text(score_text,blink_frames=8,color=ep.RED)
        layer = dmd.GroupedLayer(128,32,[textLine1,textLine2,textLine3,textLine4])
        return layer
def replay_award_display(self):
anim = self.game.assets.dmd_fireworks
myWait = (len(anim.frames) / 10.0) + 1
animLayer = ep.EP_AnimatedLayer(anim)
animLayer.hold = True
animLayer.frame_time = 6
# firework sounds keyframed
animLayer.add_frame_listener(14,self.game.sound.play,param=self.game.assets.sfx_fireworks1)
animLayer.add_frame_listener(17,self.game.sound.play,param=self.game.assets.sfx_fireworks2)
animLayer.add_frame_listener(21,self.game.sound.play,param=self.game.assets.sfx_fireworks3)
animLayer.add_frame_listener(24,self.game.sound.play,param=self.game.assets.quote_replay)
animLayer.composite_op = "blacksrc"
textLine1 = "REPLAY AWARD"
textLayer1 = ep.EP_TextLayer(58, 5, self.game.assets.font_10px_AZ, "center", opaque=True).set_text(textLine1,color=ep.BLUE)
textLayer1.composite_op = "blacksrc"
textLine2 = self.game.user_settings['Machine (Standard)']['Replay Award']
if textLine2.upper == "EXTRA BALL" and self.game.max_extra_balls_reached():
textLine2 = ep.format_score(500000)
textLayer2 = dmd.TextLayer(58, 18, self.game.assets.font_10px_AZ, "center", opaque=False).set_text(textLine2.upper())
textLayer2.composite_op = "blacksrc"
combined = dmd.GroupedLayer(128,32,[textLayer1,textLayer2,animLayer])
self.layer = combined
self.delay(delay=myWait,handler=self.game.sound.play,param=self.game.assets.sfx_cheers)
self.delay("Display", delay=myWait,handler=self.clear_layer)
    def tournament_start_display(self):
        """Show the tournament-mode start prompt with twin countdown digits."""
        textLine1 = ep.EP_TextLayer(64, 1, self.game.assets.font_7px_az, "center", opaque=True).set_text("TOURNAMENT MODE",color=ep.RED)
        textLine2 = ep.EP_TextLayer(64, 11, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("PRESS START",blink_frames=8,color=ep.YELLOW)
        textLine3 = ep.EP_TextLayer(64, 17, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("NOW FOR",color=ep.YELLOW)
        textLine4 = ep.EP_TextLayer(64, 23, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("TOURNAMENT PLAY",color=ep.YELLOW)
        # the countdown layers are kept on self so a timer can update them later
        self.tournamentTimerLayer = ep.EP_TextLayer(122,8,self.game.assets.font_17px_score, "right",opaque=False).set_text("9",color=ep.GREEN)
        self.tournamentTimerLayer2 = ep.EP_TextLayer(6,8,self.game.assets.font_17px_score, "left",opaque=False).set_text("9",color=ep.GREEN)
        self.layer = dmd.GroupedLayer(128,32,[textLine1,textLine2,textLine3,textLine4,self.tournamentTimerLayer,self.tournamentTimerLayer2])
def broadcast(self,layer,time):
# take a layer sent in and show it for x seconds
self.cancel_delayed("Display")
self.layer = layer
self.delay("Display",delay = time,handler = self.clear_layer)
| StarcoderdataPython |
3485777 | <gh_stars>0
#!python
import string
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
def is_palindrome(text):
    """A string of characters is a palindrome if it reads the same forwards and
    backwards, ignoring punctuation, whitespace, and letter casing."""
    assert isinstance(text, str), 'input is not a string: {}'.format(text)
    # normalize: lowercase and keep ASCII letters only
    letters_only = ''.join(
        ch for ch in text.rstrip().lower() if ch in string.ascii_letters)
    # return is_palindrome_iterative(letters_only)
    return is_palindrome_recursive(letters_only)
def is_palindrome_iterative(text):
    """Return True if *text* reads the same forwards and backwards.

    Compares mirrored character pairs from the outside in and stops at
    the first mismatch; empty and one-character strings are palindromes.
    """
    for offset in range(len(text) // 2):
        if text[offset] != text[-(offset + 1)]:
            return False
    return True
def is_palindrome_recursive(text, left_index=None, right_index=None):
    """Return True if *text* reads the same forwards and backwards.

    On the initial call the cursor arguments may be omitted; recursion
    moves the two cursors towards the middle.

    BUG FIX: the original never initialized the ``None`` defaults, so
    calling it without indices raised ``TypeError`` on ``text[None]``,
    and an empty string raised ``IndexError``.
    """
    if left_index is None or right_index is None:
        left_index, right_index = 0, len(text) - 1
    # base cases: cursors met or crossed (covers empty/odd-length strings)
    if left_index >= right_index:
        return True
    if text[left_index] != text[right_index]:
        return False
    # recursion: move both cursors one step inward
    return is_palindrome_recursive(text, left_index + 1, right_index - 1)
def main():
    """Check each command-line argument for palindromicity, or print usage."""
    import sys
    args = sys.argv[1:]  # Ignore script file name
    if not args:
        print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
        print('  checks if each argument given is a palindrome')
        return
    for arg in args:
        is_pal = is_palindrome(arg)
        result = 'PASS' if is_pal else 'FAIL'
        is_str = 'is' if is_pal else 'is not'
        print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5097885 | <gh_stars>0
import chainer
import chainer.functions as F
import chainer.links as L
"""
Based on chainer official example
https://github.com/pfnet/chainer/tree/master/examples/ptb
Modified by shi3z March 28,2016
"""
class RNNLM(chainer.Chain):
    """Recurrent neural net language model for the Penn Treebank corpus.

    This is an example of a deep LSTM network for infinite length input.
    Input can be either a vocabulary id (embedded via ``embed``) or a raw
    feature vector (projected via ``inputVector``) depending on ``mode``.
    """
    def __init__(self, n_input_units=1000, n_vocab=100, n_units=100, train=True):
        super(RNNLM, self).__init__(
            inputVector=L.Linear(n_input_units, n_units),
            embed=L.EmbedID(n_vocab, n_units),
            l1=L.LSTM(n_units, n_units),
            l2=L.LSTM(n_units, n_units),
            l3=L.Linear(n_units, n_vocab),
        )
        # when True, dropout is active in __call__
        self.train = train

    def reset_state(self):
        """Clear the recurrent state of the LSTM layers.

        BUG FIX: only the LSTM links carry recurrent state; ``l3`` is a
        plain ``L.Linear`` with no ``reset_state`` method, so the original
        call to ``self.l3.reset_state()`` raised ``AttributeError``.
        """
        self.l1.reset_state()
        self.l2.reset_state()

    def __call__(self, x, mode=0):
        """Forward one step; ``mode == 1`` treats *x* as a dense input vector,
        any other mode treats it as vocabulary ids for the embedding."""
        if mode == 1:
            h0 = self.inputVector(x)
        else:
            h0 = self.embed(x)
        h1 = self.l1(F.dropout(h0, train=self.train))
        h2 = self.l2(F.dropout(h1, train=self.train))
        y = self.l3(F.dropout(h2, train=self.train))
        return y
| StarcoderdataPython |
281450 | <filename>track17/exceptions.py
"""
Define custom exceptions
"""
# public exception surface of this module
__all__ = (
    'Track17Exception',
    'InvalidCarrierCode',
    'DateProcessingError'
)
class Track17Exception(Exception):
    """Base error for the track17 package.

    Carries a human-readable message and an optional numeric code; when
    the code is set (truthy), it is appended to the string form.
    """

    def __init__(self, message: str, code: int = None):
        self.message = message
        self.code = code
        super().__init__()

    def __str__(self) -> str:
        suffix = f' (Code: {self.code})' if self.code else ''
        return f'{self.message}{suffix}'
class InvalidCarrierCode(Track17Exception):
    """Raised when a supplied carrier code is not valid."""
    pass
class DateProcessingError(Track17Exception):
    """Raised when a date value cannot be processed."""
    pass
| StarcoderdataPython |
193852 | <reponame>belang/pymtl
#=======================================================================
# Bus.py
#=======================================================================
from pymtl import *
class Bus( Model ):
    """PyMTL hardware model: an nports-wide bus with a select input.

    Every output port mirrors the input port chosen by ``sel``.
    """
    def __init__( s, nports, dtype ):
        # sel needs ceil(log2(nports)) bits to address every input
        sel_nbits = clog2( nports )
        s.in_ = [ InPort ( dtype ) for _ in range( nports ) ]
        s.out = [ OutPort ( dtype ) for _ in range( nports ) ]
        s.sel = InPort ( sel_nbits )
        @s.combinational
        def comb_logic():
            # every output is driven by the selected input (the loop index
            # only picks the output port) - presumably an intentional
            # broadcast; confirm if per-port routing was intended
            for i in range( nports ):
                s.out[i].value = s.in_[ s.sel ]
    def line_trace( s ):
        """Return a one-line trace: inputs ( sel ) outputs."""
        in_str = ' '.join( [ str(x) for x in s.in_ ] )
        sel_str = str( s.sel )
        out_str = ' '.join( [ str(x) for x in s.out ] )
        return '{} ( {} ) {}'.format( in_str, sel_str, out_str )
| StarcoderdataPython |
print("======================")
print("RADAR ELETRÔNICO!")
print("======================")
# speed limit (km/h) and fine rate (reais per km/h over the limit)
limite = 80.0
multa = 7
velocidade = float(input("Qual a sua velocidade: "))
if velocidade <= limite:
    print("Boa Tarde, cuidado na estrada, siga viagem!")
else:
    # fine grows linearly with the excess speed; use the named rate
    # ``multa`` instead of repeating the literal 7 as the original did
    valor = (velocidade - limite) * multa
    print(f"Você ultrapassou o limite de velocidade e foi multado em {valor:.2f} reais!")
5192629 | <reponame>Transkribus/TranskribusDU
'''
Created on 5 avr. 2019
@author: meunier
'''
import numpy as np
from graph.Graph import Graph
def test_one_edge():
    """Dual of a two-node, single-edge graph: one dual node, no dual edges."""
    g = Graph()
    node_features = np.array([[0, 0], [1, 11]])
    edges = np.array([[0, 1]])
    edge_features = np.array([[-0]])
    nfd, ed, efd = g.convert_X_to_LineDual((node_features, edges, edge_features))
    # the single primal edge becomes the only dual node
    assert (nfd == edge_features).all()
    assert (ed == np.array([])).all()
    assert (efd == np.array([])).all()
def test_two_edge():
    """Dual of a three-node path: two dual nodes joined by one dual edge."""
    g = Graph()
    node_features = np.array([[0, 0], [1, 11], [2, 22]])
    edges = np.array([[0, 1], [1, 2]])
    edge_features = np.array([[-0], [-1]])
    nfd, ed, efd = g.convert_X_to_LineDual((node_features, edges, edge_features))
    assert (nfd == edge_features).all()
    # the shared primal node 1 links the two dual nodes
    assert (ed == np.array([[0, 1]])).all()
    assert (efd == np.array([[1, 11]])).all()
def test_three_edge():
    """Dual of a three-node triangle: three dual nodes, fully connected."""
    g = Graph()
    node_features = np.array([[0, 0], [1, 11], [2, 22]])
    edges = np.array([[0, 1], [1, 2], [2, 0]])
    edge_features = np.array([[-0], [-1], [-2]])
    nfd, ed, efd = g.convert_X_to_LineDual((node_features, edges, edge_features))
    assert (nfd == edge_features).all()
    assert (ed == np.array([[0, 1],
                            [0, 2],
                            [1, 2]])).all(), ed
    # dual edge features are the features of the shared primal node
    assert (efd == np.array([[1, 11],
                             [0, 0],
                             [2, 22]])).all(), efd
def test_three_edge_and_lonely_node():
    """An isolated primal node must not change the dual of the triangle."""
    g = Graph()
    node_features = np.array([[0, 0], [1, 11], [2, 22], [9, 99]])  # [9, 99] is a lonely node
    edges = np.array([[0, 1], [1, 2], [2, 0]])
    edge_features = np.array([[-0], [-1], [-2]])
    nfd, ed, efd = g.convert_X_to_LineDual((node_features, edges, edge_features))
    assert (nfd == edge_features).all()
    assert (ed == np.array([[0, 1],
                            [0, 2],
                            [1, 2]])).all(), ed
    assert (efd == np.array([[1, 11],
                             [0, 0],
                             [2, 22]])).all(), efd
def test_basic_numpy_stuff():
    """Sanity-check the numpy idiom used to duplicate and reverse edges."""
    edges = np.arange(6).reshape((3, 2))
    expected = np.array([
        [0, 1],
        [2, 3],
        [4, 5],
        [1, 0],
        [3, 2],
        [5, 4]])
    # stacking the array on top of its column-swapped view doubles the
    # edge list with every edge reversed
    doubled = np.vstack((edges, edges[:, [1, 0]]))
    assert (doubled == expected).all()
| StarcoderdataPython |
3203782 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import random
import csv
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from collections import OrderedDict,defaultdict
if __name__ == '__main__':
    # parse args
    args = args_parser()
    # pick the GPU if requested and available, otherwise fall back to CPU
    args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
    # load dataset and split users
    if args.dataset == 'mnist':
        trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        dataset_train = datasets.MNIST('data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('data/mnist/', train=False, download=True, transform=trans_mnist)
        # sample users
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users = mnist_noniid(dataset_train, args.num_users)
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('data/cifar', train=True, download=True, transform=trans_cifar)
        dataset_test = datasets.CIFAR10('data/cifar', train=False, download=True, transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape
    # build model
    # NOTE: three copies of the global model are kept so that the baseline,
    # the 5-attacker and the 10-attacker experiments run side by side;
    # net_glob5/net_glob10 are only built on the cnn+mnist path - TODO
    # confirm other model/dataset combinations are intentionally unsupported
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
        net_glob5 = CNNMnist(args=args).to(args.device)
        net_glob10 = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_glob)
    net_glob.train()
    net_glob5.train()
    net_glob10.train()
    #STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId:flattended_updates}]
    malicious_structure5 = defaultdict()
    malicious_structure10 = defaultdict()
    #STRUCTURE: KEY = ROUND, VAL = [training_loss, {agentId: flattended_updates}]
    non_malicious_structure = defaultdict()
    non_malicious_structure5 = defaultdict()
    non_malicious_structure10 = defaultdict()
    # copy weights
    w_glob = net_glob.state_dict()
    w_glob5 = net_glob5.state_dict()
    w_glob10 = net_glob10.state_dict()
    # training - NO ATTACK
    loss_train = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []
    #VIVEK constant attack experiment - 5 MALICIOUS
    # the chosen agents replay their first-round update forever
    loss_train_5 = []
    fixed_agent_5 = random.sample(range(32),5)
    updates_recorded_mapping_5 = defaultdict(bool)
    for i in fixed_agent_5:
        updates_recorded_mapping_5[i] = False #KEY = agent no. & VAL = boolean
    fixed_agent_storage_mapping_5 = {} #KEY = agent no. & VAL = Fixed Updates
    count_array_5 = []
    #VIVEK constant attack experiment - 10 MALICIOUS
    loss_train_10 = []
    fixed_agent_10 = random.sample(range(32),10)
    updates_recorded_mapping_10 = defaultdict(bool)
    for i in fixed_agent_10:
        updates_recorded_mapping_10[i] = False
    fixed_agent_storage_mapping_10 = {}
    count_array_10 = []
    # main federated-training loop: each round trains locally for the
    # baseline, the 5-attacker and the 10-attacker experiments in parallel,
    # records flattened weight updates, then federated-averages each model
    for iter in range(args.epochs):
        malicious_structure5[iter] = [0.0,defaultdict()]
        malicious_structure10[iter] = [0.0,defaultdict()]
        non_malicious_structure[iter] = [0.0,defaultdict()]
        non_malicious_structure5[iter] = [0.0,defaultdict()]
        non_malicious_structure10[iter] = [0.0,defaultdict()]
        #agent_found_count = 0
        w_locals, loss_locals = [], [] #w_locals = array of local_weights
        w_locals_5, loss_locals_5 = [],[]
        w_locals_10, loss_locals_10 = [],[]
        m = max(int(args.frac * args.num_users), 1) #m = number of users used in one ROUND/EPOCH, check utils.options for more clarity on this
        idxs_users = np.random.choice(range(args.num_users), m, replace=False) #Randomly selecting m users out of 32 users. NEED TO REPLACE THIS WITH OUR SAMPLING MECHANISM
        for idx in idxs_users:
            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            local5 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            local10 = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))
            w5, loss5 = local5.train(net=copy.deepcopy(net_glob5).to(args.device))
            w10, loss10 = local10.train(net=copy.deepcopy(net_glob10).to(args.device))
            #STRUCTURE: {agentId:{flattened_updates}}
            agent_weight_dict = {idx:defaultdict()}
            # flatten each conv/fc weight tensor to 1-D for recording
            flattened_w = copy.deepcopy(w)
            new_val = flattened_w['conv1.weight'].reshape(-1)
            flattened_w['conv1.weight'] = new_val
            new_val = flattened_w['conv2.weight'].reshape(-1)
            flattened_w['conv2.weight'] = new_val
            new_val = flattened_w['fc1.weight'].reshape(-1)
            flattened_w['fc1.weight'] = new_val
            new_val = flattened_w['fc2.weight'].reshape(-1)
            flattened_w['fc2.weight'] = new_val
            non_malicious_structure[iter][1][idx] = flattened_w
            #print(flattened_w['conv1.weight'].shape)
            #print(flattened_w['conv1.bias'].shape)
            #print(flattened_w['conv2.weight'].shape)
            #print(flattened_w['conv2.bias'].shape)
            #print(flattened_w['fc1.weight'].shape)
            #print(flattened_w['fc1.bias'].shape)
            #print(flattened_w['fc2.weight'].shape)
            #print(flattened_w['fc2.bias'].shape)
            print("***BLAH BLAH BLAH***")
            # constant attack (5 attackers): a malicious agent replays the
            # update recorded in its first participating round forever
            if idx in fixed_agent_5:
                if updates_recorded_mapping_5[idx]:
                    w5 = copy.deepcopy(fixed_agent_storage_mapping_5[idx])
                elif not updates_recorded_mapping_5[idx]:
                    fixed_agent_storage_mapping_5[idx] = copy.deepcopy(w5)
                    updates_recorded_mapping_5[idx] = True
                flattened_w5 = copy.deepcopy(w5)
                new_val = flattened_w5['conv1.weight'].reshape(-1)
                flattened_w5['conv1.weight'] = new_val
                new_val = flattened_w5['conv2.weight'].reshape(-1)
                flattened_w5['conv2.weight'] = new_val
                new_val = flattened_w5['fc1.weight'].reshape(-1)
                flattened_w5['fc1.weight']= new_val
                new_val = flattened_w5['fc2.weight'].reshape(-1)
                flattened_w5['fc2.weight']= new_val
                #ADD DATA TO MALICIOUS STRUCTURE
                malicious_structure5[iter][1][idx] = flattened_w5
            if idx not in fixed_agent_5:
                flattened_w5 = copy.deepcopy(w5)
                new_val = flattened_w5['conv1.weight'].reshape(-1)
                flattened_w5['conv1.weight'] = new_val
                new_val = flattened_w5['conv2.weight'].reshape(-1)
                flattened_w5['conv2.weight'] = new_val
                new_val = flattened_w5['fc1.weight'].reshape(-1)
                flattened_w5['fc1.weight']= new_val
                new_val = flattened_w5['fc2.weight'].reshape(-1)
                flattened_w5['fc2.weight']= new_val
                #ADD DATA TO NON-MALICIOUS STRUCTURE
                non_malicious_structure5[iter][1][idx] = flattened_w5
            # constant attack (10 attackers): same replay scheme
            if idx in fixed_agent_10:
                if updates_recorded_mapping_10[idx]:
                    w10 = copy.deepcopy(fixed_agent_storage_mapping_10[idx])
                elif not updates_recorded_mapping_10[idx]:
                    fixed_agent_storage_mapping_10[idx] = copy.deepcopy(w10)
                    updates_recorded_mapping_10[idx] = True
                flattened_w10 = copy.deepcopy(w10)
                new_val = flattened_w10['conv1.weight'].reshape(-1)
                flattened_w10['conv1.weight'] = new_val
                new_val = flattened_w10['conv2.weight'].reshape(-1)
                flattened_w10['conv2.weight'] = new_val
                new_val = flattened_w10['fc1.weight'].reshape(-1)
                flattened_w10['fc1.weight']= new_val
                new_val = flattened_w10['fc2.weight'].reshape(-1)
                flattened_w10['fc2.weight']= new_val
                #ADD DATA TO MALICIOUS STRUCTURE
                malicious_structure10[iter][1][idx] = flattened_w10
            if idx not in fixed_agent_10:
                flattened_w10 = copy.deepcopy(w10)
                new_val = flattened_w10['conv1.weight'].reshape(-1)
                flattened_w10['conv1.weight'] = new_val
                new_val = flattened_w10['conv2.weight'].reshape(-1)
                flattened_w10['conv2.weight'] = new_val
                new_val = flattened_w10['fc1.weight'].reshape(-1)
                flattened_w10['fc1.weight']= new_val
                new_val = flattened_w10['fc2.weight'].reshape(-1)
                flattened_w10['fc2.weight']= new_val
                #ADD DATA TO NON-MALICIOUS STRUCTURE
                non_malicious_structure10[iter][1][idx] = flattened_w10
            #NO ATTACK
            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
            #5 MALICIOUS
            w_locals_5.append(copy.deepcopy(w5))
            loss_locals_5.append(copy.deepcopy(loss5))
            #10 MALICIOUS
            w_locals_10.append(copy.deepcopy(w10))
            loss_locals_10.append(copy.deepcopy(loss10))
        # update global weights
        w_glob = FedAvg(w_locals)
        w_glob_5 = FedAvg(w_locals_5)
        w_glob_10 = FedAvg(w_locals_10)
        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)
        net_glob5.load_state_dict(w_glob_5)
        net_glob10.load_state_dict(w_glob_10)
        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        loss_avg_5 = sum(loss_locals_5) / len(loss_locals_5)
        loss_avg_10 = sum(loss_locals_10) / len(loss_locals_10)
        non_malicious_structure[iter][0] = loss_avg
        non_malicious_structure5[iter][0] = loss_avg_5
        non_malicious_structure10[iter][0] = loss_avg_10
        malicious_structure5[iter][0] = loss_avg_5
        malicious_structure10[iter][0] = loss_avg_10
        print('NO ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        print('C5 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_5))
        print('C10 ATTACK ---> Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg_10))
        #count_array.append(agent_found_count)
        loss_train.append(loss_avg)
        loss_train_5.append(loss_avg_5)
        loss_train_10.append(loss_avg_10)
    # plot loss curve
    #plt.figure()
    #plt.subplots()
    #attack_no = plt.plot(range(len(loss_train)), loss_train)
    #attack_1 = plt.plot(range(len(loss_train_1)),loss_train_1)
    #plt.ylabel('train_loss')
    #plt.savefig('log/fed_{}_{}_{}_C{}_iid{}.png'.format(args.dataset, args.model, args.epochs, args.frac, args.iid))
    #print("COUNT DATA",str(count_array))
    print("NO ATTACK DATA=",loss_train)
    print("5 ATTACK DATA=",loss_train_5)
    print("10 ATTACK DATA=",loss_train_10)
    # dump the per-round loss and flattened updates of each experiment to CSV
    with open("no_malicious_records.csv","w+") as csv_file:
        writer = csv.writer(csv_file,delimiter=',')
        writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
        for items in non_malicious_structure.keys():
            writer.writerow((str(items),str(non_malicious_structure[items][0]),str(non_malicious_structure[items][1])))
    with open("5_no_malicious_records.csv","w+") as csv_file:
        writer = csv.writer(csv_file,delimiter=',')
        writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
        for items in non_malicious_structure5.keys():
            writer.writerow((str(items),str(non_malicious_structure5[items][0]),str(non_malicious_structure5[items][1])))
    with open("10_no_malicious_records.csv","w+") as csv_file:
        writer = csv.writer(csv_file,delimiter=',')
        writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
        for items in non_malicious_structure10.keys():
            writer.writerow((str(items),str(non_malicious_structure10[items][0]),str(non_malicious_structure10[items][1])))
    with open("5_malicious_records.csv","w+") as csv_file:
        writer = csv.writer(csv_file,delimiter=',')
        writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
        for items in malicious_structure5.keys():
            writer.writerow((str(items),str(malicious_structure5[items][0]),str(malicious_structure5[items][1])))
    with open("10_malicious_records.csv","w+") as csv_file:
        writer = csv.writer(csv_file,delimiter=',')
        writer.writerow(("ROUND","TRAIN_LOSS","AGENT_UPDATES"))
        for items in malicious_structure10.keys():
            writer.writerow((str(items),str(malicious_structure10[items][0]),str(malicious_structure10[items][1])))
    # testing
    # evaluate each of the three final global models on train and test sets
    net_glob.eval()
    #print("Agent_Found_Count",agent_found_count)
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy (NO ATTACK): {:.2f}".format(acc_train))
    print("Testing accuracy (NO ATTACK): {:.2f}".format(acc_test))
    net_glob5.eval()
    acc_train5, loss_train_5 = test_img(net_glob5, dataset_train, args)
    acc_test5, loss_test_5 = test_img(net_glob5, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_train5))
    print("Testing accuracy (CONSTANT ATTACK 5): {:.2f}".format(acc_test5))
    net_glob10.eval()
    acc_train10, loss_train_10 = test_img(net_glob10, dataset_train, args)
    acc_test10, loss_test_10 = test_img(net_glob10, dataset_test, args)
    print("Training accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_train10))
    print("Testing accuracy (CONSTANT ATTACK 10): {:.2f}".format(acc_test10))
| StarcoderdataPython |
11260631 | # django
from django.db import models
# graphql
from graphql.execution.base import ResolveInfo
# graphene
import graphene
# app
from ..registry import registry
def SnippetsQueryMixin():
    """Build a graphene mixin exposing a ``snippets`` query.

    The query is only added when at least one snippet type is registered;
    otherwise an empty mixin class is returned.
    """
    class Mixin:
        if registry.snippets:
            class Snippet(graphene.types.union.Union):
                # GraphQL union over every registered snippet type
                class Meta:
                    types = registry.snippets.types
            # snippets(typename: String!): look snippets up by GraphQL type name
            snippets = graphene.List(Snippet,
                                     typename=graphene.String(required=True))
            def resolve_snippets(self, _info: ResolveInfo, typename: str) -> models.Model:
                # map the GraphQL type name back to the concrete Django model
                node = registry.snippets_by_name[typename]
                cls = node._meta.model
                return cls.objects.all()
        else:  # pragma: no cover
            pass
    return Mixin
| StarcoderdataPython |
341700 | """
Author : <NAME>
Year : 2020
Model of the flask application, contains all the functions manipulating the database.
The database managed with TinyDB and stored in a file named **db.json**.
"""
from tinydb import TinyDB, Query, where
import networkx as nx
from networkx.algorithms import isomorphism as isoalg
# Build the database
db = TinyDB('db.json')
def add_iso(g_id):
    """Mark the graph ``g_id`` as a member of I (stored with type 'iso').

    :param g_id: id of a graph
    :return: True
    """
    db.insert(dict(type='iso', g_id=g_id))
    return True
def remove_iso(g_id):
    """Drop the 'iso' marker of graph ``g_id`` from the database.

    :param g_id: id of a graph
    :return: True
    """
    marker = Query()
    db.remove((marker.type == 'iso') & (marker.g_id == g_id))
    return True
def add_node(g_id, u_id, x, y):
    """Insert node ``u_id`` at coordinates (x, y) into graph ``g_id``.

    Duplicate nodes are rejected.

    :return: True when the node was inserted, False when it already existed
    """
    node = Query()
    already_there = db.search(
        (node.type == 'node') & (node.g_id == g_id) & (node.u_id == u_id))
    if already_there:
        return False
    db.insert({'type': 'node', 'g_id': g_id, 'u_id': u_id, 'x': x, 'y': y})
    return True
def add_edge(g_id, u_id, v_id):
    """Insert the edge {u_id, v_id} into graph ``g_id``.

    Endpoints are normalized into ascending order before storage.  Loops,
    edges with a missing endpoint and duplicate edges are rejected.

    :return: True when the edge was inserted, False otherwise
    """
    if u_id == v_id:
        return False
    u_id, v_id = min(u_id, v_id), max(u_id, v_id)
    q = Query()
    endpoints = db.search(
        (q.type == 'node') & (q.g_id == g_id) & ((q.u_id == u_id) | (q.u_id == v_id)))
    if len(endpoints) != 2:
        return False
    duplicates = db.search(
        (q.type == 'edge') & (q.g_id == g_id) & (q.u_id == u_id) & (q.v_id == v_id))
    if duplicates:
        return False
    db.insert({'type': 'edge', 'g_id': g_id, 'u_id': u_id, 'v_id': v_id})
    return True
def remove_node(g_id, u_id):
    """Delete node ``u_id`` (and every incident edge) from graph ``g_id``.

    :return: True when the node existed and was removed, False otherwise
    """
    q = Query()
    removed = db.remove((q.type == 'node') & (q.g_id == g_id) & (q.u_id == u_id))
    if not removed:
        return False
    # cascade: also drop every edge touching the removed node
    db.remove((q.type == 'edge') & (q.g_id == g_id) & ((q.u_id == u_id) | (q.v_id == u_id)))
    return True
def remove_edge(g_id, u_id, v_id):
    """Delete the edge {u_id, v_id} from graph ``g_id``.

    :return: True when the edge existed and was removed; False for loops or
        missing edges
    """
    if u_id == v_id:
        return False
    u_id, v_id = min(u_id, v_id), max(u_id, v_id)
    q = Query()
    removed = db.remove(
        (q.type == 'edge') & (q.g_id == g_id) & (q.u_id == u_id) & (q.v_id == v_id))
    return bool(removed)
def move_node(g_id, u_id, x, y):
    """Update the coordinates of node ``u_id`` in graph ``g_id``.

    :return: True when a matching node was updated, False otherwise
    """
    q = Query()
    updated = db.update({'x': x, 'y': y},
                        (q.type == 'node') & (q.g_id == g_id) & (q.u_id == u_id))
    return bool(updated)
def save_main_name(name):
    """Rename the main graph (id 0).

    :param name: new name; when None nothing is changed
    :return: True when the name was saved, False when *name* is None
    """
    if name is None:
        return False
    q = Query()
    db.upsert({'type': 'properties', 'g_id': 0, 'name': name},
              (q.type == 'properties') & (q.g_id == 0))
    return True
def remove_graph(g_id):
    """Purge every document belonging to graph ``g_id``.

    :return: True
    """
    db.remove(where('g_id') == g_id)
    return True
def erase_graph(g1_id, g2_id, name=None):
    """Overwrite graph ``g2_id`` with a copy of graph ``g1_id``.

    Every document of ``g2_id`` is removed, then every non-'iso' document of
    ``g1_id`` is copied over with its 'g_id' rewritten.  When ``g2_id`` is
    None a fresh id is allocated (one past the highest node-bearing graph id,
    or 1 for an empty database).  If *name* is given it becomes the copy's
    name.

    :param g1_id: id of the source graph
    :param g2_id: id of the destination graph, or None to allocate a new one
    :param name: optional new name for the copied graph
    :return: False if ``g1_id`` equals ``g2_id``, otherwise the id of the copy
    """
    if g1_id == g2_id:
        return False
    if g2_id is None:
        # default=0 keeps this from raising ValueError on an empty database
        g2_id = max((doc['g_id'] for doc in db.search(where('type') == 'node')),
                    default=0) + 1
    remove_graph(g2_id)
    q = Query()
    for doc in db.search((q.g_id == g1_id) & (q.type != 'iso')):
        copied = dict(doc)
        copied['g_id'] = g2_id
        db.insert(copied)
    if name is not None:
        db.upsert({'type': 'properties', 'g_id': g2_id, 'name': name},
                  (q.type == 'properties') & (q.g_id == g2_id))
    return g2_id
def _get_graph(g_id):
    """Build a networkx Graph from the stored nodes/edges of graph ``g_id``.

    An unknown id yields an empty graph.
    """
    q = Query()
    g = nx.Graph()
    for doc in db.search((q.g_id == g_id) & (q.type == 'node')):
        g.add_node(doc['u_id'])
    for doc in db.search((q.g_id == g_id) & (q.type == 'edge')):
        g.add_edge(doc['u_id'], doc['v_id'])
    return g
def get_graph_dict(g_id):
    """Describe graph ``g_id`` as a plain dict.

    :return: ``{'nodes': [(u_id, x, y), ...], 'edges': [(u_id, v_id), ...],
        'name': str ('' when absent), 'iso': bool}``
    """
    q = Query()
    nodes = [(doc['u_id'], doc['x'], doc['y'])
             for doc in db.search((q.g_id == g_id) & (q.type == 'node'))]
    edges = [(doc['u_id'], doc['v_id'])
             for doc in db.search((q.g_id == g_id) & (q.type == 'edge'))]
    properties = db.search((q.type == 'properties') & (q.g_id == g_id))
    name = properties[0]['name'] if properties else ''
    iso = bool(db.search((q.type == 'iso') & (q.g_id == g_id)))
    return {'nodes': nodes, 'edges': edges, 'name': name, 'iso': iso}
def get_graph_infos():
    """Summarize every stored graph, sorted by name.

    :return: list of ``{'graph_id', 'name', 'iso'}`` dicts
    """
    q = Query()
    iso_ids = {doc['g_id'] for doc in db.search(q.type == 'iso')}
    infos = [{'graph_id': doc['g_id'],
              'name': doc['name'],
              'iso': doc['g_id'] in iso_ids}
             for doc in db.search(q.type == 'properties')]
    return sorted(infos, key=lambda info: info['name'])
def get_induced_subgraphs():
    """Find all subgraphs of the main graph (id 0) isomorphic to a graph in I.

    For every graph id stored with type 'iso', every distinct node set of the
    main graph that induces an isomorphic subgraph is reported exactly once.

    :return: list of dicts with keys 'subgraph_id' (id of the matching 'iso'
        graph), 'nodes' (node ids of the induced subgraph) and 'edges'
        ((u_id, v_id) pairs of the induced subgraph)
    """
    q = Query()
    main_graph = _get_graph(0)
    # fix: avoid shadowing the builtin ``id`` and rebinding ``gm`` inside
    # its own iteration loop
    iso_ids = [res['g_id'] for res in db.search(q.type == 'iso')]
    seen_node_sets = set()
    induced_subgraphs = []
    for iso_id in iso_ids:
        pattern = _get_graph(iso_id)
        matcher = isoalg.GraphMatcher(main_graph, pattern)
        for mapping in matcher.subgraph_isomorphisms_iter():
            node_set = frozenset(mapping.keys())
            if node_set not in seen_node_sets:
                seen_node_sets.add(node_set)
                induced_subgraphs.append({
                    'subgraph_id': iso_id,
                    'nodes': list(node_set),
                    'edges': list(nx.Graph.subgraph(main_graph, node_set).edges()),
                })
    return induced_subgraphs
| StarcoderdataPython |
9656033 | import uuid
from app import db
from app.dao.dao_utils import transactional
from app.models import InboundNumber
def dao_get_inbound_numbers():
    """Return every inbound number, ordered by last update time."""
    return InboundNumber.query.order_by(InboundNumber.updated_at).all()
def dao_get_available_inbound_numbers():
    """Return active inbound numbers that are not assigned to any service."""
    return InboundNumber.query.filter(InboundNumber.active, InboundNumber.service_id.is_(None)).all()
def dao_get_inbound_number_for_service(service_id):
    """Return the inbound number assigned to *service_id*, or None."""
    return InboundNumber.query.filter(InboundNumber.service_id == service_id).first()
def dao_get_inbound_number(inbound_number_id):
    """Return the inbound number with the given id, or None."""
    return InboundNumber.query.filter(InboundNumber.id == inbound_number_id).first()
@transactional
def dao_set_inbound_number_to_service(service_id, inbound_number):
    """Assign *inbound_number* to *service_id* (commits via @transactional)."""
    inbound_number.service_id = service_id
    db.session.add(inbound_number)
@transactional
def dao_set_inbound_number_active_flag(service_id, active):
    """Set the active flag on the inbound number owned by *service_id*.

    NOTE(review): assumes the service has an inbound number; raises
    AttributeError on None otherwise — confirm callers guarantee this.
    """
    inbound_number = InboundNumber.query.filter(InboundNumber.service_id == service_id).first()
    inbound_number.active = active
    db.session.add(inbound_number)
@transactional
def dao_allocate_number_for_service(service_id, inbound_number_id):
    """Atomically claim an unassigned, active inbound number for a service.

    Raises when the number is inactive, already taken, or unknown.
    """
    # conditional UPDATE: only succeeds while the row is active and unclaimed
    updated = InboundNumber.query.filter_by(
        id=inbound_number_id,
        active=True,
        service_id=None
    ).update(
        {"service_id": service_id}
    )
    if not updated:
        raise Exception("Inbound number: {} is not available".format(inbound_number_id))
    return InboundNumber.query.get(inbound_number_id)
def dao_add_inbound_number(inbound_number):
    """Create and commit a new active 'pinpoint' inbound number.

    NOTE(review): commits manually instead of using @transactional like the
    other mutators here — confirm whether that inconsistency is intentional.
    """
    obj = InboundNumber(
        id=uuid.uuid4(),
        number=inbound_number,
        provider='pinpoint',
        active=True,
    )
    db.session.add(obj)
    db.session.commit()
| StarcoderdataPython |
3500829 | # -*- coding: utf-8 -*-
import argparse
import logging
import os
import numpy as np
import scipy.io as sio
from matplotlib import pyplot as plt
import utils
from model import dsfa
# DSFA network layout: two 128-unit hidden layers, 6 projected features
net_shape = [128, 128, 6]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow info/warning logs
logging.basicConfig(format='%(asctime)-15s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
def parser(argv=None):
    """Build the CLI parser and parse *argv*.

    :param argv: optional explicit argument list (defaults to sys.argv[1:]);
        mainly useful for testing
    :return: argparse.Namespace with epoch, lr, reg, trn, gpu and area
    """
    # renamed the local from ``parser`` to avoid shadowing this function
    ap = argparse.ArgumentParser(description='')
    ap.add_argument('-e', '--epoch', help='epoches', default=2000, type=int)
    ap.add_argument('-l', '--lr', help='learning rate', default=5 * 1e-5, type=float)
    ap.add_argument('-r', '--reg', help='regularization parameter', default=1e-4, type=float)
    ap.add_argument('-t', '--trn', help='number of training samples', default=2000, type=int)
    ap.add_argument('-g', '--gpu', help='GPU ID', default='0')
    ap.add_argument('--area', help='datasets', default='river')
    return ap.parse_args(argv)
def main(img1, img2, chg_map, args=None):
    """Run DSFA change detection on an image pair and save the change map.

    Removes the dead locals of the original (imm, all_magnitude, differ,
    chg_ref) while keeping every side-effecting call.

    :param img1: first co-registered image, H x W x C array
    :param img2: second co-registered image, same shape as img1
    :param chg_map: ground-truth change map used for the accuracy metrics
    :param args: parsed CLI namespace; must provide .area and .trn
    :return: None
    """
    img_shape = np.shape(img1)
    # flatten to (pixels, channels) and normalize both dates
    im1 = utils.normlize(np.reshape(img1, newshape=[-1, img_shape[-1]]))
    im2 = utils.normlize(np.reshape(img2, newshape=[-1, img_shape[-1]]))

    # load the CVA pre-detection result that selects training pixels
    ind = sio.loadmat(args.area + '/cva_ref.mat')
    cva_ind = np.reshape(ind['cva_ref'], newshape=[-1])
    i1, i2 = utils.getTrainSamples(cva_ind, im1, im2, args.trn)

    _loss_log, vpro, fcx, fcy, _bval = dsfa(
        xtrain=i1, ytrain=i2, xtest=im1, ytest=im2, net_shape=net_shape, args=args)

    _, magnitude, _ = utils.linear_sfa(fcx, fcy, vpro, shape=img_shape)
    magnitude = np.reshape(magnitude, img_shape[0:-1])
    change_map = np.reshape(utils.kmeans(np.reshape(magnitude, [-1])), img_shape[0:-1])

    # evaluate both label polarities; return values were never used, the
    # calls are kept for their reporting side effects
    utils.metric(1 - change_map, chg_map)
    utils.metric(change_map, chg_map)

    plt.imsave('results.png', change_map, cmap='gray')
    return None
if __name__ == '__main__':
    # parse CLI options, load the dataset pair, pin the GPU, run detection
    args = parser()
    img1, img2, chg_map = utils.data_loader(area=args.area)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    main(img1, img2, chg_map, args=args)
| StarcoderdataPython |
4855885 | # Light LEDs at random and make them fade over time
#
# Usage:
#
# led_dance(delay)
#
# 'delay' is the time between each new LED being turned on.
#
# TODO The random number generator is not great. Perhaps the accelerometer
# or compass could be used to add entropy.
import microbit
import random
def led_dance(delay):
    """Endlessly light random pixels, fading every pixel one step per frame.

    :param delay: milliseconds to sleep between display refreshes
    """
    brightness = [[0] * 5 for _ in range(5)]
    while True:
        row = random.randrange(5)
        col = random.randrange(5)
        brightness[row][col] = 8
        for i in range(5):
            for j in range(5):
                microbit.display.set_pixel(i, j, brightness[i][j])
                brightness[i][j] = max(brightness[i][j] - 1, 0)
        microbit.sleep(delay)
led_dance(100)  # run the animation forever with a 100 ms frame delay
| StarcoderdataPython |
319641 | <filename>slack/tests/conftest.py
import copy
import json
import time
import functools
from unittest.mock import Mock
import pytest
import requests
import asynctest
from slack.events import Event, EventRouter, MessageRouter
from slack.io.abc import SlackAPI
from slack.actions import Action
from slack.actions import Router as ActionRouter
from slack.commands import Router as CommandRouter
from slack.commands import Command
from . import data
try:
from slack.io.requests import SlackAPI as SlackAPIRequest
except ImportError:
SlackAPIRequest = None
TOKEN = "abcdefg"
class FakeIO(SlackAPI):
    """Minimal SlackAPI implementation whose I/O methods do nothing.

    Used as the default ``io_client`` so tests can replace ``_request``
    with a mock without touching the network.
    """
    async def _request(self, method, url, headers, body):
        # no-op: replaced by a Mock/CoroutineMock in the ``client`` fixture
        pass
    async def sleep(self, seconds):
        # a blocking sleep is acceptable in tests
        time.sleep(seconds)
    async def _rtm(self, url):
        # no-op: RTM streams come from the rtm_iterator fixtures
        pass
@pytest.fixture(params=(data.RTMEvents.__members__,))
def rtm_iterator(request):
    """Async generator factory yielding each canned RTM event once."""
    async def events(url):
        for key in request.param:
            yield data.RTMEvents[key].value
    return events
@pytest.fixture(params=(data.RTMEvents.__members__,))
def rtm_iterator_non_async(request):
    """Synchronous counterpart of ``rtm_iterator`` for non-async clients."""
    def events(url):
        for key in request.param:
            yield data.RTMEvents[key].value
    return events
@pytest.fixture(params=(FakeIO,))
def io_client(request):
    """The SlackAPI implementation class used to build ``client``."""
    return request.param
@pytest.fixture(
    params=(
        {"retry_when_rate_limit": True, "token": TOKEN},
        {"retry_when_rate_limit": False, "token": TOKEN},
    )
)
def client(request, io_client):
    """Build a slack client whose ``_request`` method is mocked.

    ``request.param`` may carry a ``_request`` key describing one fake HTTP
    response (dict) or a sequence of them (list); missing fields are filled
    in by ``_default_response``.
    """
    default_request = {
        "status": 200,
        "body": {"ok": True},
        "headers": {"content-type": "application/json; charset=utf-8"},
    }
    # NOTE(review): this normalizes request.param in place, so the mutation
    # is shared between parametrized runs reusing the same param dict.
    if "_request" not in request.param:
        request.param["_request"] = default_request
    elif isinstance(request.param["_request"], dict):
        request.param["_request"] = _default_response(request.param["_request"])
    elif isinstance(request.param["_request"], list):
        for index, item in enumerate(request.param["_request"]):
            request.param["_request"][index] = _default_response(item)
    else:
        # bug fix: the message must be %-formatted explicitly; ValueError
        # does not do lazy logging-style formatting, so the original raised
        # with an unformatted template and a stray tuple argument.
        raise ValueError(
            "Invalid `_request` parameters: %s" % (request.param["_request"],)
        )
    if "token" not in request.param:
        request.param["token"] = TOKEN
    # keyword args minus the underscore-prefixed control keys
    slackclient = io_client(
        **{k: v for k, v in request.param.items() if not k.startswith("_")}
    )
    if isinstance(request.param["_request"], dict):
        # single canned response: (status, body bytes, headers)
        return_value = (
            request.param["_request"]["status"],
            json.dumps(request.param["_request"]["body"]).encode(),
            request.param["_request"]["headers"],
        )
        if isinstance(slackclient, SlackAPIRequest):
            slackclient._request = Mock(return_value=return_value)
        else:
            slackclient._request = asynctest.CoroutineMock(return_value=return_value)
    else:
        # sequence of canned responses, returned one per call
        responses = [
            (
                response["status"],
                json.dumps(response["body"]).encode(),
                response["headers"],
            )
            for response in request.param["_request"]
        ]
        if isinstance(slackclient, SlackAPIRequest):
            slackclient._request = Mock(side_effect=responses)
        else:
            slackclient._request = asynctest.CoroutineMock(side_effect=responses)
    return slackclient
def _default_response(response):
default_response = {
"status": 200,
"body": {"ok": True},
"headers": {"content-type": "application/json; charset=utf-8"},
}
response = {**default_response, **response}
if "content-type" not in response["headers"]:
response["headers"]["content-type"] = default_response["headers"][
"content-type"
]
if isinstance(response["body"], str):
response["body"] = copy.deepcopy(data.Methods[response["body"]].value)
return response
@pytest.fixture(params={**data.Events.__members__, **data.Messages.__members__})
def raw_event(request):
    """Deep copy of one canned event/message payload (or the param itself)."""
    if isinstance(request.param, str):
        # parametrized by name: look the payload up in Events, then Messages
        try:
            return copy.deepcopy(data.Events[request.param].value)
        except KeyError:
            pass
        try:
            return copy.deepcopy(data.Messages[request.param].value)
        except KeyError:
            pass
        raise KeyError(f'Event "{request.param}" not found')
    else:
        return copy.deepcopy(request.param)
@pytest.fixture(params={**data.Events.__members__, **data.Messages.__members__})
def event(request):
    """``slack.events.Event`` built from every canned event/message."""
    return Event.from_http(raw_event(request))
@pytest.fixture(params={**data.Messages.__members__})
def message(request):
    """``slack.events.Event`` built from every canned message payload."""
    return Event.from_http(raw_event(request))
@pytest.fixture()
def token():
    """The fake OAuth token used by the test client."""
    # str is immutable, so copy.copy(TOKEN) returned the same object anyway;
    # return the constant directly.
    return TOKEN
@pytest.fixture()
def itercursor():
    """Fake pagination cursor used by iteration tests."""
    return "wxyz"
@pytest.fixture()
def event_router():
    """A fresh EventRouter per test."""
    return EventRouter()
@pytest.fixture()
def message_router():
    """A fresh MessageRouter per test."""
    return MessageRouter()
@pytest.fixture(
    params={
        **data.InteractiveMessage.__members__,
        **data.DialogSubmission.__members__,
        **data.MessageAction.__members__,
    }
)
def action(request):
    """``slack.actions.Action`` built from every canned action payload."""
    return Action.from_http(raw_action(request))
@pytest.fixture(params={**data.InteractiveMessage.__members__})
def interactive_message(request):
    """Action built from interactive-message payloads only."""
    return Action.from_http(raw_action(request))
@pytest.fixture(params={**data.DialogSubmission.__members__})
def dialog_submission(request):
    """Action built from dialog-submission payloads only."""
    return Action.from_http(raw_action(request))
@pytest.fixture(params={**data.MessageAction.__members__})
def message_action(request):
    """Action built from message-action payloads only."""
    return Action.from_http(raw_action(request))
@pytest.fixture(
    params={
        **data.InteractiveMessage.__members__,
        **data.DialogSubmission.__members__,
        **data.MessageAction.__members__,
    }
)
def raw_action(request):
    """Deep copy of one canned action payload (or the param itself)."""
    if isinstance(request.param, str):
        # parametrized by name: try each action enum in turn
        try:
            return copy.deepcopy(data.InteractiveMessage[request.param].value)
        except KeyError:
            pass
        try:
            return copy.deepcopy(data.DialogSubmission[request.param].value)
        except KeyError:
            pass
        return copy.deepcopy(data.MessageAction[request.param].value)
    else:
        return copy.deepcopy(request.param)
@pytest.fixture()
def action_router():
    """A fresh actions Router per test."""
    return ActionRouter()
@pytest.fixture(params={**data.Commands.__members__})
def raw_command(request):
    """Deep copy of one canned slash-command payload (or the param itself)."""
    if isinstance(request.param, str):
        return copy.deepcopy(data.Commands[request.param].value)
    else:
        return copy.deepcopy(request.param)
@pytest.fixture(params={**data.Commands.__members__})
def command(request):
    """``slack.commands.Command`` built from every canned payload."""
    return Command(raw_command(request))
@pytest.fixture()
def command_router():
    """A fresh commands Router per test."""
    return CommandRouter()
| StarcoderdataPython |
8192536 | <reponame>shawnmullaney/python-isc-dhcp-leases
from distutils.core import setup, Command
def discover_and_run_tests():
    """Discover and run all unittest tests next to setup.py; exit(1) on failure."""
    import os
    import sys
    import unittest
    # tests live in the directory containing setup.py (the running __main__)
    setup_file = sys.modules['__main__'].__file__
    setup_dir = os.path.abspath(os.path.dirname(setup_file))
    # discovery needs Python 2.7+; report results on stderr
    suite = unittest.defaultTestLoader.discover(setup_dir)
    outcome = unittest.TextTestRunner().run(suite)
    if len(outcome.failures) + len(outcome.errors) > 0:
        exit(1)
class DiscoverTest(Command):
    """``python setup.py test``: discover and run the package's unit tests."""
    user_options = []  # no command-line options
    def initialize_options(self):
        pass  # required by the Command interface; nothing to initialize
    def finalize_options(self):
        pass  # required by the Command interface; nothing to finalize
    def run(self):
        # delegate to the module-level discovery helper
        discover_and_run_tests()
# package metadata; `python setup.py test` is wired to DiscoverTest above
setup(
    name='isc_dhcp_leases',
    version='0.9.1',
    packages=['isc_dhcp_leases'],
    url='https://github.com/MartijnBraam/python-isc-dhcp-leases',
    install_requires=['six'],
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Small python module for reading /var/lib/dhcp/dhcpd.leases from isc-dhcp-server',
    cmdclass={'test': DiscoverTest},
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ]
)
| StarcoderdataPython |
289919 | from io import BytesIO
from os import makedirs, path
from configparser import ConfigParser, SectionProxy
from rich import print
from jinja2 import Environment
import click
from docker import APIClient, errors
from freshenv.console import console
from freshenv.provision import get_dockerfile_path
from requests import exceptions
homedir = path.expanduser("~")
# per-user flavour definitions live in ~/.freshenv/freshenv (INI format)
freshenv_config_location = homedir + "/.freshenv/freshenv"
def create_dockerfile(base: str, install: str, cmd: str) -> str:
    """Render the 'simple' Dockerfile template with the given settings."""
    template_source = get_dockerfile_path("simple").decode("utf-8")
    template = Environment(autoescape=True).from_string(str(template_source))
    return template.render(base=base, install=install, cmd=cmd)
def config_exists() -> bool:
    """Return True when the freshenv config file exists."""
    # isfile already returns the bool; no need for an if/return-True dance
    return path.isfile(freshenv_config_location)
def get_key_values_from_config(flavour: str) -> SectionProxy:
    """Return the [*flavour*] section of the freshenv config file."""
    parser = ConfigParser()
    parser.read(freshenv_config_location)
    return parser[flavour]
def env_exists(flavour: str) -> bool:
    """Return True when a [*flavour*] section exists in the freshenv config."""
    config = ConfigParser()
    config.read(freshenv_config_location)
    # membership test replaces the original if/return-False/return-True chain
    return flavour in config.sections()
def mandatory_keys_exists(flavour: str) -> bool:
    """Return True when the [*flavour*] section declares base, install and cmd."""
    config = ConfigParser()
    config.read(freshenv_config_location)
    section = config[flavour]
    # all() replaces three copy-pasted membership checks
    return all(key in section for key in ("base", "install", "cmd"))
def create_file(location: str) -> None:
    """Create an empty file at *location*, making parent directories as needed."""
    parent = path.dirname(location)
    # guard: for a bare filename dirname() is '' and makedirs('') would raise
    if parent:
        makedirs(parent, exist_ok=True)
    open(location, "w", encoding="utf8").close()
def run_checks(flavour: str) -> bool:
    """Validate that the config file and the *flavour* section are usable.

    Creates an empty config file on first run.  Returns True only when the
    flavour exists and declares the mandatory base/install/cmd keys.
    """
    if not config_exists():
        print(f":card_index: No config file found. Creating an empty config at {freshenv_config_location}.")
        create_file(freshenv_config_location)
        return False
    if not env_exists(flavour):
        print(f":exclamation_mark:configuration for custom flavour {flavour} does not exist.")
        return False
    if not mandatory_keys_exists(flavour):
        # bug fix: the f prefix was missing, so {flavour} was printed literally
        print(f":exclamation_mark: missing mandatory keys in configuration for custom environment {flavour}.")
        return False
    return True
@click.command("build")
@click.argument("flavour")
@click.option('--logs', '-l', is_flag=True, help="Show build logs")
def build(flavour: str, logs: bool) -> None:
    """Build a custom freshenv flavour."""
    # bail out early when the config file / flavour section is missing or invalid
    if not run_checks(flavour):
        return
    flavour_config = get_key_values_from_config(flavour)
    flavour_dockerfile = create_dockerfile(flavour_config["base"], flavour_config["install"], flavour_config["cmd"])
    try:
        # talk to the local Docker daemon over its unix socket
        client = APIClient(base_url="unix://var/run/docker.sock")
        with console.status("Building custom flavour...", spinner="point"):
            # stream decoded build events; surface daemon-side failures as exceptions
            for line in client.build(fileobj=BytesIO(flavour_dockerfile.encode("utf-8")), tag=f"raiyanyahya/freshenv-flavours/{flavour}", rm=True, pull=True, decode=True):
                if "errorDetail" in line:
                    raise Exception(line["errorDetail"]["message"])
                if logs:
                    print(line)
        print(f":party_popper: Successfully built custom flavour {flavour}. You can provision it by running [bold]freshenv provision -f {flavour}[/bold].")
    except (errors.APIError, exceptions.HTTPError):
        print(":x: Custom flavour could not be built. Try again after cleaning up with [bold]fr clean --force [/bold]")
    except Exception as e:
        print(f":x: Custom flavour could not be built due to the error: {e}.")
1659967 | <reponame>harshlohia11/Text-Detection
from imutils.object_detection import non_max_suppression
import numpy as np
import cv2
import pytesseract
import argparse
import time
# ---- command-line interface ------------------------------------------------
ap=argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str,
	help="path to input image")
ap.add_argument("-east", "--east", type=str,
	help="path to input EAST text detector")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
	help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=320,
	help="resized image width (should be multiple of 32)")
ap.add_argument("-e", "--height", type=int, default=320,
	help="resized image height (should be multiple of 32)")
args = vars(ap.parse_args())
# ---- load and resize the input image ---------------------------------------
image=cv2.imread(args["image"])
original=image.copy()
(H,W)=image.shape[:2]
(newh,neww)=(args["width"],args["height"])
# ratios used later to scale detected boxes back to the original resolution
rw=W/float(neww)
rh=H/float(newh)
# resize the image because the EAST detector requires dimensions that are
# multiples of 32
image=cv2.resize(image,(neww,newh))
(H,W)=image.shape[:2]
layers=["feature_fusion/Conv_7/Sigmoid","feature_fusion/concat_3"]
#the first layer is a sigmoid activation function that gives us the probability if a text is there or not
#the second layer gives us the geometric dimensions of the text in the image.
#now loading the east text detector
print("[INFO] loading EAST text detector...")
net=cv2.dnn.readNet(args["east"])
# mean-subtraction values are the ImageNet BGR means expected by EAST
blob=cv2.dnn.blobFromImage(image,1.0,(W,H),(123.68, 116.78, 103.94),swapRB=True,crop=False)
start = time.time()
net.setInput(blob)
(scores,geometry)=net.forward(layers) #will return the probablistic score and geometry
end = time.time()
print("[INFO] text detection took {:.6f} seconds".format(end - start))
(nRows,nColumns)=scores.shape[2:4]
# ---- decode the score/geometry maps into candidate boxes -------------------
cord=[] #will store the geometric dimensions of the text boxes
confidence=[] #will store the probabilistic score of each box
for y in range(0,nRows,1):
    scoresData=scores[0,0,y]
    # distances from the anchor point to the box's top/right/bottom/left edges
    x0=geometry[0,0,y]
    x1=geometry[0,1,y]
    x2=geometry[0,2,y]
    x3=geometry[0,3,y]
    anglesData=geometry[0,4,y]
    for x in range(0,nColumns,1):
        #if our score doesnt matches minimum required confidence we set it will ignore that part
        if scoresData[x]<args["min_confidence"]:
            continue
        #the EAST detector works on a feature map 4x smaller than the input,
        #so map the cell position back by multiplying by four
        (offsetX,offsetY)=(x*4.0,y*4.0)
        angle=anglesData[x] #extracting the rotation angle
        cos=np.cos(angle)
        sin=np.sin(angle)
        #now we will find out the box coordinates
        h= x0[x] + x2[x]
        w= x1[x] + x3[x]
        endX = int(offsetX + (cos * x1[x]) + (sin * x2[x]))
        endY = int(offsetY - (sin * x1[x]) + (cos * x2[x]))
        startX = int(endX - w)
        startY = int(endY - h)
        cord.append((startX, startY, endX, endY))
        confidence.append(scoresData[x])
# suppress overlapping detections, keeping the highest-scoring boxes
box=non_max_suppression(np.array(cord),probs=confidence)
# ---- draw the surviving boxes on the full-size original image --------------
for (startX,startY,endX,endY) in box:
    #scale the coordinates of the boxes back to the original image size
    startX=int(startX*rw)
    startY=int(startY*rh)
    endX=int(endX*rw)
    endY=int(endY*rh)
    #draw the bounding box on the untouched copy made at the beginning
    cv2.rectangle(original,(startX,startY),(endX,endY),(0,0,255),3)
cv2.imshow("Text Image",original)
cv2.waitKey(0)
| StarcoderdataPython |
class Solution(object):
    def findDisappearedNumbers(self, nums):
        """Return, in increasing order, every value in [1, len(nums)]
        that does not appear in nums.

        :type nums: List[int]
        :rtype: List[int]
        """
        seen = set(nums)
        return [value for value in range(1, len(nums) + 1) if value not in seen]
# quick manual check: expect [5, 6]
p = Solution()
nums = [4,3,2,7,8,2,3,1]
print(p.findDisappearedNumbers(nums))
5035100 | from typing import List
from data_sets_reporter.classes.data_class.data_set_info_for_reporter import DataSetInfoForReporter
from data_sets_reporter.classes.data_set_string_reporter.data_set_validator.data_set_report_validator import DataSetValidator
from data_sets_reporter.exceptions.register_exeptions import WrongInputFormatError, NonIterableObjectError
class DataSetInfoValidator(DataSetValidator):
    """Validates a list of DataSetInfoForReporter entries before reporting."""

    def validate(self, data_sets_info: List[DataSetInfoForReporter]):
        """Raise a register exception when *data_sets_info* is malformed.

        Raises WrongInputFormatError for None input or bad entries, and
        NonIterableObjectError when the input is not a list.
        """
        if data_sets_info is None:
            raise WrongInputFormatError
        if not isinstance(data_sets_info, list):
            raise NonIterableObjectError
        for entry in data_sets_info:
            self._validate_entry(entry)

    def _validate_entry(self, entry):
        # check order mirrors the original: type, name, columns container,
        # then each column name
        if not isinstance(entry, DataSetInfoForReporter):
            raise WrongInputFormatError
        if not isinstance(entry.data_set_name, str):
            raise WrongInputFormatError
        if not isinstance(entry.data_set_columns, list):
            raise WrongInputFormatError
        for column_name in entry.data_set_columns:
            if not isinstance(column_name, str):
                raise WrongInputFormatError
| StarcoderdataPython |
12864929 | <reponame>AntonVasko/CodeClub-2021-SUMMER<filename>4. 01.07.2021/0. Secret Messages. New position.py
# Secret Messages: report a letter's alphabet index and its index shifted by a fixed key.
letters = 'abcdefghijklmnopqrstuvwxyz'
shift = 3
char = input('Please enter a character ')
pos = letters.find(char)
print('Position of a character ', char, ' is ', pos)
shifted = pos + shift
print('New position of a character ', char, ' is ', shifted)
| StarcoderdataPython |
4871474 | <filename>SfmLearner-Pytorch/loss_functions.py
from __future__ import division
import torch
from torch import nn
import torch.nn.functional as F
from inverse_warp import inverse_warp
class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images.

    Produces a per-pixel dissimilarity map ``clamp((1 - SSIM) / 2, 0, 1)``:
    0 where the local 3x3 neighbourhoods of x and y match, up to 1 where
    they differ most. Inputs are reflection-padded by 1 pixel so the output
    keeps the input's spatial size.
    """
    def __init__(self):
        super(SSIM, self).__init__()
        # 3x3 stride-1 average pools implement the local means and (co)variances.
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        # Standard SSIM stabilisation constants (K1=0.01, K2=0.03); values
        # assume a pixel dynamic range of 1 -- TODO confirm inputs are in [0, 1].
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2
    def forward(self, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        # Local (co)variances via E[x^2] - E[x]^2.
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
        # Map similarity in [-1, 1] to a loss in [0, 1].
        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
def photometric_reconstruction_loss_ssim(tgt_img, ref_imgs, intrinsics,
                                         depth, explainability_mask, pose,
                                         rotation_mode='euler', padding_mode='zeros', mini_reproj=False, auto_mask=False):
    """Multi-scale photometric loss mixing L1 (weight 0.15) and SSIM (0.85).

    For each predicted depth scale, each reference image is warped into the
    target view via `inverse_warp` and compared against the (area-downscaled)
    target. Options:
        mini_reproj: take the per-pixel minimum error over reference images
            instead of concatenating all of them.
        auto_mask: zero out pixels where the warped error is not smaller
            than the unwarped (static-scene) error.
    Returns (total_loss, warped_images_per_scale, diff_maps_per_scale).
    NOTE(review): requires CUDA (SSIM module is moved to .cuda() unconditionally).
    """
    def one_scale(depth, explainability_mask):
        assert(explainability_mask is None or depth.size()[2:] == explainability_mask.size()[2:])
        assert(pose.size(1) == len(ref_imgs))
        reconstruction_loss = 0
        b, _, h, w = depth.size()
        # Rescale target/refs/intrinsics to this depth scale's resolution.
        downscale = tgt_img.size(2)/h
        tgt_img_scaled = F.interpolate(tgt_img, (h, w), mode='area')
        ref_imgs_scaled = [F.interpolate(ref_img, (h, w), mode='area') for ref_img in ref_imgs]
        intrinsics_scaled = torch.cat((intrinsics[:, 0:2]/downscale, intrinsics[:, 2:]), dim=1)
        warped_imgs = []
        diff_maps = []
        pes = []
        ssim = SSIM()
        ssim = ssim.cuda()
        for i, ref_img in enumerate(ref_imgs_scaled):
            current_pose = pose[:, i]
            ref_img_warped, valid_points = inverse_warp(ref_img, depth[:,0], current_pose,
                                                        intrinsics_scaled,
                                                        rotation_mode, padding_mode)
            # 0.15 * L1 + 0.85 * SSIM dissimilarity.
            # NOTE(review): unlike the plain L1 variant below, valid_points
            # from inverse_warp is not applied here -- confirm intended.
            diff = (0.15 * (tgt_img_scaled - ref_img_warped)+ 0.85 * ssim(tgt_img_scaled,ref_img_warped))
            if explainability_mask is not None:
                diff = diff * explainability_mask[:,i:i+1].expand_as(diff)
            reproj_loss = diff.abs().mean(1, True)
            pes.append(reproj_loss)
            warped_imgs.append(ref_img_warped[0])
            diff_maps.append(diff[0])
        if mini_reproj:
            # Per-pixel minimum reprojection error over reference frames.
            pes = torch.cat(pes, 1)
            pe, _ = torch.min(pes, dim=1, keepdim=True)
        else:
            pe = torch.cat(pes, 1)
        if auto_mask:
            # Compare against the error of the *unwarped* references; keep
            # only pixels where warping actually reduced the error.
            pes_o = []
            for i, ref_img in enumerate(ref_imgs_scaled):
                diff = 0.15 * (tgt_img_scaled - ref_img)+ 0.85 * ssim(tgt_img_scaled,ref_img)
                reproj_loss = diff.abs().mean(1, True)
                pes_o.append(reproj_loss)
            if mini_reproj:
                pes_o = torch.cat(pes_o, 1)
                pe_o, _ = torch.min(pes_o, dim=1, keepdim=True)
            else:
                pe_o = torch.cat(pes_o, 1)
            mask = (pe < pe_o).float()
            pe = pe * mask
        reconstruction_loss = pe.mean()
        return reconstruction_loss, warped_imgs, diff_maps
    warped_results, diff_results = [], []
    # Accept single tensors or per-scale lists for depth/mask.
    if type(explainability_mask) not in [tuple, list]:
        explainability_mask = [explainability_mask]
    if type(depth) not in [list, tuple]:
        depth = [depth]
    total_loss = 0
    for d, mask in zip(depth, explainability_mask):
        loss, warped, diff = one_scale(d, mask)
        total_loss += loss
        warped_results.append(warped)
        diff_results.append(diff)
    return total_loss, warped_results, diff_results
def photometric_reconstruction_loss(tgt_img, ref_imgs, intrinsics,
                                    depth, explainability_mask, pose,
                                    rotation_mode='euler', padding_mode='zeros', mini_reproj=False, auto_mask=False):
    """Multi-scale L1 photometric reconstruction loss (no SSIM term).

    Same structure as photometric_reconstruction_loss_ssim: warp each
    reference image into the target view per depth scale and compare.
    Here invalid warped pixels are zeroed via inverse_warp's valid_points
    mask, and the per-scale loss is scaled by len(ref_imgs).
    NOTE(review): the SSIM variant does not apply that final scaling --
    confirm the intended relative weighting between the two.
    Returns (total_loss, warped_images_per_scale, diff_maps_per_scale).
    """
    def one_scale(depth, explainability_mask):
        assert(explainability_mask is None or depth.size()[2:] == explainability_mask.size()[2:])
        assert(pose.size(1) == len(ref_imgs))
        b, _, h, w = depth.size()
        # Rescale target/refs/intrinsics to this depth scale's resolution.
        downscale = tgt_img.size(2)/h
        tgt_img_scaled = F.interpolate(tgt_img, (h, w), mode='area')
        ref_imgs_scaled = [F.interpolate(ref_img, (h, w), mode='area') for ref_img in ref_imgs]
        intrinsics_scaled = torch.cat((intrinsics[:, 0:2]/downscale, intrinsics[:, 2:]), dim=1)
        warped_imgs = []
        diff_maps = []
        pes = []
        for i, ref_img in enumerate(ref_imgs_scaled):
            current_pose = pose[:, i]
            ref_img_warped, valid_points = inverse_warp(ref_img, depth[:,0], current_pose,
                                                        intrinsics_scaled,
                                                        rotation_mode, padding_mode)
            # Zero out pixels that projected outside the reference frame.
            diff = (tgt_img_scaled - ref_img_warped) * valid_points.unsqueeze(1).float()
            if explainability_mask is not None:
                diff = diff * explainability_mask[:,i:i+1].expand_as(diff)
            l1_loss = diff.abs().mean(1, True)
            pes.append(l1_loss)
            warped_imgs.append(ref_img_warped[0])
            diff_maps.append(diff[0])
        if mini_reproj:
            # Per-pixel minimum reprojection error over reference frames.
            pes = torch.cat(pes, 1)
            pe, _ = torch.min(pes, dim=1, keepdim=True)
        else:
            pe = torch.cat(pes, 1)
        if auto_mask:
            # Keep only pixels where warping reduced the error vs. the
            # unwarped (static-scene) comparison.
            pes_o = []
            for i, ref_img in enumerate(ref_imgs_scaled):
                diff = tgt_img_scaled - ref_img
                l1_loss = diff.abs().mean(1, True)
                pes_o.append(l1_loss)
            if mini_reproj:
                pes_o = torch.cat(pes_o, 1)
                pe_o, _ = torch.min(pes_o, dim=1, keepdim=True)
            else:
                pe_o = torch.cat(pes_o, 1)
            mask = (pe < pe_o).float()
            pe = pe * mask
        reconstruction_loss = pe.mean() * len(ref_imgs)
        return reconstruction_loss, warped_imgs, diff_maps
    warped_results, diff_results = [], []
    # Accept single tensors or per-scale lists for depth/mask.
    if type(explainability_mask) not in [tuple, list]:
        explainability_mask = [explainability_mask]
    if type(depth) not in [list, tuple]:
        depth = [depth]
    total_loss = 0
    for d, mask in zip(depth, explainability_mask):
        loss, warped, diff = one_scale(d, mask)
        total_loss += loss
        warped_results.append(warped)
        diff_results.append(diff)
    return total_loss, warped_results, diff_results
def explainability_loss(mask):
    """Regularise explainability masks toward 1 (everything explainable).

    Accepts a single tensor or a tuple/list of tensors (one per scale)
    and sums the binary cross-entropy of each mask against an all-ones
    target of the same shape.
    """
    masks = mask if type(mask) in [tuple, list] else [mask]
    total = 0
    for scaled in masks:
        target = torch.ones_like(scaled)
        total = total + nn.functional.binary_cross_entropy(scaled, target)
    return total
def smooth_loss(pred_map):
    """Second-order smoothness penalty on one or more prediction maps.

    Penalises the mean absolute second derivatives (xx, xy, yx, yy) of
    each map; coarser scales are weighted down by successive divisions
    by 2.3. Accepts a single tensor or a tuple/list of per-scale tensors.
    """
    def gradient(pred):
        dy = pred[:, :, 1:] - pred[:, :, :-1]
        dx = pred[:, :, :, 1:] - pred[:, :, :, :-1]
        return dx, dy

    maps = pred_map if type(pred_map) in [tuple, list] else [pred_map]
    loss = 0
    weight = 1.
    for scaled in maps:
        dx, dy = gradient(scaled)
        dx2, dxdy = gradient(dx)
        dydx, dy2 = gradient(dy)
        loss += (dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean())*weight
        weight /= 2.3  # empirically chosen per-scale decay (original author's note)
    return loss
def smooth_loss_2(pred_map, tgt_img):
    """Edge-aware first-order smoothness loss.

    Each prediction map is normalised by its spatial mean, and its x/y
    gradients are downweighted by exp(-|image gradient|) so discontinuities
    are only penalised where the target image is locally uniform. Coarser
    scales are weighted down by successive halving.
    """
    def gradient(img):
        # Mean absolute forward difference across channels, keeping dims.
        grad_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
        grad_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
        return grad_x, grad_y
    if type(pred_map) not in [tuple, list]:
        pred_map = [pred_map]
    loss = 0
    weight = 1.
    for scaled_map in pred_map:
        b, _, h, w = scaled_map.size()
        # Bring the target image to this scale's resolution.
        tgt_img_scaled = F.interpolate(tgt_img, (h, w), mode='area')
        # Normalise by the per-map spatial mean so the penalty is scale-invariant.
        mean_map = scaled_map.mean(2, True).mean(3, True)
        norm_map = scaled_map / (mean_map + 1e-7)
        grad_disp_x, grad_disp_y = gradient(norm_map)
        grad_img_x, grad_img_y = gradient(tgt_img_scaled)
        # Suppress the penalty across image edges.
        grad_disp_x *= torch.exp(-grad_img_x)
        grad_disp_y *= torch.exp(-grad_img_y)
        loss += (grad_disp_x.mean() + grad_disp_y.mean())*weight
        weight /= 2
    return loss
@torch.no_grad()
def compute_errors(gt, pred, crop=True):
    """Standard monocular-depth evaluation metrics, averaged over the batch.

    Per image: keep ground-truth pixels in (0, 80), optionally restrict to
    the Garg crop, median-scale the prediction to the ground truth, then
    accumulate abs_diff, abs_rel, sq_rel and the delta < 1.25 / 1.25^2 /
    1.25^3 accuracy ratios.
    Returns [abs_diff, abs_rel, sq_rel, a1, a2, a3] as Python floats.
    """
    abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0,0,0,0,0,0
    batch_size = gt.size(0)
    '''
    crop used by Garg ECCV16 to reprocude Eigen NIPS14 results
    construct a mask of False values, with the same size as target
    and then set to True values inside the crop
    '''
    if crop:
        # gt[0] != gt[0] is an all-False boolean mask of one image's shape.
        crop_mask = gt[0] != gt[0]
        y1,y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
        x1,x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
        crop_mask[y1:y2,x1:x2] = 1
    for current_gt, current_pred in zip(gt, pred):
        valid = (current_gt > 0) & (current_gt < 80)
        if crop:
            valid = valid & crop_mask
        valid_gt = current_gt[valid]
        valid_pred = current_pred[valid].clamp(1e-3, 80)
        # Median scaling: align the prediction's scale to the ground truth.
        valid_pred = valid_pred * torch.median(valid_gt)/torch.median(valid_pred)
        thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))
        a1 += (thresh < 1.25).float().mean()
        a2 += (thresh < 1.25 ** 2).float().mean()
        a3 += (thresh < 1.25 ** 3).float().mean()
        abs_diff += torch.mean(torch.abs(valid_gt - valid_pred))
        abs_rel += torch.mean(torch.abs(valid_gt - valid_pred) / valid_gt)
        sq_rel += torch.mean(((valid_gt - valid_pred)**2) / valid_gt)
    return [metric.item() / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3]]
| StarcoderdataPython |
1837621 | #!/usr/bin/env python
# Copyright 2014 Netflix
"""Append missing newlines to the end of source code files
"""
import os
import stat
SOURCE_CODE_EXTENSIONS = set(('py',)) # 'css','js','html',...
def walk(path):
    """Wraps os.walk, flattening its output into a list of full file paths."""
    found = []
    for root, _, filenames in os.walk(path):
        found.extend(os.path.join(root, name) for name in filenames)
    return found
def get_last_byte(name):
    """Return the last byte in a file.

    NOTE(review): the file is opened in text mode and seeked relative to
    the end (whence=2); that works under Python 2 (this module uses py2
    print statements) but raises io.UnsupportedOperation on Python 3 text
    streams. Also fails on an empty file, where seek(-1) is invalid.
    """
    with open(name, 'r') as infp:
        infp.seek(-1, 2)
        return infp.read(1)
def configure(args):
    """Register this tool's CLI options on *args* (an argparse parser or group)."""
    args.add_argument('-n', '--dry-run', action='store_true', help='dry run')
    args.add_argument('name_list', metavar='NAME', nargs='+', help='file or directory name')
def main(args):
    """Expand the given names into files, then append a trailing newline to
    any recognised source file whose last byte is not already a newline.

    In dry-run mode only reports the offending files. Files without write
    permission are silently skipped. (Python 2 module -- print statements.)
    """
    files = []
    for name in args.name_list:
        name = os.path.abspath(name)
        fstat = os.stat(name)
        if stat.S_ISDIR(fstat.st_mode):
            # Directories are expanded recursively.
            files.extend(walk(name))
        else:
            files.append(name)
    # Filter by extension, then keep only files missing the final newline.
    source_code_files = [ name for name in files if name.rpartition('.')[-1] in SOURCE_CODE_EXTENSIONS ]
    missing_last_newline = [ name for name in source_code_files if get_last_byte(name) != '\n' ]
    if args.dry_run:
        print 'Missing newlines at the end of %d files:' % len(missing_last_newline)
        for name in missing_last_newline:
            print '  ', name
    else:
        for name in missing_last_newline:
            if os.access(name, os.W_OK):
                print 'Fixing', name
                with open(name, 'a') as fpout:
                    fpout.write('\n')
| StarcoderdataPython |
1834596 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import re
from os.path import dirname, join
from setuptools import find_packages, setup
# Single-source the package version: scan pipelines/__init__.py for the
# __version__ assignment instead of importing the package at build time.
with open(join(dirname(__file__), 'pipelines', '__init__.py')) as fp:
    for line in fp:
        m = re.search(r'^\s*__version__\s*=\s*([\'"])([^\'"]+)\1\s*$', line)
        if m:
            version = m.group(2)
            break
    else:
        # for/else: only reached when no line matched.
        raise RuntimeError('Unable to find own __version__ string')
def get_requirements(reqfile):
    """Return the lines of the requirements file *reqfile* as a list of strings."""
    with open(reqfile) as handle:
        return handle.read().splitlines()
# Optional dependency group: `pip install pipelines[testing]`.
extras = {
    "testing": [
        "pytest>=4.4.0",
        "pytest-xdist==1.31.0",
        "pytest-cov==2.8.1",
        "flake8==3.7.9",
    ]
}
setup(
    name='pipelines',
    version=version,  # extracted from pipelines/__init__.py above
    description='Manage pipelines.',
    license='Apache License 2.0',
    packages=find_packages(),
    install_requires=get_requirements('requirements.txt'),
    extras_require=extras,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/platiagro/pipelines',
    entry_points={
        # Console script to initialise the database schema.
        "console_scripts": [
            "platiagro-init-db = pipelines.database:init_db",
        ]
    },
)
| StarcoderdataPython |
6482770 | <gh_stars>0
class Solution:
    def PredictTheWinner(self, nums: List[int]) -> bool:
        """Return True if player 1 can tie or beat player 2 with optimal play.

        best_diff(lo, hi) is the best achievable score margin (current
        player minus opponent) on nums[lo..hi]; player 1 wins or ties iff
        that margin is >= 0 for the whole array. Memoised on (lo, hi).
        """
        memo = {}

        def best_diff(lo, hi):
            if lo == hi:
                return nums[lo]
            if (lo, hi) not in memo:
                take_left = nums[lo] - best_diff(lo + 1, hi)
                take_right = nums[hi] - best_diff(lo, hi - 1)
                memo[lo, hi] = max(take_left, take_right)
            return memo[lo, hi]

        return best_diff(0, len(nums) - 1) >= 0
| StarcoderdataPython |
1923410 | <reponame>thevahidal/hoopoe-python
from decouple import config
from hoopoe import Hoopoe
# Build the client from environment/.env settings (python-decouple);
# VERSION and BASE_URL fall back to defaults, API_KEY is required.
hoopoe = Hoopoe(
    api_key=config("API_KEY"),
    version=config("VERSION", default="1"),
    base_url=config("BASE_URL", default="https://api.hoopoe.com"),
)
# Demo calls against the remote API (require network access).
print(hoopoe.timestamp())
print(hoopoe.upupa("Hello World!"))
| StarcoderdataPython |
8168597 | <gh_stars>1-10
class Solution:
    def calPoints(self, ops: List[str]) -> int:
        """Baseball game scoring (LeetCode 682).

        Each op is an integer score, 'C' (void the previous valid score),
        'D' (double the previous score) or '+' (sum of the previous two).
        Returns the total of all scores still on the record.

        Fix: the original '+' branch contained a dead else-path that pushed
        the string '+' onto the numeric stack when fewer than two scores
        existed, which would crash sum(); it also did redundant pop/push
        churn for 'D' and '+'. Indexing stack[-1]/stack[-2] is equivalent
        for all valid inputs.
        """
        stack = []
        for op in ops:
            if op == 'C':
                # Invalidate the previous valid score.
                stack.pop()
            elif op == 'D':
                stack.append(stack[-1] * 2)
            elif op == '+':
                stack.append(stack[-1] + stack[-2])
            else:
                stack.append(int(op))
        return sum(stack)
| StarcoderdataPython |
3254378 | <filename>src/background.py
# Copyright (C) 2022 viraelin
# License: MIT
from PyQt6.QtCore import *
from PyQt6.QtWidgets import *
from PyQt6.QtGui import *
class Background(QGraphicsRectItem):
    """Static scene backdrop: a huge filled square with X/Y axis lines.

    Drawn far behind every other item (zValue -1000); the 800k-unit rect
    and axis lines make the backdrop appear infinite at practical zooms.
    """
    def __init__(self) -> None:
        super().__init__()
        self.setZValue(-1000)
        size = 800000
        size_half = size / 2
        # Square centred on the scene origin.
        rect = QRectF(-size_half, -size_half, size, size)
        self.setRect(rect)
        xp1 = QPoint(-size, 0)
        xp2 = QPoint(size, 0)
        self._line_x = QLine(xp1, xp2)
        yp1 = QPoint(0, -size)
        yp2 = QPoint(0, size)
        self._line_y = QLine(yp1, yp2)
        self._axis_color = QColor("#111111")
        self._pen = QPen()
        self._pen.setColor(self._axis_color)
        self._pen.setWidth(4)
        # Cosmetic pen: axis lines stay 4px wide regardless of view zoom.
        self._pen.setCosmetic(True)
        self._pen.setStyle(Qt.PenStyle.SolidLine)
        self._pen.setCapStyle(Qt.PenCapStyle.SquareCap)
        self._pen.setJoinStyle(Qt.PenJoinStyle.MiterJoin)
        self._background_color = QColor("#222222")
        self._brush = QBrush()
        self._brush.setColor(self._background_color)
        self._brush.setStyle(Qt.BrushStyle.SolidPattern)
    def paint(self, painter: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget) -> None:
        """Fill the backdrop rect, then draw the two axis lines over it."""
        painter.setPen(self._pen)
        painter.setBrush(self._brush)
        painter.drawRect(self.rect())
        painter.drawLine(self._line_x)
        painter.drawLine(self._line_y)
| StarcoderdataPython |
9615131 | <reponame>JackieMa000/problems<filename>test_240.py<gh_stars>0
# https://leetcode-cn.com/problems/search-a-2d-matrix-ii/
import unittest
from typing import List
class Solution:
    """Search a row- and column-sorted 2D matrix (LeetCode 240), three variants."""
    def binary_search(self, nums: List[int], target: int) -> bool:
        """Classic binary search; returns True iff target is in sorted nums.

        (Annotation fixed: the function returns a bool, not an int.)
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                return True
            elif nums[mid] < target:
                left = mid + 1
            else:
                right = mid - 1
        return False
    # Diagonal traversal starting from the top-left corner; binary-search variant.
    def searchMatrix_3(self, matrix: List[List[int]], target: int) -> bool:
        """Walk the main diagonal, binary-searching the remaining row and column."""
        if not matrix or not matrix[0]: return False
        m, n = len(matrix), len(matrix[0])
        x, y = 0, 0
        while x < m and y < n:
            row = [matrix[x][j] for j in range(y, n)]
            if self.binary_search(row, target): return True
            column = [matrix[i][y] for i in range(x, m)]
            if self.binary_search(column, target): return True
            x += 1
            y += 1
        return False
    # Diagonal traversal starting from the top-right corner; binary-search variant.
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        """From the top-right corner, binary-search the row/column chosen by the comparison."""
        if not matrix or not matrix[0]: return False
        m, n = len(matrix), len(matrix[0])
        x, y = 0, n - 1
        while x < m and y >= 0:
            if matrix[x][y] == target:
                return True
            elif matrix[x][y] < target:
                # Target is larger: it can only be further down this column.
                column = [matrix[i][y] for i in range(x, m)]
                if self.binary_search(column, target): return True
            else:
                # Target is smaller: it can only be earlier in this row.
                row = [matrix[x][j] for j in range(y + 1)]
                if self.binary_search(row, target): return True
            x += 1
            y -= 1
        return False
    # Staircase search from the bottom-left corner, O(m + n) without slicing.
    def searchMatrix_1(self, matrix: List[List[int]], target: int) -> bool:
        """Move up when the corner value is too big, right when too small."""
        if not matrix or not matrix[0]: return False
        m, n = len(matrix), len(matrix[0])
        x, y = m - 1, 0
        while x >= 0 and y < n:
            if matrix[x][y] == target:
                return True
            elif matrix[x][y] < target:
                y += 1
            else:
                x -= 1
        return False
class MyTestCase(unittest.TestCase):
    """Exercises Solution.searchMatrix on an empty matrix and on the
    LeetCode 240 sample matrix, for both a present and an absent target."""
    def test_something(self):
        # Degenerate matrix: one empty row.
        matrix = [[]]
        self.assertEqual(False, Solution().searchMatrix(matrix, 5))
        matrix = [
            [1, 4, 7, 11, 15],
            [2, 5, 8, 12, 19],
            [3, 6, 9, 16, 22],
            [10, 13, 14, 17, 24],
            [18, 21, 23, 26, 30]
        ]
        self.assertEqual(True, Solution().searchMatrix(matrix, 5))
        self.assertEqual(False, Solution().searchMatrix(matrix, 20))
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6504857 | import json
import logging
from pathlib import Path
from flow_py_sdk.cadence import Address
from flow_py_sdk.signer import InMemorySigner, HashAlgo, SignAlgo
log = logging.getLogger(__name__)
class Config(object):
    """Flow access-node/service-account settings loaded from ../flow.json.

    The emulator defaults below are always set first; if flow.json cannot
    be read or parsed, a warning is logged and those defaults are kept
    (service_account_address/signer are then left unset -- NOTE(review):
    attribute access on them would raise AttributeError; confirm callers
    handle that).
    """
    def __init__(self) -> None:
        super().__init__()
        # Defaults match the local Flow emulator.
        self.access_node_host: str = "localhost"
        self.access_node_port: int = 3569
        self.service_account_key_id: int = 0
        # flow.json lives one directory above this module.
        config_location = Path(__file__).parent.joinpath("../flow.json")
        try:
            with open(config_location) as json_file:
                data = json.load(json_file)
                self.service_account_address = Address.from_hex(
                    data["accounts"]["emulator-account"]["address"]
                )
                self.service_account_signer = InMemorySigner(
                    HashAlgo.from_string(
                        data["accounts"]["emulator-account"]["hashAlgorithm"]
                    ),
                    SignAlgo.from_string(
                        data["accounts"]["emulator-account"]["sigAlgorithm"]
                    ),
                    data["accounts"]["emulator-account"]["keys"],
                )
        except Exception:
            # Best effort: keep defaults but record why the file was unusable.
            log.warning(
                f"Cannot open {config_location}, using default settings",
                exc_info=True,
                stack_info=True,
            )
| StarcoderdataPython |
11279129 | <filename>publisher.py
import paho.mqtt.client as paho
import time
def on_publish(client, userdata, mid):
    """Paho callback: log the message id of each acknowledged publish."""
    print("mid: "+str(mid))
client = paho.Client()
client.on_publish = on_publish
# NOTE(review): broker credentials are hard-coded in source -- move to
# environment variables / config before sharing or deploying.
client.username_pw_set("ylfxubjy", "Bo3U7GcN5NAF")
client.connect("postman.cloudmqtt.com", 14843, 60)
# Background network loop so publishes and callbacks are processed.
client.loop_start()
# Publish the same payload to /percobaan at QoS 1, once per second, forever.
while True:
    teksdikirim="<NAME>"
    client.publish("/percobaan", str(teksdikirim), qos=1)
time.sleep(1) | StarcoderdataPython |
5122355 | <filename>app.py
from flask import Flask
from flask import request, jsonify
# Module-level Flask application; the route decorators below attach to it.
app = Flask(__name__)
def change(amount):
    """Make change for a dollar *amount* using quarters, dimes, nickels, pennies.

    Returns a list of single-entry dicts, largest coin first, e.g.
    change(0.41) -> [{1: 'quarters'}, {1: 'dimes'}, {1: 'nickels'}, {1: 'pennies'}].

    Fixes over the original implementation:
    - the cent total uses round() instead of int() truncation, so float
      inputs like 0.29 (stored as 28.999...) no longer lose a penny;
    - denominations with a zero count are never included (the old code
      unconditionally appended the first divmod result, producing e.g.
      {0: 'quarters'} for amounts under 25 cents).
    """
    res = []
    coins = [25, 10, 5, 1]  # largest to smallest denomination, in cents
    coin_lookup = {25: "quarters", 10: "dimes", 5: "nickels", 1: "pennies"}
    rem = round(amount * 100)  # amount in whole cents
    for coin in coins:
        num, rem = divmod(rem, coin)
        if num:
            res.append({num: coin_lookup[coin]})
        if rem == 0:
            break
    return res
def multiply(amount, multiplier=100):
    """Return the change breakdown for *amount* with every coin count
    multiplied by *multiplier* (default 100)."""
    breakdown = change(amount)
    print(f"This is the {breakdown} x 100")
    scaled = []
    for entry in breakdown:
        count = next(iter(entry))
        scaled.append({int(count) * multiplier: entry.get(count)})
    return scaled
@app.route("/")
def hello():
    """Return a friendly HTTP greeting listing the available routes."""
    # Server-side trace only; not part of the HTTP response.
    print("I am inside hello world")
    return "Hello World! I can make change at route: /change/<dollar>/<cents> or I can multiply by 100: /multiply/<dollar>/<cents>"
@app.route("/change/<dollar>/<cents>")
def changeroute(dollar, cents):
    """Return the change breakdown for <dollar>.<cents> as JSON.

    NOTE(review): the amount is rebuilt by string concatenation, so
    /change/1/5 yields "1.5" ($1.50) -- callers must zero-pad cents
    ("05") to mean 5 cents. jsonify also turns the integer coin counts
    used as dict keys into JSON string keys.
    """
    print(f"Make Change for {dollar}.{cents}")
    amount = f"{dollar}.{cents}"
    result = change(float(amount))
    return jsonify(result)
@app.route("/change-json", methods=["POST"])
def changejsonroute():
    """Make change for one or more amounts passed as query parameters.

    Expects ?amount=<value>[&amount=<value>...]; returns a JSON list with
    one change breakdown per amount, or an error string otherwise.
    NOTE(review): despite methods=["POST"] this reads request.args (the
    query string), not the request body; and next(iter(...)) raises
    StopIteration when no parameters are supplied -- confirm intended.
    """
    res = []
    content = request.args.to_dict(flat=False)
    key = next(iter(content))
    if key == "amount":
        for amount in content.get(key):
            print(f"Make Change for {amount} using POST")
            result = change(float(amount))
            res.append(result)
        return jsonify(res)
    return "Error! Value accepted: {'amount': <value>}"
@app.route("/multiply/<dollar>/<cents>")
def multiplyroute(dollar, cents):
    """Return the change breakdown for <dollar>.<cents> with coin counts
    scaled by 100, as JSON (same cents-padding caveat as /change)."""
    print(f"Multiply by 100 for {dollar}.{cents}")
    amount = f"{dollar}.{cents}"
    result = multiply(float(amount))
    return jsonify(result)
if __name__ == "__main__":
    # Development entry point: debug server bound to loopback only.
    app.run(host="127.0.0.1", port=5000, debug=True)
| StarcoderdataPython |
1768938 | # Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright 2019 The OSArchiver Authors. All rights reserved.
"""
Destination abstract base class file
"""
from abc import ABCMeta, abstractmethod
class Destination(metaclass=ABCMeta):
    """Abstract base class for OSArchiver data destinations.

    A Destination is identified by a name and a backend type (default
    'db'); concrete backends must implement write() and clean_exit().
    """
    def __init__(self, name=None, backend='db'):
        """Store the destination's name and backend identifier."""
        self.name = name
        self.backend = backend
    @abstractmethod
    def write(self, database=None, table=None, data=None):
        """Write *data* belonging to *database*.*table* to the destination."""
    @abstractmethod
    def clean_exit(self):
        """Close and release backend resources so the process can exit cleanly."""
| StarcoderdataPython |
11295733 | """
Some helper functions and classes used throughout the test suite
Constants:
TEST_DIR
COMMIT_DATAFILE
DEST_REPO_PREFIX
FEATURE_BRANCH
DEST_MASTER_COMMITS
DEST_FEATURE_COMMITS
ITEM_OPS_RETURN_VALUE
Helper functions:
load_iter_commits(repo, branch='master', mode='dict')
load_commit_data()
make_commits(repo, commits_data):
read_gitchm(workdir)
listify_attribute(seq, mode='obj')
set_attr_or_key(seq, field, values)
Coroutines:
run_mirror_ops(mirror, **kwargs)
Helper Classes:
ModifiedCHM
"""
from datetime import datetime
import json
import os
from typing import Generator
from git import Actor, Commit, Repo
from gitchm.mirror import CommitHistoryMirror, GITCHMFILE
# Constants
TEST_DIR = os.path.abspath(os.path.dirname(__file__))  # directory containing this test module
COMMIT_DATAFILE = os.path.join(TEST_DIR, "data/commits.json")  # dummy-commit fixture file
DEST_REPO_PREFIX = "mirror"  # default prefix for destination repo names
FEATURE_BRANCH = "feature"
DEST_MASTER_COMMITS = 2  # no. of dummy commits in dest master
DEST_FEATURE_COMMITS = 4  # no. of dummy commits in dest feature
ITEM_OPS_RETURN_VALUE = "value"  # shared stub return value -- usage defined elsewhere; confirm
# Helper functions
def load_iter_commits(
    repo: Repo, branch: str = "master", mode: str = "dict", **kwargs
) -> list:
    """Makes fetched Commit items ready to be used in `make_commits()`.

    Converts the commits of *branch* into either a list of dicts (same
    shape as `load_commit_data()` output) or a list of raw Commit objects.

    Args:
        repo (Repo): The repo to fetch commits from
        branch (str): The branch to fetch commits from (default 'master')
        mode (str): 'dict' (default) for list of dicts, 'obj' for Commit objects
        kwargs: optional filter params forwarded to repo.iter_commits

    Raises:
        ValueError: if mode is neither 'obj' nor 'dict'.
    """
    if mode not in ["obj", "dict"]:
        raise ValueError("mode must be obj or dict")
    if kwargs:
        # Any filtering is made case-insensitive.
        kwargs.update({"regexp_ignore_case": True})
    commits = repo.iter_commits(branch, **kwargs)
    data = []
    for commit in commits:
        item = commit
        if mode == "dict":
            # Flatten the Commit object into the fixture dict shape.
            item = dict()
            item["hexsha"] = commit.hexsha
            item["message"] = commit.message
            item["timestamp"] = commit.committed_date
            item["author_name"] = commit.author.name
            item["author_email"] = commit.author.email
            item["committer_name"] = commit.committer.name
            item["committer_email"] = commit.committer.email
        data.append(item)
    return data
def load_commit_data() -> list:
    """Loads dummy commits data from the JSON fixture, oldest first.

    Returns the list of commit dicts from COMMIT_DATAFILE sorted by their
    'timestamp' field so they can be replayed in chronological order.
    """
    commits_fetched = []
    # Load data for creating dummy commits
    with open(COMMIT_DATAFILE, "r", encoding="utf-8") as f:
        commits_fetched = json.load(f)
    # Sort in chronological order
    commits_fetched = sorted(commits_fetched, key=lambda x: x["timestamp"])
    return commits_fetched
def make_commits(
    repo: Repo, commits_data: list, has_mirror: bool = False
) -> list:
    """Loads commit data from JSON file and makes commits in given repo.

    For each entry, creates a numbered text file containing the commit
    message, optionally appends the entry's hexsha to `.gitchmirror`,
    then stages and commits with the entry's author/committer/timestamps.

    Args:
        repo (`Repo`): git repo instance where commits will be made
        commits_data (list): Contains the commit details to write
        has_mirror (bool): Indicates whether to write to `.gitchmirror`
    Returns:
        list: the same commits_data that was written
    """
    # Simulate git add-commit workflow for each commit item
    for i, commit_item in enumerate(commits_data):
        changes = []
        hexsha = commit_item["hexsha"]
        message = commit_item["message"]
        # Original (source-repo) commit time, reused for the new commit.
        commit_dt = datetime.fromtimestamp(
            commit_item["timestamp"]
        ).isoformat()
        # Create new file
        fname = f"{i:05d}.txt"
        fpath = os.path.join(repo.working_dir, fname)
        with open(fpath, "w", encoding="utf-8") as f:
            # Write commit message as file content
            f.write(message)
        changes.append(fpath)
        # Write to .gitchmirror file
        if has_mirror:
            gpath = os.path.join(repo.working_dir, GITCHMFILE)
            with open(gpath, "a+", encoding="utf-8") as g:
                g.write(f"{hexsha}\n")
            changes.append(gpath)
        # Create author and committer
        author = Actor(
            name=commit_item["author_name"], email=commit_item["author_email"]
        )
        committer = Actor(
            name=commit_item["committer_name"],
            email=commit_item["committer_email"],
        )
        # Stage and commit the created file(s)
        repo.index.add(changes)
        repo.index.commit(
            message=message,
            author=author,
            author_date=commit_dt,
            committer=committer,
            commit_date=commit_dt,
        )
    return commits_data
def read_gitchm(workdir: str) -> list:
    """Return the lines of the `.gitchmirror` file in *workdir* as a list."""
    with open(os.path.join(workdir, GITCHMFILE)) as handle:
        return handle.read().splitlines()
def listify_attribute(seq: list, attr: str, mode: str = "obj") -> list:
    """Return *attr* (object attribute or dict key, per *mode*) of each item in *seq*."""
    if mode not in ("obj", "dict"):
        raise ValueError("mode must be obj or dict")
    if mode == "obj":
        return [getattr(item, attr) for item in seq]
    return [item.get(attr) for item in seq]
def set_attr_or_key(seq: list, field: str, values: list) -> None:
    """Assign values[i] to seq[i]'s *field*, as a dict key or an attribute.

    Dict items receive the value under key *field*; any other object gets
    it via setattr. *values* must align one-to-one with *seq* (the item at
    index n of *values* is assigned to the item at index n of *seq*).
    """
    for target, value in zip(seq, values):
        if isinstance(target, dict):
            target[field] = value
        else:
            setattr(target, field, value)
# Coroutines
async def run_mirror_ops(mirror: CommitHistoryMirror, **kwargs) -> None:
    """Runs `mirror.reflect()` with the given keyword arguments (async test driver)."""
    await mirror.reflect(**kwargs)
# Helper classes
class ModifiedCHM(CommitHistoryMirror):
    """CommitHistoryMirror with a side-effect-free __init__.

    Overrides the parent's __init__ so the setup methods it normally calls
    can be tested individually; only sets the instance attributes to
    inert defaults.
    """
    def __init__(
        self,
        source_workdir: str = "",
        dest_workdir: str = "",
        prefix: str = DEST_REPO_PREFIX,
    ) -> None:
        self.source_workdir = source_workdir
        self.dest_workdir = dest_workdir
        self.dest_prefix = prefix
        # Deliberately skip repository initialisation: start from a blank state.
        self.prior_dest_exists = False
        self.dest_head_commit = None
        self.dest_has_tree = False
        self.dest_commit_hashes = []
        self.dest_is_mirror = False
| StarcoderdataPython |
4901777 | <gh_stars>1-10
import pathlib
import time
import subprocess
from cycler import cycler
import yaqc_bluesky
from yaqd_core import testing
from bluesky import RunEngine
from bluesky.plans import rel_spiral
__here__ = pathlib.Path(__file__).parent  # directory holding the daemon config files
# The decorators spin up two fake yaq daemons for the duration of the test.
@testing.run_daemon_entry_point(
    "fake-triggered-sensor", config=__here__ / "triggered-sensor-config.toml"
)
@testing.run_daemon_entry_point(
    "fake-continuous-hardware", config=__here__ / "continuous-hardware-config.toml"
)
def test_simple_rel_spiral():
    """Run a Bluesky rel_spiral plan over two fake hardware axes and a sensor.

    Connects to the daemons on their fixed local ports, homes both axes to
    0, then executes a relative spiral scan (range 1x1, dr=0.5, nth=10).
    """
    RE = RunEngine()
    hardware_x = yaqc_bluesky.Device(39423)
    hardware_y = yaqc_bluesky.Device(39424)
    sensor = yaqc_bluesky.Device(39425)
    hardware_x.set(0)
    hardware_y.set(0)
    RE(
        rel_spiral(
            [sensor], x_motor=hardware_x, y_motor=hardware_y, x_range=1, y_range=1, dr=0.5, nth=10
        )
    )
if __name__ == "__main__":
    # Allow running this test directly without pytest.
    test_simple_rel_spiral()
| StarcoderdataPython |
3495697 | from __future__ import print_function
import os
import shutil
import subprocess
import sys
from threading import Timer
import ustrings
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from ..configuration import IrodsConfig
from .. import test
from .. import lib
from .resource_suite import ResourceBase
from . import session
class Test_Resource_Replication_Timing(ResourceBase, unittest.TestCase):
plugin_name = IrodsConfig().default_rule_engine_plugin
def setUp(self):
with session.make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand("iadmin modresc demoResc name origResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
admin_session.assert_icommand("iadmin mkresc demoResc replication", 'STDOUT_SINGLELINE', 'replication')
irods_config = IrodsConfig()
admin_session.assert_icommand("iadmin mkresc unix1Resc 'unixfilesystem' " + test.settings.HOSTNAME_1 + ":" +
irods_config.irods_directory + "/unix1RescVault", 'STDOUT_SINGLELINE', 'unixfilesystem')
admin_session.assert_icommand("iadmin mkresc unix2Resc 'unixfilesystem' " + test.settings.HOSTNAME_2 + ":" +
irods_config.irods_directory + "/unix2RescVault", 'STDOUT_SINGLELINE', 'unixfilesystem')
admin_session.assert_icommand("iadmin mkresc unix3Resc 'unixfilesystem' " + test.settings.HOSTNAME_3 + ":" +
irods_config.irods_directory + "/unix3RescVault", 'STDOUT_SINGLELINE', 'unixfilesystem')
admin_session.assert_icommand("iadmin addchildtoresc demoResc unix1Resc")
admin_session.assert_icommand("iadmin addchildtoresc demoResc unix2Resc")
admin_session.assert_icommand("iadmin addchildtoresc demoResc unix3Resc")
self.child_replication_count = 3
super(Test_Resource_Replication_Timing, self).setUp()
def tearDown(self):
super(Test_Resource_Replication_Timing, self).tearDown()
with session.make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand("iadmin rmchildfromresc demoResc unix3Resc")
admin_session.assert_icommand("iadmin rmchildfromresc demoResc unix2Resc")
admin_session.assert_icommand("iadmin rmchildfromresc demoResc unix1Resc")
admin_session.assert_icommand("iadmin rmresc unix3Resc")
admin_session.assert_icommand("iadmin rmresc unix2Resc")
admin_session.assert_icommand("iadmin rmresc unix1Resc")
admin_session.assert_icommand("iadmin rmresc demoResc")
admin_session.assert_icommand("iadmin modresc origResc name demoResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
irods_config = IrodsConfig()
shutil.rmtree(irods_config.irods_directory + "/unix1RescVault", ignore_errors=True)
shutil.rmtree(irods_config.irods_directory + "/unix2RescVault", ignore_errors=True)
shutil.rmtree(irods_config.irods_directory + "/unix3RescVault", ignore_errors=True)
def test_rebalance_invocation_timestamp__3665(self):
# prepare out of balance tree with enough objects to trigger rebalance paging (>500)
localdir = '3665_tmpdir'
shutil.rmtree(localdir, ignore_errors=True)
lib.make_large_local_tmp_dir(dir_name=localdir, file_count=600, file_size=5)
self.admin.assert_icommand(['iput', '-r', localdir], "STDOUT_SINGLELINE", ustrings.recurse_ok_string())
self.admin.assert_icommand(['iadmin', 'mkresc', 'newchild', 'unixfilesystem', test.settings.HOSTNAME_1+':/tmp/newchildVault'], 'STDOUT_SINGLELINE', 'unixfilesystem')
self.admin.assert_icommand(['iadmin','addchildtoresc','demoResc','newchild'])
# run rebalance with concurrent, interleaved put/trim of new file
self.admin.assert_icommand(['ichmod','-r','own','rods',self.admin.session_collection])
self.admin.assert_icommand(['ichmod','-r','inherit',self.admin.session_collection])
laterfilesize = 300
laterfile = '3665_laterfile'
lib.make_file(laterfile, laterfilesize)
put_thread = Timer(2, subprocess.check_call, [('iput', '-R', 'demoResc', laterfile, self.admin.session_collection)])
trim_thread = Timer(3, subprocess.check_call, [('itrim', '-n3', self.admin.session_collection + '/' + laterfile)])
put_thread.start()
trim_thread.start()
self.admin.assert_icommand(['iadmin','modresc','demoResc','rebalance'])
put_thread.join()
trim_thread.join()
# new file should not be balanced (rebalance should have skipped it due to it being newer)
self.admin.assert_icommand(['ils', '-l', laterfile], 'STDOUT_SINGLELINE', [str(laterfilesize), ' 0 ', laterfile])
self.admin.assert_icommand(['ils', '-l', laterfile], 'STDOUT_SINGLELINE', [str(laterfilesize), ' 1 ', laterfile])
self.admin.assert_icommand(['ils', '-l', laterfile], 'STDOUT_SINGLELINE', [str(laterfilesize), ' 2 ', laterfile])
self.admin.assert_icommand_fail(['ils', '-l', laterfile], 'STDOUT_SINGLELINE', [str(laterfilesize), ' 3 ', laterfile])
# cleanup
os.unlink(laterfile)
shutil.rmtree(localdir, ignore_errors=True)
self.admin.assert_icommand(['iadmin','rmchildfromresc','demoResc','newchild'])
self.admin.assert_icommand(['itrim', '-Snewchild', '-r', '/tempZone'], 'STDOUT_SINGLELINE', 'Total size trimmed')
self.admin.assert_icommand(['iadmin','rmresc','newchild'])
@unittest.skipIf(test.settings.RUN_IN_TOPOLOGY, 'Reads server log')
def test_rebalance_logging_replica_update__3463(self):
filename = 'test_rebalance_logging_replica_update__3463'
file_size = 400
lib.make_file(filename, file_size)
self.admin.assert_icommand(['iput', filename])
self.update_specific_replica_for_data_objs_in_repl_hier([(filename, filename)])
initial_log_size = lib.get_file_size_by_path(IrodsConfig().server_log_path)
self.admin.assert_icommand(['iadmin', 'modresc', 'demoResc', 'rebalance'])
data_id = session.get_data_id(self.admin, self.admin.session_collection, filename)
lib.delayAssert(
lambda: lib.log_message_occurrences_equals_count(
msg='updating out-of-date replica for data id [{0}]'.format(str(data_id)),
count=2,
server_log_path=IrodsConfig().server_log_path,
start_index=initial_log_size))
os.unlink(filename)
@unittest.skipIf(test.settings.RUN_IN_TOPOLOGY, 'Reads server log')
def test_rebalance_logging_replica_creation__3463(self):
filename = 'test_rebalance_logging_replica_creation__3463'
file_size = 400
lib.make_file(filename, file_size)
self.admin.assert_icommand(['iput', filename])
self.admin.assert_icommand(['itrim', '-S', 'demoResc', '-N1', filename], 'STDOUT_SINGLELINE', 'Number of files trimmed = 1.')
initial_log_size = lib.get_file_size_by_path(IrodsConfig().server_log_path)
self.admin.assert_icommand(['iadmin', 'modresc', 'demoResc', 'rebalance'])
data_id = session.get_data_id(self.admin, self.admin.session_collection, filename)
lib.delayAssert(
lambda: lib.log_message_occurrences_equals_count(
msg='creating new replica for data id [{0}]'.format(str(data_id)),
count=2,
server_log_path=IrodsConfig().server_log_path,
start_index=initial_log_size))
os.unlink(filename)
def update_specific_replica_for_data_objs_in_repl_hier(self, name_pair_list, repl_num=0):
# determine which resource has replica 0
_,out,_ = self.admin.assert_icommand(['iquest', "select DATA_RESC_HIER where DATA_NAME = '{0}' and DATA_REPL_NUM = '{1}'".format(name_pair_list[0][0], repl_num)], 'STDOUT_SINGLELINE', 'DATA_RESC_HIER')
replica_0_resc = out.splitlines()[0].split()[-1].split(';')[-1]
# remove from replication hierarchy
self.admin.assert_icommand(['iadmin', 'rmchildfromresc', 'demoResc', replica_0_resc])
# update all the replica 0's
for (data_obj_name, filename) in name_pair_list:
self.admin.assert_icommand(['iput', '-R', replica_0_resc, '-f', '-n', str(repl_num), filename, data_obj_name])
# restore to replication hierarchy
self.admin.assert_icommand(['iadmin', 'addchildtoresc', 'demoResc', replica_0_resc])
| StarcoderdataPython |
9610120 | import io
import os
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from django.shortcuts import render
from django.core.exceptions import ValidationError
from django.http import Http404, HttpResponse
from django.db import transaction
from django.db.models import Subquery, OuterRef
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User, AnonymousUser
from django.utils.translation import gettext_lazy as _
from django.utils.decorators import method_decorator
from rest_framework import viewsets, status, parsers, generics, serializers
from rest_framework.response import Response
from rest_framework.views import APIView, exception_handler
from rest_framework.decorators import action, permission_classes
from rest_framework import permissions, pagination, mixins
from rest_framework import exceptions as rest_except
from rest_framework.exceptions import APIException
from rest_framework.schemas.openapi import AutoSchema
from drf_yasg.inspectors.view import SwaggerAutoSchema
from drf_yasg.utils import swagger_auto_schema, no_body
from drf_yasg import openapi
from annotation import models
from annotation.exceptions import AnnotationBaseExcept, FileParseException
from annotation.models import Projects, PROJECT_TYPE
import annotation.serializers as anno_serializer
from annotation.serializers import ProjectsSerializer, DocumentsSerializer
from libs.files import FilesBase
from libs.utils import is_int
# Utils
# -------------------
def get_generic_error_schema(name="API Error"):
    """Build an OpenAPI object schema with a single required ``detail`` string."""
    detail_field = openapi.Schema(
        type=openapi.TYPE_STRING, description="Error details"
    )
    return openapi.Schema(
        name,
        type=openapi.TYPE_OBJECT,
        properties={"detail": detail_field},
        required=["detail"],
    )
def custom_exception_handler(exc, context):
    """DRF exception handler that flattens ``exc.detail`` into one string.

    DRF validation errors arrive as lists or dicts; this normalizes them to a
    single human-readable message before delegating to the default handler.
    """
    if not hasattr(exc, "detail"):
        exc.detail = str(exc)
    if isinstance(exc.detail, list):
        # List items may themselves be dicts/lists (nested validation errors),
        # which would make ``" ".join`` raise TypeError; coerce each item.
        exc.detail = " ".join(str(item) for item in exc.detail)
    if isinstance(exc.detail, dict):
        exc.detail = ". ".join(
            "{}: {}".format(key, value) for key, value in exc.detail.items()
        )
    return exception_handler(exc, context)
# API
# -------------------
class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for projects.

    Listing is restricted to projects the requesting user owns or has an
    explicit ProjectsPermission on; deletion is owner-only.
    """
    queryset = Projects.objects.all()
    serializer_class = ProjectsSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    @swagger_auto_schema(responses={"400": get_generic_error_schema()})
    def create(self, request, *args, **kwargs):
        """Create the project"""
        # Read fields with .get() so a missing key yields a 400 ParseError
        # instead of an unhandled KeyError (HTTP 500).
        name = request.data.get("name")
        if name is None:
            raise rest_except.ParseError(_("Name not found"))
        project_type = request.data.get("type")
        if project_type not in [x[0] for x in models.PROJECT_TYPE]:
            raise rest_except.ParseError(_("Type not found"))
        description = request.data.get("description")
        if description is None:
            description = ""
        project = models.Projects.objects.create(
            name=name,
            description=description,
            type=project_type,
            owner=request.user,
        )
        serializer = self.serializer_class(project, many=False)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @swagger_auto_schema(responses={"400": get_generic_error_schema()})
    def list(self, request, *args, **kwargs):
        """List of all projects"""
        if isinstance(request.user, AnonymousUser):
            return Response(status=status.HTTP_403_FORBIDDEN)
        # Projects shared with the user through explicit permissions.
        shared_ids = models.ProjectsPermission.objects.filter(
            user=request.user
        ).values_list("project", flat=True)
        owned = models.Projects.objects.filter(owner=request.user)
        shared = models.Projects.objects.filter(pk__in=shared_ids)
        serializer = self.serializer_class(owned | shared, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success"},
    )
    def destroy(self, request, *args, **kwargs):
        """Delete the project"""
        obj = self.get_object()
        # Only the owner may delete; shared users get 401.
        if obj.owner != request.user:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @swagger_auto_schema(
        responses={
            "200": anno_serializer.DocumentsSerializerSimple(many=True),
            "400": get_generic_error_schema(),
        },
        manual_parameters=[
            openapi.Parameter(
                "approved",
                openapi.IN_QUERY,
                description="If specified then will filter by field 'approved'",
                type=openapi.TYPE_INTEGER,
                required=False,
                enum=[0, 1],
            ),
        ],
    )
    @action(detail=True, methods=["get"])
    def documents_list_simple(self, request, pk=None):
        """Get documents without content
        Query:
            approved - If specified then will filter by field 'approved'
                0 - Not verifed only
                1 - Verifed only
        """
        afilter = {"project": self.get_object()}
        f_approved = request.query_params.get("approved")
        if f_approved is not None and is_int(f_approved):
            afilter["approved"] = bool(int(f_approved))
        docs = models.Documents.objects.filter(**afilter).order_by("file_name")
        serializer = anno_serializer.DocumentsSerializerSimple(docs, many=True)
        return Response(serializer.data)

    @swagger_auto_schema(
        responses={
            "200": openapi.Schema(
                "Success",
                type=openapi.TYPE_OBJECT,
                properties={
                    "docs_approve_count": openapi.Schema(
                        type=openapi.TYPE_INTEGER,
                        description="Count approved documents",
                    ),
                    "docs_total": openapi.Schema(
                        type=openapi.TYPE_INTEGER, description="Total documents"
                    ),
                },
                required=["docs_approve_count", "docs_total"],
            ),
            "400": get_generic_error_schema(),
        },
    )
    @action(detail=True, methods=["get"])
    def info(self, request, pk=None):
        """Get info about approved documents in project: count and total"""
        # Resolve the project once instead of once per count query.
        project = self.get_object()
        docs_approved = models.Documents.objects.filter(
            project=project, approved=True
        ).count()
        docs_total = models.Documents.objects.filter(project=project).count()
        return Response(
            {"docs_approve_count": docs_approved, "docs_total": docs_total},
            status=200,
        )
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(
        operation_description="Retrieve document",
        responses={
            "400": get_generic_error_schema(),
            "404": get_generic_error_schema(),
        },
    ),
)
@method_decorator(
    name="destroy",
    decorator=swagger_auto_schema(
        operation_description="Delete document",
        responses={"204": "Success delete", "400": get_generic_error_schema()},
    ),
)
class DocumentSeqViewSet(
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet):
    """Retrieve/delete documents and toggle their 'approved' flag."""
    queryset = models.Documents.objects.all()
    serializer_class = anno_serializer.DocumentSeqSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    def _set_approved(self, flag):
        """Persist the document's ``approved`` flag and return 204."""
        doc = self.get_object()
        doc.approved = flag
        doc.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success change"},
        request_body=no_body,
    )
    @action(detail=True, methods=["post"])
    def approved(self, request, pk=None):
        """Set approved"""
        return self._set_approved(True)

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success change"},
        request_body=no_body,
    )
    @action(detail=True, methods=["post"])
    def unapproved(self, request, pk=None):
        """Unset approved"""
        return self._set_approved(False)

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success reset"},
        request_body=no_body,
    )
    @action(detail=True, methods=["post"])
    def reset(self, request, pk=None):
        """Delete the all TL Labels in document"""
        doc = self.get_object()
        # Single bulk delete through the relation instead of one DELETE query
        # per sequence (the original looped over every Sequence: N+1 queries).
        models.TlSeqLabel.objects.filter(sequence__document=doc).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class DocumentSeqDcLabel(generics.GenericAPIView):
    """Work with Document Classifier Label for document"""
    queryset = models.Documents.objects.all()
    serializer_class = anno_serializer.DocumentSeqSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success set",},
        request_body=openapi.Schema(
            in_=openapi.IN_BODY,
            type=openapi.TYPE_OBJECT,
            properties={
                "label_id": openapi.Schema(
                    type=openapi.TYPE_INTEGER, description="Id label"
                ),
                "value": openapi.Schema(
                    type=openapi.TYPE_INTEGER,
                    description="Value of label (0/1)",
                    enum=[0, 1],
                ),
            },
            required=["label_id", "value"],
        ),
    )
    def post(self, request, pk=None):
        """Set label for document"""
        doc = self.get_object()
        label_id = request.data.get("label_id", None)
        if label_id is None:
            raise rest_except.ParseError(_("Label not found"))
        try:
            label = models.TlLabels.objects.get(pk=label_id)
        except models.TlLabels.DoesNotExist:
            # An unknown label id is client error (400), not a server crash.
            raise rest_except.ParseError(_("Label not found"))
        try:
            # int(None) raises TypeError, int('abc') raises ValueError;
            # both are malformed client input -> 400 (the original only
            # caught TypeError and returned 500 for non-numeric strings).
            value = int(request.data.get("value", None))
        except (TypeError, ValueError):
            raise rest_except.ParseError(_("Value not convert to int"))
        if value not in [0, 1]:
            raise rest_except.ParseError(_("Value label not 0/1"))
        obj = None
        try:
            obj = models.DCDocLabel.objects.get(label=label, document=doc)
        except models.DCDocLabel.DoesNotExist:
            pass
        if value == 1 and obj is None:
            # Set: create the link only if it does not exist yet.
            models.DCDocLabel.objects.create(label=label, document=doc)
        if value == 0 and obj is not None:
            # Unset: remove the existing link.
            obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @swagger_auto_schema(
        responses={
            "400": get_generic_error_schema(),
            "200": anno_serializer.DCDocLabelSerializer(many=True),
        },
        request_body=no_body,
    )
    def get(self, request, pk=None):
        """Return all list labels for documents."""
        return Response(self.get_object().get_labels(), status=200)

    @swagger_auto_schema(
        responses={"400": get_generic_error_schema(), "204": "Success delete"},
        request_body=no_body,
    )
    def delete(self, request, pk=None):
        """Delete all label for documents."""
        self.get_object().labels_del()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Permissions
# ==========
# class RBACPermission(permissions.BasePermission):
# def has_permission(self, request, view):
# print(type(view))
# project = None
# if type(view) == TLLabelsViewSet:
# project = models.Projects.objects.get(pk=request.data["project"])
# return self._check(request, project)
# def has_object_permission(self, request, view, obj):
# project = None
# if type(obj) == models.TlLabels:
# project = obj.project
# return self._check(request, project)
# def _check(self, request, project):
# if project is None:
# return False
# if request.user == project.owner:
# return True
# return False
# @permission_classes([RBACPermission])
@method_decorator(
    name="create",
    decorator=swagger_auto_schema(
        operation_description="Create label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(
        operation_description="Retrieve label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="update",
    decorator=swagger_auto_schema(
        operation_description="Update label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="partial_update",
    decorator=swagger_auto_schema(
        operation_description="Partial update label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="destroy",
    decorator=swagger_auto_schema(
        operation_description="Delete label",
        responses={"204": "Success delete", "400": get_generic_error_schema(),},
    ),
)
# The @method_decorator wrappers above only attach OpenAPI documentation to
# the mixin-provided actions; they do not change request handling.
class TLLabelsViewSet(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet):
    """Work with label

    CRUD endpoint for TlLabels. Note: no ListModelMixin here - the
    per-project label listing is served by ProjectActionTLLabelList.
    """
    queryset = models.TlLabels.objects.all()
    serializer_class = anno_serializer.TLLabelsSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None
@method_decorator(
    name="retrieve",
    decorator=swagger_auto_schema(
        operation_description="Retrieve TL-Seq-Label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="update",
    decorator=swagger_auto_schema(
        operation_description="Update TL-Seq-Label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="partial_update",
    decorator=swagger_auto_schema(
        operation_description="Partial update TL-Seq-Label",
        responses={"400": get_generic_error_schema(),},
    ),
)
@method_decorator(
    name="destroy",
    decorator=swagger_auto_schema(
        operation_description="Delete TL-Seq-Label",
        responses={"204": "Success delete", "400": get_generic_error_schema(),},
    ),
)
class TLSeqLabelViewSet(
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet):
    """CRUD for span labels attached to a sequence."""
    queryset = models.TlSeqLabel.objects.all()
    serializer_class = anno_serializer.TLSeqLabelSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    @swagger_auto_schema(
        responses={
            "200": anno_serializer.TLSeqLabelSerializer(many=True),
            # "201": "Created success",
            "400": get_generic_error_schema(),
        },
    )
    def create(self, request):
        """Creates the object 'Seq labels' and returns their as array

        Splits the selected span [offset_start, offset_stop) of the sequence
        text on spaces and creates one label per word.
        """
        offset_start = request.data["offset_start"]
        offset_stop = request.data["offset_stop"]
        seq_id = request.data["sequence"]
        label_id = request.data["label"]
        # Fetch the sequence and the label once, up front (the original code
        # re-queried both inside the per-word loop: N+1 queries).
        try:
            sequence = models.Sequence.objects.get(pk=seq_id)
        except models.Sequence.DoesNotExist:
            return Response(status=404)
        try:
            label = models.TlLabels.objects.get(pk=label_id)
        except models.TlLabels.DoesNotExist:
            raise rest_except.ParseError(_("Label not found"))
        result = []
        offset_chunk = offset_start
        for word in sequence.text[offset_start:offset_stop].split(" "):
            word_end = offset_chunk + len(word)
            # Consecutive spaces produce empty chunks; skip them instead of
            # creating zero-length labels, but still advance the offset.
            if word:
                seq_label = models.TlSeqLabel.objects.create(
                    offset_start=offset_chunk,
                    offset_stop=word_end,
                    sequence=sequence,
                    label=label,
                )
                serializer = anno_serializer.TLSeqLabelSerializer(seq_label)
                result.append(serializer.data)
            offset_chunk = word_end + 1
        return Response(result)
# Extra
class ProjectHelps:
    """Helper around one project: resolves label-import handlers per project
    type and iterates documents for dataset export."""

    def __init__(self, project_id):
        # Raises models.Projects.DoesNotExist for an unknown id; the
        # import/export views translate that into a 400 response.
        self.project = models.Projects.objects.get(pk=project_id)

    # ==/ Block: Labels
    def _label_get_handler(self):
        """Return the per-sequence label handler for the project type."""
        # Named 'handlers' so the builtin map() is not shadowed.
        handlers = {
            "text_label": self._process_labels_tl,
            "document_classificaton": self._process_labels_dc,
        }
        label_handler = handlers.get(self.project.type)
        if label_handler is None:
            raise AnnotationBaseExcept(_("Did not find handler for labels"))
        return label_handler

    def _label_doc_get_handler(self):
        """Return the per-document label handler for the project type."""
        handlers = {
            "text_label": self._process_labels_doc_tl,
            "document_classificaton": self._process_labels_doc_dc,
        }
        label_handler = handlers.get(self.project.type)
        if label_handler is None:
            raise AnnotationBaseExcept(_("Did not find handler for labels doc"))
        return label_handler

    def _create_missing_labels(self, label_names):
        """Create a TlLabels row for every name the project lacks.

        Shared by the text_label and document_classificaton import paths,
        which previously duplicated this logic.
        """
        existing = models.TlLabels.objects.filter(
            project=self.project
        ).values_list("name", flat=True)
        for name in label_names:
            if name in existing:
                continue
            try:
                models.TlLabels.objects.create(project=self.project, name=name)
            except ValidationError:
                # Duplicate/invalid name: the label effectively exists, move on.
                pass

    def _process_labels_tl(self, labels, seq):
        """Attach (tag, start, stop) span labels to *seq* (text_label).

        Note: tuples are unpacked without using '_' as a throwaway name,
        because '_' is the module's gettext alias.
        """
        self._create_missing_labels({item[0] for item in labels})
        for tag, char_left, char_right in labels:
            models.TlSeqLabel.objects.create(
                sequence=seq,
                label=models.TlLabels.objects.get(
                    project=self.project, name=tag
                ),
                offset_start=char_left,
                offset_stop=char_right,
            )

    def _process_labels_dc(self, labels, seq):
        """Sequence-level labels are unused for document classification."""
        pass

    def _process_labels_doc_tl(self, labels, doc):
        """Document-level labels are unused for text_label projects."""
        pass

    def _process_labels_doc_dc(self, labels, doc):
        """Attach (tag, value) labels to *doc* (document_classificaton)."""
        self._create_missing_labels({item[0] for item in labels})
        for tag, value in labels:
            if value == 0:
                # Only positive labels are stored; absence means 0.
                continue
            models.DCDocLabel.objects.create(
                document=doc,
                label=models.TlLabels.objects.get(
                    project=self.project, name=tag
                ),
            )
    # ==/ End Block

    def _export_handler(self):
        """Yield (sequences, meta, file_name, doc_labels) per document.

        ``sequences`` is a list of (index, text, [(tag, start, stop), ...]);
        the span list stays empty for non-text_label projects, and
        ``doc_labels`` stays empty for non-document_classificaton projects.
        """
        docs = models.Documents.objects.filter(project=self.project).order_by(
            "file_name"
        )
        project_type = self.project.type
        for doc in docs:
            data = []
            labels_on_doc = []
            if project_type == "document_classificaton":
                for item in doc.get_labels():
                    labels_on_doc.append((item["name"], item["value"]))
            seqs = models.Sequence.objects.filter(document=doc).order_by(
                "order"
            )
            for idx, seq in enumerate(seqs):
                labels = []
                if project_type == "text_label":
                    labels_obj = models.TlSeqLabel.objects.filter(
                        sequence=seq
                    ).order_by("offset_start")
                    for lb_seq in labels_obj:
                        labels.append(
                            (
                                lb_seq.label.name,
                                lb_seq.offset_start,
                                lb_seq.offset_stop,
                            )
                        )
                data.append((idx, seq.text, labels))
            yield data, json.loads(doc.meta), doc.file_name, labels_on_doc
class ProjectDSImport(generics.GenericAPIView):
    """Import an uploaded dataset file into a project."""
    parser_classes = (parsers.MultiPartParser,)
    serializer_class = anno_serializer.ProjectDSImport
    permission_classes = [permissions.IsAuthenticated]

    @swagger_auto_schema(
        responses={"204": "Success import", "400": get_generic_error_schema(),},
    )
    def put(self, request, pk=None):
        """Import file/files in dataset"""
        try:
            file_obj = request.FILES.get("files", None)
            file_format = request.data.get("format", None)
            proj_helper = ProjectHelps(pk)
        except models.Projects.DoesNotExist:
            raise rest_except.ParseError(_("Project not found"))
        if file_obj is None:
            raise rest_except.ParseError(_("File not found"))
        file_handler = FilesBase.factory(
            file_format, proj_helper.project.type, "import", file_obj
        )
        if file_handler is None:
            raise rest_except.ParseError(_("Format not exists"))
        try:
            # Select the label handlers matching the project type.
            label_handler = proj_helper._label_get_handler()
            label_doc_handler = proj_helper._label_doc_get_handler()
            # All-or-nothing: any parse error rolls back every created row.
            with transaction.atomic():
                for data, meta, file_name, lb_doc in file_handler.import_ds():
                    # Create the document (extension stripped from the name).
                    doc = models.Documents.objects.create(
                        project=proj_helper.project,
                        file_name=os.path.splitext(file_name)[0],
                        meta=json.dumps(meta),
                    )
                    # Create sequences and their labels.
                    for index, text, labels in data:
                        seq = models.Sequence.objects.create(
                            document=doc, text=text, order=index
                        )
                        label_handler(labels, seq)
                    # Create document-level labels.
                    label_doc_handler(lb_doc, doc)
        except (FileParseException, AnnotationBaseExcept) as e:
            # Both exception types were handled identically; a single clause
            # keeps the translation to HTTP 400 in one place.
            raise rest_except.ParseError(str(e))
        return Response(status=status.HTTP_204_NO_CONTENT)

    def get_queryset(self):
        # No model queryset: this endpoint only consumes an uploaded file.
        return None
class ProjectDSExport(generics.GenericAPIView, ProjectHelps):
    """Export a project's documents as a zip archive.

    NOTE(review): the ProjectHelps base class appears unused here - get()
    constructs its own ProjectHelps(pk) instance, and ProjectHelps.__init__
    requires a project_id this view never passes to it. Confirm before
    removing the base class.
    """
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None
    serializer_class = None
    @swagger_auto_schema(
        produces="application/zip",
        responses={
            "200": openapi.Response(
                description="Content-Type: application/zip",
                schema=openapi.Schema(type=openapi.TYPE_FILE),
            ),
            "400": get_generic_error_schema(),
        },
        manual_parameters=[
            openapi.Parameter(
                "exformat",
                openapi.IN_QUERY,
                description="Format file",
                type=openapi.TYPE_STRING,
                required=True,
                enum=[x[0] for x in anno_serializer.get_all_formats()],
            )
        ],
    )
    def get(self, request, pk=None):
        """Export documents from dataset"""
        # Query:
        # exformat - The format exported files
        try:
            proj_helper = ProjectHelps(pk)
        except models.Projects.DoesNotExist:
            raise rest_except.ParseError(_("Project not found"))
        exformat = request.query_params.get("exformat")
        # The handler is chosen by (format, project type); None means the
        # combination is unsupported.
        file_handler = FilesBase.factory(
            exformat, proj_helper.project.type, "export", None
        )
        if file_handler is None:
            raise rest_except.ParseError(_("Format not exists"))
        # _export_handler is a generator; the zip is streamed to the client.
        response = HttpResponse(
            file_handler.export_ds(proj_helper._export_handler),
            content_type="application/zip",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(
            "export.zip"
        )
        return response
    def get_queryset(self):
        # No model queryset: the export path resolves the project itself.
        return None
class ProjectActionDocumentList(generics.GenericAPIView):
    """Paginated listing of a project's documents, including content."""
    queryset = Projects.objects.all()
    serializer_class = DocumentsSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = pagination.PageNumberPagination

    @swagger_auto_schema(
        responses={
            "400": get_generic_error_schema(),
            "404": get_generic_error_schema("Page not found"),
        },
        manual_parameters=[
            openapi.Parameter(
                "approved",
                openapi.IN_QUERY,
                description="If specified then will filter by field 'approved'",
                type=openapi.TYPE_INTEGER,
                required=False,
                enum=[0, 1],
            ),
        ],
    )
    def get(self, request, pk=None):
        """Get list documents with content by page
        Query:
            approved - If specified then will filter by field 'approved'
                0 - Not verifed only
                1 - Verifed only
        """
        try:
            project = models.Projects.objects.get(pk=pk)
        except models.Projects.DoesNotExist:
            raise rest_except.ParseError(_("Project not found"))
        doc_filter = {"project": project}
        approved_param = request.query_params.get("approved")
        if approved_param is not None and is_int(approved_param):
            doc_filter["approved"] = bool(int(approved_param))
        documents = models.Documents.objects.filter(**doc_filter).order_by("file_name")
        page = self.paginate_queryset(documents)
        if page is None:
            # Pagination disabled at runtime: return the full list.
            return Response(DocumentsSerializer(documents, many=True).data, status=200)
        return self.get_paginated_response(
            DocumentsSerializer(page, many=True).data
        )
class ProjectActionTLLabelList(generics.GenericAPIView):
    """Listing of all labels defined on a project."""
    queryset = Projects.objects.all()
    serializer_class = anno_serializer.TLLabelsSerializer
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    @swagger_auto_schema(
        responses={
            "200": anno_serializer.TLLabelsSerializer(many=True),
            "400": get_generic_error_schema(),
        },
    )
    def get(self, request, pk=None):
        """Get list labels for project"""
        try:
            project = models.Projects.objects.get(pk=pk)
        except models.Projects.DoesNotExist:
            raise rest_except.ParseError(_("Project not found"))
        project_labels = models.TlLabels.objects.filter(project=project).order_by("id")
        return Response(
            anno_serializer.TLLabelsSerializer(project_labels, many=True).data
        )
class ProjectPermission(generics.GenericAPIView):
    """Manage per-user roles (permissions) on a project."""
    queryset = Projects.objects.all()
    serializer_class = anno_serializer.ProjectsPermission
    permission_classes = [permissions.IsAuthenticated]
    pagination_class = None

    @swagger_auto_schema(responses={"400": get_generic_error_schema(),},)
    def get(self, request, pk=None):
        """Get list permissions"""
        # Use get_project() consistently (unknown id -> 400 ParseError).
        # The original fetched `project` and then ignored it, re-resolving
        # the project via self.get_object() instead.
        project = self.get_project(pk)
        perm = models.ProjectsPermission.objects.filter(project=project)
        serializer = self.serializer_class(perm, many=True)
        return Response(serializer.data, status=200)

    @swagger_auto_schema(
        responses={
            "201": "Created success",
            "400": get_generic_error_schema(),
        },
    )
    def post(self, request, pk=None):
        """Add rights for user"""
        # NOTE: an owner-only authorization check was present but disabled in
        # the original implementation.
        project = self.get_project(pk)
        # --- Checks
        username = request.data.get("username")
        role = request.data.get("role")
        if username is None:
            raise rest_except.ParseError(
                _("The field '{}' is incorrectly filled").format("username")
            )
        if role is None:
            raise rest_except.ParseError(
                _("The field '{}' is incorrectly filled").format("role")
            )
        try:
            user_obj = User.objects.get(username=username)
        except User.DoesNotExist:
            raise rest_except.ParseError(
                _("'{}'. User not found").format(username)
            )
        if user_obj == project.owner:
            # The owner already has full rights; an explicit role is invalid.
            raise rest_except.ParseError(_("The project owner selected"))
        if role not in [x[0] for x in models.PROJECT_ROLES]:
            raise rest_except.ParseError(_("Unknown role"))
        try:
            perm = models.ProjectsPermission.objects.get(
                project=project, user=user_obj
            )
        except models.ProjectsPermission.DoesNotExist:
            perm = None
        # --- Action (upsert)
        if perm is None:
            perm = models.ProjectsPermission.objects.create(
                project=project, user=user_obj, role=role
            )
            return Response(status=status.HTTP_201_CREATED)
        perm.role = role
        perm.save()
        serializer = self.serializer_class(perm)
        return Response(serializer.data, status=200)

    @swagger_auto_schema(responses={"400": get_generic_error_schema(),},)
    def put(self, request, pk=None):
        """Update permission"""
        # Same upsert semantics as POST.
        return self.post(request, pk)

    @swagger_auto_schema(
        responses={"204": "Success delete", "400": get_generic_error_schema(),},
        request_body=openapi.Schema(
            in_=openapi.IN_BODY,
            type=openapi.TYPE_OBJECT,
            properties={
                "username": openapi.Schema(
                    type=openapi.TYPE_STRING, description="User name"
                ),
            },
            required=["username"],
        ),
    )
    def delete(self, request, pk=None):
        """Delete permission for user"""
        project = self.get_project(pk)
        username = request.data.get("username")
        try:
            user_obj = User.objects.get(username=username)
        except User.DoesNotExist:
            raise rest_except.ParseError(
                _("'{}'. User not found").format(username)
            )
        try:
            perm = models.ProjectsPermission.objects.get(
                project=project, user=user_obj
            )
            perm.delete()
        except models.ProjectsPermission.DoesNotExist:
            # Nothing to delete - treat as success (idempotent delete).
            pass
        return Response(status=status.HTTP_204_NO_CONTENT)

    def get_project(self, pk):
        """Resolve the project or raise a 400 ParseError."""
        try:
            return models.Projects.objects.get(pk=pk)
        except models.Projects.DoesNotExist:
            raise rest_except.ParseError(_("Project not found"))
| StarcoderdataPython |
8155503 | <gh_stars>0
import socket
hostname = socket.gethostname()

# Filesystem layout: code lives under repo/, all mutable data under writable/.
ROOT = '/scratch2/www/signbank/'
BASE_DIR = ROOT+'repo/'
WRITABLE_FOLDER = ROOT+'writable/'

# Added test database, to run unit tests using this copy of the database, use -k argument to keep test database
# python bin/develop.py test -k
DATABASES = {'default':
                {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'NAME': WRITABLE_FOLDER+'database/signbank.db',
                    'TEST': {
                        'NAME': WRITABLE_FOLDER+'database/test-signbank.db',
                    }
                }
            }

# Django expects ADMINS to be a sequence of (name, email) pairs. The previous
# value (('...', '...')) was a single 2-tuple of strings (the outer parens do
# nothing), so error e-mails would be mis-addressed; the trailing comma makes
# it a one-element tuple of pairs.
ADMINS = (('<NAME>', '<EMAIL>'),)
# what do we call this signbank?
LANGUAGE_NAME = "Global"
COUNTRY_NAME = "Netherlands"

#Influences which template and css folder are used
SIGNBANK_VERSION_CODE = 'global'
URL = 'https://signbank.science.ru.nl/'
ALLOWED_HOSTS = ['signbank.science.ru.nl']

# Identity marker so translatable strings can be collected without importing
# Django's translation machinery at settings-load time.
# (A def instead of an assigned lambda, per PEP 8 E731.)
def gettext(s):
    return s

LANGUAGES = (
    ('en', gettext('English')),
    ('nl', gettext('Dutch')),
    ('zh-hans', gettext('Chinese'))
)
LANGUAGE_CODE = "en"
SEPARATE_ENGLISH_IDGLOSS_FIELD = True
DEFAULT_KEYWORDS_LANGUAGE = {'language_code_2char': 'en'}

# Field groups shown per tab in the gloss detail/edit views.
FIELDS = {}
FIELDS['main'] = ['useInstr','wordClass']
FIELDS['phonology'] = ['handedness','domhndsh','subhndsh','handCh','relatArtic','locprim','locVirtObj',
          'relOriMov','relOriLoc','oriCh','contType','movSh','movDir','repeat','altern','phonOth', 'mouthG',
          'mouthing', 'phonetVar',]
FIELDS['semantics'] = ['iconImg','namEnt','semField','valence']
FIELDS['frequency'] = ['tokNo','tokNoSgnr','tokNoA','tokNoSgnrA','tokNoV','tokNoSgnrV','tokNoR','tokNoSgnrR','tokNoGe','tokNoSgnrGe',
                       'tokNoGr','tokNoSgnrGr','tokNoO','tokNoSgnrO']
FIELDS['handshape'] = ['hsNumSel', 'hsFingSel', 'hsFingSel2', 'hsFingConf',
                       'hsFingConf2', 'hsAperture',
                       'hsSpread', 'hsFingUnsel', 'fsT', 'fsI', 'fsM', 'fsR', 'fsP',
                       'fs2T', 'fs2I', 'fs2M', 'fs2R', 'fs2P',
                       'ufT', 'ufI', 'ufM', 'ufR', 'ufP']

# ELAN external controlled vocabulary (ECV) export settings.
ECV_FILE = WRITABLE_FOLDER+'ecv/ngt.ecv'
ECV_FOLDER = WRITABLE_FOLDER+'ecv'
ECV_SETTINGS = {
    'CV_ID': 'CNGT_RU-lexicon',
    'include_phonology_and_frequencies': True,

    # The order of languages matters as the first will
    # be treated as default by ELAN
    'languages': [
        {
            'id': 'nld',
            'description': 'De glossen-CV voor het CNGT (RU)',
            'annotation_idgloss_fieldname': 'annotationidglosstranslation_nl',
            'attributes': {
                'LANG_DEF': 'http://cdb.iso.org/lg/CDB-00138580-001',
                'LANG_ID': 'nld',
                'LANG_LABEL': 'Dutch (nld)'
            }
        },
        {
            'id': 'eng',
            'description': 'The glosses CV for the CNGT (RU)',
            'annotation_idgloss_fieldname': 'annotationidglosstranslation_en',
            'attributes': {
                'LANG_DEF': 'http://cdb.iso.org/lg/CDB-00138502-001',
                'LANG_ID': 'eng',
                'LANG_LABEL': 'English (eng)'
            }
        },
    ]
}
# Media subdirectories (relative to the media root).
GLOSS_VIDEO_DIRECTORY = 'glossvideo'
GLOSS_IMAGE_DIRECTORY = 'glossimage'
CROP_GLOSS_IMAGES = True
HANDSHAPE_IMAGE_DIRECTORY = 'handshapeimage'
OTHER_MEDIA_DIRECTORY = WRITABLE_FOLDER+'othermedia/'
WSGI_FILE = ROOT+'lib/python2.7/site-packages/signbank/wsgi.py'
# Drop folders scanned for media to import into the database.
IMAGES_TO_IMPORT_FOLDER = WRITABLE_FOLDER+'import_images/'
VIDEOS_TO_IMPORT_FOLDER = WRITABLE_FOLDER+'import_videos/'
OTHER_MEDIA_TO_IMPORT_FOLDER = WRITABLE_FOLDER+'import_other_media/'
SIGNBANK_PACKAGES_FOLDER = WRITABLE_FOLDER+'packages/'
SHOW_MORPHEME_SEARCH = True
SHOW_DATASET_INTERFACE_OPTIONS = True
DEFAULT_DATASET = 'NGT'
# Corpus NGT (CNGT) source data locations.
CNGT_EAF_FILES_LOCATION = WRITABLE_FOLDER+'corpus-ngt/eaf/'
CNGT_METADATA_LOCATION = ROOT+'CNGT_MetadataEnglish_OtherResearchers.csv'
FFMPEG_PROGRAM = "avconv"
TMP_DIR = "/tmp"
API_FIELDS = [
    'idgloss',
]
# This is a short mapping between 2- and 3-letter language codes.
# This needs a more complete solution (perhaps a library), but then the
# code 'cn' for Chinese should be changed to 'zh'.
LANGUAGE_CODE_MAP = [
    {2:'nl',3:'nld'},
    {2:'en',3:'eng'},
    {2:'zh-hans',3:'chi'}
]
SPEED_UP_RETRIEVING_ALL_SIGNS = True
import datetime
# Glosses newer than this window count as "recently added".
RECENTLY_ADDED_SIGNS_PERIOD = datetime.timedelta(days=90)
| StarcoderdataPython |
4873350 | <gh_stars>0
from django.apps import AppConfig
class NpcConfig(AppConfig):
    """Django application configuration for the ``npc`` app."""
    name = 'npc'
| StarcoderdataPython |
3450193 | # from .build_model import *
# from .feat_extr_model import *
#
# __all__ = ["Clustermodel", "FeatureExtractor"]
| StarcoderdataPython |
251304 | <gh_stars>0
"""
captcha-tensorflow
Copyright (c) 2017 <NAME>
https://github.com/JackonYang/captcha-tensorflow/blob/master/captcha-solver-model-restore.ipynb
"""
from os import path
# import matplotlib.pyplot as plt
import numpy as np # linear algebra
import tensorflow as tf
from keras.models import load_model
from PIL import Image
# Path of the saved Keras captcha model, relative to this file's directory.
MODEL_PATH = "saved_model/luogu_captcha"
# Loaded once at import time and reused by predict().
model = load_model(path.join(path.dirname(__file__), MODEL_PATH))
def predict(image):
    """Decode the text of a captcha *image* using the pre-loaded Keras model.

    *image* is anything ``PIL.Image.open`` accepts (path or file object).
    Returns the predicted characters as a single string.
    """
    img = Image.open(image)
    # Scale pixel values into [0, 1] as the model was trained on.
    pixels = np.array(img) / 255.0
    scores = model.predict(np.array([pixels]))
    # Highest-scoring class per character position; classes are char codes.
    best = tf.math.argmax(scores, axis=-1)
    return "".join(chr(int(label)) for label in best[0])
# plt.imshow(ima)
if __name__ == "__main__":
    # Print the model architecture, then decode a sample captcha.
    model.summary()
    print(predict("./captcha.jpeg"))
| StarcoderdataPython |
72725 | <reponame>radiumweilei/chinahadoop-ml-2
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from sklearn import svm
import matplotlib.colors
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score
import warnings
def show_accuracy(a, b):
    """Print the overall accuracy (%) of predictions *b* against labels *a*."""
    matches = (a.ravel() == b.ravel())
    print('正确率:%.2f%%' % (100 * float(matches.sum()) / a.size))
def show_recall(y, y_hat):
    """Print the recall (%) of *y_hat* for the positive class (label == 1)."""
    predicted_pos = y_hat[y == 1] == 1
    n_positive = np.extract(y == 1, y).size
    print('召回率:%.2f%%' % (100 * float(np.sum(predicted_pos)) / n_positive))
if __name__ == "__main__":
    # Demo: handling imbalanced data with SVM class weights.
    warnings.filterwarnings("ignore")  # UndefinedMetricWarning
    np.random.seed(0)  # fixed seed so the generated data is identical each run
    # 990 negative samples vs 10 positive samples (heavily imbalanced).
    c1 = 990
    c2 = 10
    N = c1 + c2
    x_c1 = 3 * np.random.randn(c1, 2)
    x_c2 = 0.5 * np.random.randn(c2, 2) + (4, 4)
    x = np.vstack((x_c1, x_c2))
    y = np.ones(N)
    y[:c1] = -1
    # marker sizes for plotting (bigger for the minority class)
    s = np.ones(N) * 30
    s[:c1] = 10
    # classifiers with different kernels / class weights to compare
    clfs = [svm.SVC(C=1, kernel='linear'),
            svm.SVC(C=1, kernel='linear', class_weight={-1: 1, 1: 10}),
            svm.SVC(C=0.8, kernel='rbf', gamma=0.5, class_weight={-1: 1, 1: 2}),
            svm.SVC(C=0.8, kernel='rbf', gamma=0.5, class_weight={-1: 1, 1: 10})]
    titles = 'Linear', 'Linear, Weight=50', 'RBF, Weight=2', 'RBF, Weight=10'
    x1_min, x1_max = x[:, 0].min(), x[:, 0].max()  # range of column 0
    x2_min, x2_max = x[:, 1].min(), x[:, 1].max()  # range of column 1
    x1, x2 = np.mgrid[x1_min:x1_max:500j, x2_min:x2_max:500j]  # grid sample points
    grid_test = np.stack((x1.flat, x2.flat), axis=1)  # test points
    cm_light = matplotlib.colors.ListedColormap(['#77E0A0', '#FF8080'])
    cm_dark = matplotlib.colors.ListedColormap(['g', 'r'])
    matplotlib.rcParams['font.sans-serif'] = [u'SimHei']
    matplotlib.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(10, 8), facecolor='w')
    for i, clf in enumerate(clfs):
        clf.fit(x, y)
        y_hat = clf.predict(x)
        # show_accuracy(y_hat, y)  # accuracy (manual helper, superseded below)
        # show_recall(y, y_hat)  # recall (manual helper, superseded below)
        print(i + 1, '次:')
        print('正确率:\t', accuracy_score(y, y_hat))
        print(' 精度 :\t', precision_score(y, y_hat, pos_label=1))
        print('召回率:\t', recall_score(y, y_hat, pos_label=1))
        print('F1Score:\t', f1_score(y, y_hat, pos_label=1))
        # plot the decision regions
        plt.subplot(2, 2, i + 1)
        grid_hat = clf.predict(grid_test)  # predicted class per grid point
        grid_hat = grid_hat.reshape(x1.shape)  # reshape to match the grid
        plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light, alpha=0.8)
        plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', s=s, cmap=cm_dark)  # samples
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)
        plt.title(titles[i])
        plt.grid()
    plt.suptitle(u'不平衡数据的处理', fontsize=18)
    plt.tight_layout(1.5)
    plt.subplots_adjust(top=0.92)
    plt.show()
| StarcoderdataPython |
3376771 | #!/usr/bin/env python3
from taptaptap3 import TapDocumentValidator, parse_string
from taptaptap3.exc import TapMissingPlan, TapInvalidNumbering
from taptaptap3.exc import TapBailout, TapParseError
import io
import pickle
import unittest
def parse(source, strict=False):
    """Parse TAP *source* text; unless *strict*, parse leniently."""
    return parse_string(source, lenient=not strict)
def validate_manually(doc):
    """Validate *doc* with TapDocumentValidator.

    Raises the validator's errors for structurally broken documents;
    otherwise returns whether the document is valid.
    """
    validator = TapDocumentValidator(doc)
    validator.sanity_check()
    return validator.valid()
class TestExceptions(unittest.TestCase):
    """Tests for taptaptap3's exception behavior."""

    def testParseError(self):
        """Malformed TAP documents raise the expected errors (strict vs lenient)."""
        two_tcs1 = "1..1\nnot ok 1\nnot ok 1\n"
        no_plan = "not ok\n"
        no_integer_version = "TAP version 13h\n1..1\nok\n"
        invalid_plan = "1..1b\nok\n"
        negative_plan = "3..0\n "
        # duplicate test-case numbering fails validation in both modes
        two_tcs1_doc = parse(two_tcs1, False)
        self.assertRaises(TapInvalidNumbering, validate_manually, two_tcs1_doc)
        two_tcs1_doc = parse(two_tcs1, True)
        self.assertRaises(TapInvalidNumbering, validate_manually, two_tcs1_doc)
        # missing plan fails validation in both modes
        no_plan_doc = parse(no_plan, False)
        self.assertRaises(TapMissingPlan, validate_manually, no_plan_doc)
        no_plan_doc = parse(no_plan, True)
        self.assertRaises(TapMissingPlan, validate_manually, no_plan_doc)
        # non-integer version only rejected when parsing strictly
        self.assertRaises(TapParseError, parse, no_integer_version, True)
        # invalid plan: lenient parse yields a doc missing its plan; strict parse raises
        invalid_plan_doc = parse(invalid_plan, False)
        self.assertRaises(TapMissingPlan, validate_manually, invalid_plan_doc)
        self.assertRaises(TapParseError, parse, invalid_plan, True)
        # negative-range plan: lenient parse validates; strict parse raises
        neg_plan_doc = parse(negative_plan, False)
        validate_manually(neg_plan_doc)
        self.assertRaises(TapParseError, parse, negative_plan, True)

    def testBailout(self):
        """A raised TapBailout stringifies with the 'Bail out!' prefix."""
        try:
            raise TapBailout("Message")
            self.assertTrue(False)  # NOTE(review): unreachable — raise above is unconditional
        except TapBailout as e:
            self.assertIn("Bail out!", str(e))

    def testPickle(self):
        """TapBailout instances survive a pickle round-trip with msg and data."""
        def trypickle(obj):
            # Round-trip *obj* through an in-memory pickle.
            dump_file = io.BytesIO()
            pickle.dump(obj, dump_file)
            dump_file.seek(0)
            return pickle.load(dump_file)
        bailout = TapBailout("Hello World")
        bailout.data = ["Hi", "ho"]
        bailout = trypickle(bailout)
        self.assertEqual(bailout.msg, "Hello World")
        self.assertEqual(";".join(bailout.data), "Hi;ho")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
272216 | #!/usr/bin/env python
# Copyright 2016 University of Chicago
# Licensed under the APL 2.0 license
import argparse
import os
import re
import shutil
import subprocess
import sys
import time
import psycopg2
import fsurfer
import fsurfer.helpers
import fsurfer.log
PARAM_FILE_LOCATION = "/etc/fsurf/db_info"
VERSION = fsurfer.__version__
def purge_workflow_file(path):
    """
    Remove the file or directory at *path*.

    Directories are removed with ``os.rmdir`` and therefore must already
    be empty; a non-empty directory makes this return False.

    :param path: path to directory or file to delete
    :return: True if successfully removed (or already absent), False otherwise
    """
    logger = fsurfer.log.get_logger()
    # A missing path counts as success — there is nothing left to purge.
    if not os.path.exists(path):
        return True
    try:
        if os.path.isfile(path):
            os.unlink(path)
        elif os.path.isdir(path):
            os.rmdir(path)
        return True
    except OSError as e:
        logger.exception("Exception: {0}".format(str(e)))
        return False
def get_input_files(workflow_id):
    """
    Get the input file paths recorded for a workflow.

    For each input file, both the file path and its containing directory
    are appended, so callers can purge both.

    :param workflow_id: id for workflow
    :return: a list of input file/directory paths, or None on a database error
    """
    logger = fsurfer.log.get_logger()
    input_files = []
    conn = None
    try:
        conn = fsurfer.helpers.get_db_client()
        cursor = conn.cursor()
        input_query = "SELECT path " \
                      "FROM freesurfer_interface.input_files " \
                      "WHERE job_id = %s"
        cursor.execute(input_query, [workflow_id])
        for row in cursor.fetchall():
            input_files.append(row[0])
            # also schedule the parent directory for removal
            input_files.append(os.path.dirname(row[0]))
    except psycopg2.Error as e:
        logger.exception("Error: {0}".format(e))
        return None
    finally:
        # close the connection whether or not the query succeeded
        if conn:
            conn.close()
    return input_files
def delete_incomplete_jobs(dry_run=False):
    """
    Delete jobs that were submitted and then deleted before a job run started
    (i.e. 'DELETE PENDING' jobs with no matching job_run row).

    :param dry_run: when True, print intended actions and roll back DB changes
                    instead of carrying them out
    :return: exit code (0 for success, non-zero for failure)
    """
    logger = fsurfer.log.get_logger()
    conn = fsurfer.helpers.get_db_client()
    cursor = conn.cursor()
    # LEFT JOIN + IS NULL selects only jobs that never started a run.
    job_query = "SELECT jobs.id, " \
                "       jobs.username, " \
                "       jobs.state, " \
                "       jobs.subject " \
                "FROM freesurfer_interface.jobs AS jobs " \
                "LEFT JOIN freesurfer_interface.job_run " \
                "  ON jobs.id = job_run.job_id " \
                "WHERE jobs.state = 'DELETE PENDING' AND " \
                "      job_run.job_id IS NULL"
    job_update = "UPDATE freesurfer_interface.jobs " \
                 "SET state = 'DELETED' " \
                 "WHERE id = %s;"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            workflow_id = row[0]
            username = row[1]
            logger.info("Deleting workflow {0} for user {1}".format(workflow_id,
                                                                    username))
            deletion_list = []
            # add input files (and their directories) to the purge list
            input_files = get_input_files(workflow_id)
            if input_files is None:
                logger.error("Can't find input files for " +
                             "workflow {0}".format(workflow_id))
            else:
                deletion_list.extend(input_files)
            for entry in deletion_list:
                if dry_run:
                    sys.stdout.write("Would delete {0}\n".format(entry))
                else:
                    logger.info("Removing {0}".format(entry))
                    if not purge_workflow_file(entry):
                        logger.error("Can't remove {0} for job {1}".format(entry,
                                                                           workflow_id))
            logger.info("Setting workflow {0} to DELETED".format(workflow_id))
            cursor.execute(job_update, [workflow_id])
            if dry_run:
                conn.rollback()
            else:
                conn.commit()
    except psycopg2.Error as e:
        logger.exception("Error: {0}".format(e))
        return 1
    finally:
        # NOTE(review): this commit runs even after a dry-run rollback; the
        # transaction is empty at that point, so nothing is persisted.
        conn.commit()
        conn.close()
    return 0
def delete_job():
    """
    Delete all jobs in a delete pending state, stopping pegasus
    workflows if needed.  Removes the pegasus scratch/workflow directory,
    recorded input files, the result directory, and copied logs/outputs,
    then marks each job DELETED.  Finishes by purging never-started jobs
    via delete_incomplete_jobs().

    :return: exit code (0 for success, non-zero for failure)
    """
    fsurfer.log.initialize_logging()
    logger = fsurfer.log.get_logger()
    parser = argparse.ArgumentParser(description="Process and remove old results")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for action
    parser.add_argument('--dry-run', dest='dry_run',
                        action='store_true', default=False,
                        help='Mock actions instead of carrying them out')
    parser.add_argument('--debug', dest='debug',
                        action='store_true', default=False,
                        help='Output debug messages')
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        fsurfer.log.set_debugging()
    if args.dry_run:
        sys.stdout.write("Doing a dry run, no changes will be made\n")
    conn = fsurfer.helpers.get_db_client()
    cursor = conn.cursor()
    # inner join: only jobs that actually have a run record
    job_query = "SELECT jobs.id, " \
                "       jobs.username, " \
                "       jobs.state, " \
                "       job_run.pegasus_ts, " \
                "       jobs.subject " \
                "FROM freesurfer_interface.jobs AS jobs, " \
                "     freesurfer_interface.job_run AS job_run " \
                "WHERE jobs.state = 'DELETE PENDING' AND " \
                "      jobs.id = job_run.job_id"
    job_update = "UPDATE freesurfer_interface.jobs " \
                 "SET state = 'DELETED' " \
                 "WHERE id = %s;"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            workflow_id = row[0]
            username = row[1]
            logger.info("Deleting workflow {0} for user {1}".format(workflow_id,
                                                                    username))
            # pegasus_ts is stored as datetime in the database, convert it to what we have on the fs
            pegasus_ts = row[3]
            if pegasus_ts is None:
                # not submitted yet — just mark it deleted, nothing on disk
                logger.info("Workflow {0} not ".format(workflow_id) +
                            "submitted, updating")
                cursor.execute(job_update, [workflow_id])
                if args.dry_run:
                    conn.rollback()
                else:
                    conn.commit()
                continue
            # scratch directory holding the running pegasus workflow
            workflow_dir = os.path.join(fsurfer.FREESURFER_SCRATCH,
                                        username,
                                        'workflows',
                                        'fsurf',
                                        'pegasus',
                                        'freesurfer',
                                        pegasus_ts)
            # directory holding the workflow output
            result_dir = os.path.join(fsurfer.FREESURFER_BASE,
                                      username,
                                      'workflows',
                                      'output',
                                      'fsurf',
                                      'pegasus',
                                      'freesurfer',
                                      pegasus_ts)
            if args.dry_run:
                sys.stdout.write("Would run pegasus-remove "
                                 "{0}\n".format(result_dir))
            else:
                try:
                    output = subprocess.check_output(['/usr/bin/pegasus-remove',
                                                      workflow_dir],
                                                     stderr=subprocess.STDOUT)
                    exit_code = 0
                except subprocess.CalledProcessError as err:
                    exit_code = err.returncode
                    output = err.output
                # job removed (code = 0) just now or it's been removed earlier
                if exit_code == 0 or 'not found' in output:
                    # look for condor job id and wait a bit for pegasus to remove it
                    # so that we can delete the pegasus directories
                    job_id = re.match(r'Job (\d+.\d+) marked for removal', output)
                    if job_id is not None:
                        logger.info("Waiting for running jobs to be removed...\n")
                        count = 0
                        # poll condor_q until the dagman job disappears (max ~5 min)
                        while True:
                            time.sleep(10)
                            try:
                                output = subprocess.check_output(["/usr/bin/condor_q",
                                                                  job_id.group(1)])
                            except subprocess.CalledProcessError:
                                logger.exception("An error occurred while "
                                                 "checking for running "
                                                 "jobs, exiting...\n")
                                break
                            if 'pegasus-dagman' not in output:
                                break
                            count += 1
                            if count > 30:
                                logger.error("Can't remove job, exiting...\n")
                                break
                else:
                    logger.error("Got error while removing workflow, "
                                 "exitcode: {0} error: {1}".format(exit_code, output))
            logger.info("Jobs removed, removing workflow directory\n")
            try:
                if not args.dry_run and os.path.exists(workflow_dir):
                    shutil.rmtree(workflow_dir)
            except shutil.Error:
                logger.exception("Can't remove directory at "
                                 "{0}, exiting...\n".format(workflow_dir))
            deletion_list = []
            # add input files (and their directories) to the purge list
            input_files = get_input_files(workflow_id)
            if input_files is None:
                logger.error("Can't find input files for " +
                             "workflow {0}".format(workflow_id))
            else:
                deletion_list.extend(input_files)
            # remove files in result dir
            if os.path.isdir(result_dir):
                for entry in os.listdir(result_dir):
                    deletion_list.append(os.path.join(result_dir, entry))
            if os.path.exists(result_dir):
                deletion_list.append(result_dir)
            # delete output and log copied over after workflow completion
            # if present
            deletion_list.append(os.path.join(fsurfer.FREESURFER_BASE,
                                              username,
                                              'results',
                                              'recon_all-{0}.log'.format(workflow_id)))
            deletion_list.append(os.path.join(fsurfer.FREESURFER_BASE,
                                              username,
                                              'results',
                                              "{0}_{1}_output.tar.bz2".format(workflow_id,
                                                                              row[4])))
            for entry in deletion_list:
                if args.dry_run:
                    sys.stdout.write("Would delete {0}\n".format(entry))
                else:
                    logger.info("Removing {0}".format(entry))
                    if not purge_workflow_file(entry):
                        logger.error("Can't remove {0} for job {1}".format(entry,
                                                                           workflow_id))
            logger.info("Setting workflow {0} to DELETED".format(workflow_id))
            cursor.execute(job_update, [workflow_id])
            if args.dry_run:
                conn.rollback()
            else:
                conn.commit()
    except psycopg2.Error as e:
        logger.exception("Error: {0}".format(e))
        return 1
    finally:
        conn.commit()
        conn.close()
    # also clean up jobs that never started a run
    retcode = delete_incomplete_jobs()
    return retcode
if __name__ == '__main__':
    # workaround missing subprocess.check_output on Python < 2.7
    if "check_output" not in dir(subprocess):  # duck punch it in!
        def check_output(*popenargs, **kwargs):
            """
            Run command with arguments and return its output as a byte string.
            Backported from Python 2.7 as it's implemented as pure python
            on stdlib.
            """
            process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
            output, unused_err = process.communicate()
            retcode = process.poll()
            if retcode:
                # mirror CPython: raise CalledProcessError carrying the output
                cmd = kwargs.get("args")
                if cmd is None:
                    cmd = popenargs[0]
                error = subprocess.CalledProcessError(retcode, cmd)
                error.output = output
                raise error
            return output
        subprocess.check_output = check_output
    sys.exit(delete_job())
| StarcoderdataPython |
6583492 | """
ipwatch.py - version 0.0.1
Released under MIT license
https://github.com/packetflare/ipwatch/
OSX Menu widget that displays the user's current public IP address and
associated informaton as detected by the service https://ipinfo.io/.
A request to ipinfo.io is triggered if the application detects a change
in any local interface addresses. A scheduled request is also sent
every two minutes. On detection in a change of the public address,
a notification is invoked.
For dependencies -
$ pip install pyobjc-framework-Cocoa
$ pip install pyObjC
"""
from Foundation import *
from AppKit import *
from PyObjCTools import AppHelper
from Foundation import NSUserNotification
from Foundation import NSUserNotificationCenter
from Foundation import NSUserNotificationDefaultSoundName
from SystemConfiguration import *
from collections import namedtuple
import httplib
import json
# check every 2 minutes. ipinfo.io allows for upto 1000 free requests a day.
PERIODIC_CHECK_INTERVAL = 120
class AppDelegate(NSObject):
    """Cocoa application delegate: builds the status-bar menu and owns the timers."""
    state = 'idle'
    # interval timer started flag (NOTE(review): never updated in this class)
    timerStartFlag = False
    # timer start time
    startTime = NSDate.date()
    def infoMenu(self) :
        """
        Sub-menu "Details" which displays information such as hostname, ASN, etc.
        """
        self.detailMenuItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_("Details...", None, '')
        detailSubMenu = NSMenu.alloc().init()
        # data is dictionary of the JSON response from ipinfo.io/json
        for k, v in self.ipWatchApp.data.items() :
            # TODO: order the listing
            item = "%s: %s" % (k, v)
            detailSubMenu.addItemWithTitle_action_keyEquivalent_(item, None, '')
        self.detailMenuItem.setSubmenu_(detailSubMenu)
        # position 0 specified as info sub-menu deleted and re-added later when updated
        self.menu.insertItem_atIndex_(self.detailMenuItem, 0)
    def applicationDidFinishLaunching_(self, sender):
        """
        Render the status bar item, build the menu, and start both timers.
        """
        NSLog("Application did finish launching.")
        NSApp.setActivationPolicy_(NSApplicationActivationPolicyProhibited)
        # item visible on status bar "IP x.x.x.x"
        self.statusItem = NSStatusBar.systemStatusBar().statusItemWithLength_(NSVariableStatusItemLength)
        self.statusItem.setTitle_(u"IP")
        self.statusItem.setHighlightMode_(TRUE)
        self.statusItem.setEnabled_(TRUE)
        self.menu = NSMenu.alloc().init()
        self.infoMenu()
        # menu item to force a probe to update public IP information
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Update now', 'updateNow:', '')
        # self.menu.addItem_(menuitem)
        #menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('test', 'test:', '')
        self.menu.addItem_(menuitem)
        self.statusItem.setMenu_(self.menu)
        # separator line
        self.menu.addItem_(NSMenuItem.separatorItem())
        # default action is quit
        menuitem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'terminate:', '')
        self.menu.addItem_(menuitem)
        # periodic public-IP probe timer (first fire 30s after start)
        addedDate = self.startTime.dateByAddingTimeInterval_(30)
        self.timer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(
            addedDate, PERIODIC_CHECK_INTERVAL, self,
            'checkTimerCallback:', None, True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.timer, NSDefaultRunLoopMode)
        #self.timer.fire()
        # local interface check timer. Currently set to 1/2 a second
        self.ifaceCheckTimer = NSTimer.alloc().initWithFireDate_interval_target_selector_userInfo_repeats_(
            self.startTime, 1/2., self,
            'ifaceTimerCallback:', None, True)
        NSRunLoop.currentRunLoop().addTimer_forMode_(self.ifaceCheckTimer, NSDefaultRunLoopMode)
        self.ifaceCheckTimer.fire()
    def ifaceTimerCallback_(self, notification) :
        """
        Callback for interface address check timer.
        """
        self.ipWatchApp.checkForIfaceChange()
    def test_(self, notification) :
        """ not used
        """
        self.ipWatchApp.testing()
        return
    def updateNow_(self, notification):
        """
        Callback when user clicks the 'Update now' menu item.
        """
        self.ipWatchApp.updateNow()
    def checkTimerCallback_(self, notification):
        """
        Callback for the periodic check of the public IP address.
        """
        self.ipWatchApp.updateNow()
class IPWatchApp :
    """Core app logic: queries ipinfo.io and updates the menu/notifications."""
    # public IP address in last check
    prevIPAddress = None
    # Keep track of adapter interface addresses. If changed, will trigger a check.
    # NOTE(review): class-level mutable list — shared across instances; fine
    # here since only one IPWatchApp is ever created.
    prevIFaceAddrs = []
    def __init__(self) :
        self.data = {'ip' : ''}
        app = NSApplication.sharedApplication()
        delegate = AppDelegate.alloc().init()
        # delegate has to be able to access members of this class
        delegate.ipWatchApp = self
        # and vice-versa
        self.nsapp = delegate
        app.setDelegate_(delegate)
        return
    def fetchIPDetails(self) :
        """
        Performs query to https://ipinfo.io/ which returns user's public IP address and other
        details in JSON format.  On failure, self.data['ip'] is set to 'Error'.
        """
        try :
            conn = httplib.HTTPSConnection("ipinfo.io")
            conn.request("GET", "/json")
            response = conn.getresponse()
            self.data = json.load(response)
        except Exception as e :
            self.data = {'ip' : 'Error'}
        # ipinfo.io signals failures via an 'error' key in the response
        if 'error' in self.data :
            self.data['ip'] = 'Error'
    def sendNotification(self, title, body) :
        """
        Notification pop-up shown when an IP address change is detected.
        """
        notification = NSUserNotification.alloc().init()
        center = NSUserNotificationCenter.defaultUserNotificationCenter()
        notification.setTitle_(title)
        notification.setInformativeText_(body)
        center.deliverNotification_(notification)
    def updateNow(self) :
        """Fetch the public IP and refresh menu/notification if it changed."""
        self.fetchIPDetails()
        currentIPAddress = self.data['ip']
        # public IP address has changed
        if self.prevIPAddress != currentIPAddress :
            # if prevIPAddress is None, then assume program has just been initialised and
            # user does not want to see a notification
            if self.prevIPAddress is not None :
                textBody = "Was %s\nNow %s" % (self.prevIPAddress, currentIPAddress)
                self.sendNotification("Public IP address changed", textBody)
            # update the details menu item
            self.nsapp.menu.removeItem_(self.nsapp.detailMenuItem)
            self.nsapp.infoMenu()
            # update the title on the menu bar
            self.nsapp.statusItem.setTitle_(u"IP: " + currentIPAddress)
            self.prevIPAddress = currentIPAddress
        return currentIPAddress
    def checkForIfaceChange(self) :
        """
        Enumerates all interface addresses on the host and triggers an
        updateNow() when the set of addresses differs from last time.
        # see: http://kbyanc.blogspot.com/2010/10/python-enumerating-ip-addresses-on.html
        """
        ds = SCDynamicStoreCreate(None, 'GetIPv4Addresses', None, None)
        # Get all keys matching pattern State:/Network/Service/[^/]+/IPv4
        pattern = SCDynamicStoreKeyCreateNetworkServiceEntity(None, kSCDynamicStoreDomainState, kSCCompAnyRegex, kSCEntNetIPv4)
        patterns = CFArrayCreate(None, (pattern, ), 1, kCFTypeArrayCallBacks)
        valueDict = SCDynamicStoreCopyMultiple(ds, None, patterns)
        # Approach to detect a change is to store addresses in a list and calculate the intersection of the prior address
        # list. If the number of intersecting elements differs from either list size, the sets differ.
        currentIFaceAddrs = []
        for serviceDict in valueDict.values():
            for address in serviceDict[u'Addresses']:
                currentIFaceAddrs.append(address)
        # use the max length of either list as the length
        if len(set(currentIFaceAddrs).intersection(self.prevIFaceAddrs)) != max(len(self.prevIFaceAddrs), len(currentIFaceAddrs)) :
            self.updateNow()
        self.prevIFaceAddrs = list(currentIFaceAddrs)
if __name__ == "__main__":
    # Create the app (which installs the delegate) and hand control to Cocoa.
    ipWatchApp = IPWatchApp()
    AppHelper.runEventLoop()
| StarcoderdataPython |
8063523 | <filename>pydocx/openxml/drawing/transform_2d.py
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlChild, XmlAttribute
from pydocx.openxml.drawing.extents import Extents
class Transform2D(XmlModel):
    """Model for the DrawingML 2-D transform element (<a:xfrm>)."""
    XML_TAG = 'xfrm'
    # <a:ext> child holding the shape's extents (size)
    extents = XmlChild(type=Extents)
    # 'rot' attribute: rotation, absent by default
    rotate = XmlAttribute(name='rot', default=None)
| StarcoderdataPython |
3207925 | # -*- coding: utf-8 -*-
from __future__ import (
division, absolute_import, print_function, unicode_literals,
)
from builtins import * # noqa
from future.builtins.disabled import * # noqa
from magic_constraints.exception import MagicSyntaxError, MagicTypeError
def transform_to_slots(constraints_package, *args, **kwargs):
    """Map *args/**kwargs onto the positional parameter slots declared by
    *constraints_package*, filling defaults for unset trailing parameters.

    Raises MagicSyntaxError for too many positionals, unknown keywords,
    double assignment, or slots left unfilled.  Returns the filled slot list.
    """

    # Sentinel marking a slot that has not been assigned yet.
    class UnFill(object):
        pass

    plen = len(constraints_package.parameters)

    if len(args) > plen:
        raise MagicSyntaxError(
            'argument length unmatched.',
            parameters=constraints_package.parameters,
            args=args,
        )

    slots = [UnFill] * plen
    unfill_count = plen

    # 1. fill positional args (in order).
    for i, val in enumerate(args):
        slots[i] = val
    unfill_count -= len(args)

    # 2. fill kwargs, rejecting unknown names and reassignments.
    for key, val in kwargs.items():
        if key not in constraints_package.name_hash:
            raise MagicSyntaxError(
                'invalid keyword argument',
                parameters=constraints_package.parameters,
                key=key,
            )

        i = constraints_package.name_hash[key]
        if slots[i] is not UnFill:
            raise MagicSyntaxError(
                'key reassignment error.',
                parameters=constraints_package.parameters,
                key=key,
            )
        slots[i] = val
        unfill_count -= 1

    # 3. fill defaults if not set.
    # 3.1. deal with the case that no parameter has a default.
    default_begin = constraints_package.start_of_defaults
    if default_begin < 0:
        default_begin = plen
    # 3.2 fill defaults for still-unset slots.
    for i in range(default_begin, plen):
        parameter = constraints_package.parameters[i]
        j = constraints_package.name_hash[parameter.name]
        if slots[j] is UnFill:
            slots[j] = parameter.default
            unfill_count -= 1

    # 4. test if slots still contains UnFill.
    if unfill_count != 0:
        raise MagicSyntaxError(
            'slots contains unfilled argument(s).',
            parameters=constraints_package.parameters,
            slots=slots,
        )

    return slots
def check_and_bind_arguments(parameters, slots, bind_callback):
    """Type-check each slot against its parameter and bind it via
    *bind_callback(name, value)*.

    Parameters that support deferred checking have their slot value wrapped
    instead of checked immediately.  Raises MagicTypeError on mismatch.
    """
    plen = len(parameters)

    for i in range(plen):
        arg = slots[i]
        parameter = parameters[i]

        wrapper = parameter.wrapper_for_deferred_checking()

        # defer checking by wrapping the element of the slot.
        if wrapper:
            slots[i] = wrapper(arg)
        # check now.
        elif not parameter.check_instance(arg):
            raise MagicTypeError(
                'argument unmatched.',
                parameter=parameter,
                argument=arg,
            )

        # bind (the original, unwrapped value).
        bind_callback(parameter.name, arg)
| StarcoderdataPython |
3382952 | import codecs
import sys
def transformer(data_in, data_out, vocab):
    """Rewrite a token file as a file of vocabulary ids.

    The vocabulary file *vocab* holds one ``token##id`` pair per line.
    Every whitespace-separated token of *data_in* is replaced by its id
    (unknown tokens map to the id of the ``<unk>`` entry) and written to
    *data_out*, one line of space-separated ids per input line.  Each
    output line keeps a trailing space before the newline for
    compatibility with the original format.

    :param data_in: path of the input token file
    :param data_out: path of the output id file
    :param vocab: path of the ``token##id`` vocabulary file
    """
    # Build token -> id only; the reverse map built previously was unused.
    tokens2id = {}
    with codecs.open(vocab, "r") as vocab_file:
        for line in vocab_file:
            token, token_id = line.strip().split("##")
            tokens2id[token] = int(token_id)
    with codecs.open(data_in, 'r') as src:
        with codecs.open(data_out, 'w') as dst:
            # Stream line by line instead of materializing readlines().
            for line in src:
                for token in line.strip().split():
                    # Fall back to the <unk> id for out-of-vocabulary tokens.
                    token_id = tokens2id.get(token, tokens2id["<unk>"])
                    dst.write(str(token_id) + ' ')
                dst.write('\n')
if __name__ == '__main__':
    # CLI: transformer.py <data_in> <data_out> <vocab>
    args = sys.argv
    data_in = args[1]
    data_out = args[2]
    vocab = args[3]
transformer(data_in, data_out, vocab) | StarcoderdataPython |
11356324 | from crits.actors.actor import Actor
from crits.services.analysis_result import AnalysisResult
from crits.campaigns.campaign import Campaign
from crits.certificates.certificate import Certificate
from crits.comments.comment import Comment
from crits.domains.domain import Domain
from crits.emails.email import Email
from crits.events.event import Event
from crits.indicators.indicator import Indicator
from crits.ips.ip import IP
from crits.pcaps.pcap import PCAP
from crits.raw_data.raw_data import RawData
from crits.samples.sample import Sample
from crits.screenshots.screenshot import Screenshot
from crits.targets.target import Target
def getHREFLink(object, object_type):
    """
    Creates the URL for the details button used by all object types.

    Built as "/<type-segment>/<view-segment>/<key-value>/", where each
    segment depends on the object type.  NOTE(review): this module is
    Python 2 (it calls ``unicode`` below); porting to Python 3 requires
    replacing that with ``str``.
    """
    # comment is a special case since the link takes you to the object the comment is on
    if object_type == "Comment":
        object_type = object["obj_type"]
    # setting the first part of the url; RawData and AnalysisResult are the
    # only object types whose path differs from just lower-casing + 's'
    href = "/"
    if object_type == "RawData":
        href += "raw_data/"
    elif object_type == "AnalysisResult":
        href += "services/analysis_results/"
    else:
        href += object_type.lower()+"s/"
    # setting the second part of the url; screenshots and targets are the only
    # ones that are different from being 'details'
    if object_type == "Screenshot":
        href += "render/"
    elif object_type == "Target":
        href += "info/"
        # setting key here (may be overridden by "url_key" below)
        key = "email_address"
    else:
        href += "details/"
    # setting the key for the last section of the url since it is different for
    # every object type
    if "url_key" in object:
        key = "url_key"
    elif object_type == "Campaign":
        key = "name"
    elif object_type == "Certificate" or object_type == "PCAP" or object_type == "Sample":
        key = "md5"
    elif object_type == "Domain":
        key = "domain"
    elif object_type == "IP":
        key = "ip"
    elif not object_type == "Target" and "_id" in object:
        key = "_id"
    else:
        key = "id"
    # adding the last part of the url (skipped entirely if the key is absent)
    if key in object:
        href += unicode(object[key]) + "/"
    return href
def get_obj_name_from_title(tableTitle):
    """
    Returns the String pertaining to the type of the table. Used only
    when editing a default dashboard table since they do not have types saved;
    it gets the type from the hard-coded title.  Returns None for an
    unrecognized title.
    """
    title_to_type = {
        "Recent Emails": "Email",
        "Recent Indicators": "Indicator",
        "Recent Samples": "Sample",
        "Top Backdoors": "Backdoor",
        "Top Campaigns": "Campaign",
        "Counts": "Count",
    }
    return title_to_type.get(tableTitle)
def get_obj_type_from_string(objType):
    """
    Returns the Object type (model class) from the string saved to the table.
    This is used in order to build the query to be run.
    Called by generate_search_for_saved_table and get_table_data.
    Returns None for an unrecognized string.
    """
    # Kept as a lazy elif chain: class names are only evaluated on a match.
    if objType == "Actor":
        return Actor
    elif objType == "AnalysisResult":
        return AnalysisResult
    elif objType == "Campaign":
        return Campaign
    elif objType == "Certificate":
        return Certificate
    elif objType == "Comment":
        return Comment
    elif objType == "Domain":
        return Domain
    elif objType == "Email":
        return Email
    elif objType == "Event":
        return Event
    elif objType == "Indicator":
        return Indicator
    elif objType == "IP":
        return IP
    elif objType == "PCAP":
        return PCAP
    elif objType == "RawData":
        return RawData
    elif objType == "Sample":
        return Sample
    elif objType == "Screenshot":
        return Screenshot
    elif objType == "Target":
        return Target
    return None
| StarcoderdataPython |
3477394 | import re
from typing import List
from pygls.lsp.types.basic_structures import (
Diagnostic,
DiagnosticSeverity,
Position,
Range,
)
from pygls.workspace import Document
from server.ats.trees.common import BaseTree, YamlNode
class ValidationHandler:
    """Collects LSP diagnostics for a parsed YAML tree and its document."""

    def __init__(self, tree: BaseTree, document: Document) -> None:
        self._tree = tree
        self._diagnostics: List[Diagnostic] = []
        self._document = document

    def _add_diagnostic(
        self,
        node: YamlNode,
        message: str = "",
        diag_severity: DiagnosticSeverity = None,
    ):
        """Append a diagnostic spanning *node*'s position (no-op if node is falsy)."""
        if node:
            self._diagnostics.append(
                Diagnostic(
                    range=Range(
                        start=Position(
                            line=node.start_pos[0], character=node.start_pos[1]
                        ),
                        end=Position(line=node.end_pos[0], character=node.end_pos[1]),
                    ),
                    message=message,
                    severity=diag_severity,
                )
            )

    def _add_diagnostic_for_range(
        self,
        message: str = "",
        range_start_tuple=None,
        range_end_tuple=None,
        diag_severity: DiagnosticSeverity = None,
    ):
        """Append a diagnostic for explicit (line, character) start/end tuples."""
        if range_start_tuple and range_end_tuple:
            self._diagnostics.append(
                Diagnostic(
                    range=Range(
                        start=Position(
                            line=range_start_tuple[0], character=range_start_tuple[1]
                        ),
                        end=Position(
                            line=range_end_tuple[0], character=range_end_tuple[1]
                        ),
                    ),
                    message=message,
                    severity=diag_severity,
                )
            )

    def _validate_no_duplicates_in_inputs(self):
        """Flag every input whose name is declared more than once."""
        message = "Multiple declarations of input '{}'"
        inputs_names_list = [input.key.text for input in self._tree.get_inputs()]
        for input_node in self._tree.get_inputs():
            if inputs_names_list.count(input_node.key.text) > 1:
                self._add_diagnostic(
                    input_node.key, message=message.format(input_node.key.text)
                )

    def _validate_no_reserved_words_in_inputs_prefix(self):
        """Flag inputs whose name starts with a reserved prefix (case-insensitive)."""
        message = "input '{}' contains a reserved word '{}'"
        reserved_words = ["colony", "torque"]
        for input_node in self._tree.get_inputs():
            for reserved in reserved_words:
                if input_node.key.text.lower().startswith(reserved):
                    self._add_diagnostic(
                        input_node.key,
                        message=message.format(input_node.key.text, reserved),
                    )

    def _validate_no_duplicates_in_outputs(self):
        """Flag duplicate outputs; comparison is case-insensitive."""
        if hasattr(self._tree, "outputs"):
            message = (
                "Multiple declarations of output '{}'. Outputs are not case sensitive."
            )
            outputs_names_list = [
                output.text.lower() for output in self._tree.get_outputs()
            ]
            for output_node in self._tree.get_outputs():
                if outputs_names_list.count(output_node.text.lower()) > 1:
                    self._add_diagnostic(
                        output_node, message=message.format(output_node.text)
                    )

    def _check_for_deprecated_properties(self, deprecated_properties):
        """Warn for each deprecated property key found on non-comment document lines.

        *deprecated_properties* maps a property name to its replacement name
        (or a falsy value when there is no replacement).
        NOTE(review): not invoked by validate() below — callers must invoke it
        directly; confirm this is intentional.
        """
        message_dep = "Deprecated property '{}'."
        message_replace = "Please use '{}' instead."
        line_num = 0
        for line in self._document.lines:
            for prop in deprecated_properties.keys():
                # match "<prop>:" as a whole word before any '#' comment marker
                found = re.findall("^[^#\\n]*(\\b" + prop + "\\b:)", line)
                if len(found) > 0:
                    col = line.find(prop)
                    message = message_dep.format(prop)
                    if deprecated_properties[prop]:
                        message += " " + message_replace.format(
                            deprecated_properties[prop]
                        )
                    self._add_diagnostic_for_range(
                        message,
                        range_start_tuple=(line_num, col),
                        range_end_tuple=(line_num, col + len(prop)),
                        diag_severity=DiagnosticSeverity.Warning,
                    )
            line_num += 1

    def validate(self):
        """Run all error-level validations and return the collected diagnostics."""
        # errors
        self._validate_no_duplicates_in_inputs()
        self._validate_no_duplicates_in_outputs()
        self._validate_no_reserved_words_in_inputs_prefix()
        return self._diagnostics
| StarcoderdataPython |
1854384 | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from time import sleep
from datetime import datetime, timedelta
from knack.log import get_logger
from knack.util import CLIError
from azext_iot.common.shared import SdkType, JobStatusType, JobType, JobVersionType
from azext_iot.common.utility import unpack_msrest_error, process_json_arg
from azext_iot.operations.generic import _execute_query, _process_top
from azext_iot.iothub.providers.base import IoTHubProvider, CloudError, SerializationError
logger = get_logger(__name__)
class JobProvider(IoTHubProvider):
    def get(self, job_id):
        """Fetch a job by id, falling back to the v1 API for 'unknown' v2 results."""
        job_result = self._get(job_id)
        if "status" in job_result and job_result["status"] == JobStatusType.unknown.value:
            # Replace 'unknown' v2 result with v1 result
            job_result = self._get(job_id, JobVersionType.v1)
        return job_result
    def _get(self, job_id, job_version=JobVersionType.v2):
        """Fetch a job via the requested API version; v1 results are converted to v2 shape."""
        service_sdk = self.get_sdk(SdkType.service_sdk)
        try:
            if job_version == JobVersionType.v2:
                return service_sdk.job_client.get_job(id=job_id, raw=True).response.json()
            return self._convert_v1_to_v2(service_sdk.job_client.get_import_export_job(id=job_id))
        except CloudError as e:
            # surface service errors as CLI errors
            raise CLIError(unpack_msrest_error(e))
def cancel(self, job_id):
job_result = self.get(job_id)
if "type" in job_result and job_result["type"] in [JobType.exportDevices.value, JobType.importDevices.value]:
# v1 Job
return self._cancel(job_id, JobVersionType.v1)
# v2 Job
return self._cancel(job_id)
def _cancel(self, job_id, job_version=JobVersionType.v2):
service_sdk = self.get_sdk(SdkType.service_sdk)
try:
if job_version == JobVersionType.v2:
return service_sdk.job_client.cancel_job(id=job_id, raw=True).response.json()
return service_sdk.job_client.cancel_import_export_job(id=job_id)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def list(self, job_type=None, job_status=None, top=None):
top = _process_top(top)
jobs_collection = []
if (
job_type not in [JobType.exportDevices.value, JobType.importDevices.value]
or not job_type
):
jobs_collection.extend(
self._list(job_type=job_type, job_status=job_status, top=top)
)
if (
job_type in [JobType.exportDevices.value, JobType.importDevices.value]
or not job_type
):
if (top and len(jobs_collection) < top) or not top:
jobs_collection.extend(self._list(job_version=JobVersionType.v1))
# v1 API has no means of filtering service side :(
jobs_collection = self._filter_jobs(
jobs=jobs_collection, job_type=job_type, job_status=job_status
)
# Trim based on top, since there is no way to pass a 'top' into the v1 API :(
if top:
jobs_collection = jobs_collection[:top]
return jobs_collection
def _list(self, job_type=None, job_status=None, top=None, job_version=JobVersionType.v2):
service_sdk = self.get_sdk(SdkType.service_sdk)
jobs_collection = []
try:
if job_version == JobVersionType.v2:
query = [job_type, job_status]
query_method = service_sdk.job_client.query_jobs
jobs_collection.extend(_execute_query(query, query_method, top))
elif job_version == JobVersionType.v1:
jobs_collection.extend(service_sdk.job_client.get_import_export_jobs())
jobs_collection = [self._convert_v1_to_v2(job) for job in jobs_collection]
return jobs_collection
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def create(
self,
job_id,
job_type,
start_time=None,
query_condition=None,
twin_patch=None,
method_name=None,
method_payload=None,
method_connect_timeout=30,
method_response_timeout=30,
ttl=3600,
wait=False,
poll_interval=10,
poll_duration=600,
):
from azext_iot.sdk.iothub.service.models import (
CloudToDeviceMethod,
JobRequest
)
if (
job_type
in [JobType.scheduleUpdateTwin.value, JobType.scheduleDeviceMethod.value]
and not query_condition
):
raise CLIError(
"The query condition is required when job type is {} or {}. "
"Use query condition '*' if you need to run job on all devices.".format(
JobType.scheduleUpdateTwin.value, JobType.scheduleDeviceMethod.value
)
)
if poll_duration < 1:
raise CLIError("--poll-duration must be greater than 0!")
if poll_interval < 1:
raise CLIError("--poll-interval must be greater than 0!")
if job_type == JobType.scheduleUpdateTwin.value:
if not twin_patch:
raise CLIError(
"The {} job type requires --twin-patch.".format(
JobType.scheduleUpdateTwin.value
)
)
twin_patch = process_json_arg(twin_patch, argument_name="twin-patch")
if not isinstance(twin_patch, dict):
raise CLIError(
"Twin patches must be objects. Received type: {}".format(
type(twin_patch)
)
)
elif job_type == JobType.scheduleDeviceMethod.value:
if not method_name:
raise CLIError(
"The {} job type requires --method-name.".format(
JobType.scheduleDeviceMethod.value
)
)
method_payload = process_json_arg(
method_payload, argument_name="method-payload"
)
job_request = JobRequest(
job_id=job_id,
type=job_type,
start_time=start_time,
max_execution_time_in_seconds=ttl,
query_condition=query_condition,
)
if job_type == JobType.scheduleUpdateTwin.value:
# scheduleUpdateTwin job type is a force update, which only accepts '*' as the Etag.
twin_patch["etag"] = "*"
job_request.update_twin = twin_patch
elif job_type == JobType.scheduleDeviceMethod.value:
job_request.cloud_to_device_method = CloudToDeviceMethod(
method_name=method_name,
connect_timeout_in_seconds=method_connect_timeout,
response_timeout_in_seconds=method_response_timeout,
payload=method_payload,
)
service_sdk = self.get_sdk(SdkType.service_sdk)
try:
job_result = service_sdk.job_client.create_job(id=job_id, job_request=job_request, raw=True).response.json()
if wait:
logger.info("Waiting for job finished state...")
current_datetime = datetime.now()
end_datetime = current_datetime + timedelta(seconds=poll_duration)
while True:
job_result = self._get(job_id)
if "status" in job_result:
refreshed_job_status = job_result["status"]
logger.info("Refreshed job status: '%s'", refreshed_job_status)
if refreshed_job_status in [
JobStatusType.completed.value,
JobStatusType.failed.value,
JobStatusType.cancelled.value,
]:
break
if datetime.now() > end_datetime:
logger.info("Job not completed within poll duration....")
break
logger.info("Waiting %d seconds for next refresh...", poll_interval)
sleep(poll_interval)
return job_result
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except SerializationError as se:
# ISO8601 parsing is handled by msrest
raise CLIError(se)
def _convert_v1_to_v2(self, job_v1):
v2_result = {}
# For v1 jobs, startTime is the same as createdTime
v2_result["createdTime"] = job_v1.start_time_utc
v2_result["startTime"] = job_v1.start_time_utc
v2_result["endTime"] = job_v1.end_time_utc
v2_result["jobId"] = job_v1.job_id
v2_result["status"] = job_v1.status
v2_result["type"] = job_v1.type
v2_result["progress"] = job_v1.progress
v2_result["excludeKeysInExport"] = job_v1.exclude_keys_in_export
if job_v1.failure_reason:
v2_result["failureReason"] = job_v1.failure_reason
v2_result.update(job_v1.additional_properties)
return v2_result
def _filter_jobs(self, jobs, job_type=None, job_status=None):
if job_type:
jobs = [job for job in jobs if job["type"] == job_type]
if job_status:
jobs = [job for job in jobs if job["status"] == job_status]
return jobs
| StarcoderdataPython |
3429135 | <filename>hokonui/exchanges/mock.py
''' Module for Exchange base class '''
# pylint: disable=duplicate-code, line-too-long
import time
from hokonui.models.ticker import Ticker
from hokonui.utils.helpers import apply_format_level
class Mock():
    ''' Mock exchange returning fixed prices/order books for tests.

    Fix over the original: `_current_orders_extractor` previously filled the
    "asks" dict from `data["bids"]` and the "bids" dict from `data["asks"]`
    (sides swapped), and printed the result to stdout. Sides now map
    correctly and the debug print is removed.
    '''
    TICKER_URL = None
    ORDER_BOOK_URL = None
    VOLUME_URL = None
    PRICE_URL = None
    NAME = 'Mock'
    CCY_DEFAULT = 'USD'
    MOCK_PRICE = 1.2345
    MOCK_ASK_QTY = 12.88
    MOCK_BID_QTY = 12.99
    @classmethod
    def _current_price_extractor(cls, data):
        ''' Extract the last price from a raw data dict. '''
        assert cls is not None
        return data["price"]
    @classmethod
    def _current_bid_extractor(cls, data):
        ''' Extract the bid price from a raw data dict. '''
        assert cls is not None
        return data["bid"]
    @classmethod
    def _current_ask_extractor(cls, data):
        ''' Extract the ask price from a raw data dict. '''
        assert cls is not None
        return data["ask"]
    @classmethod
    def _current_orders_extractor(cls, data, max_qty=100):
        ''' Build an order-book dict from raw bid/ask levels.

        Levels are accumulated per side until the running quantity total
        exceeds `max_qty`; prices are formatted to 2 decimals and
        quantities to 8 decimals.
        '''
        assert cls is not None
        orders = {}
        bids = {}
        asks = {}
        bid_total = 0
        ask_total = 0
        for level in data["bids"]:
            # Bug fix: bid levels now populate the bids side (was asks).
            if bid_total <= max_qty:
                bids[apply_format_level(level["price"],
                                        '.2f')] = "{:.8f}".format(
                                            float(level["quantity"]))
                bid_total = bid_total + float(level["quantity"])
        for level in data["asks"]:
            # Bug fix: ask levels now populate the asks side (was bids).
            if ask_total <= max_qty:
                asks[apply_format_level(level["price"],
                                        '.2f')] = "{:.8f}".format(
                                            float(level["quantity"]))
                ask_total = ask_total + float(level["quantity"])
        orders["source"] = cls.NAME
        orders["bids"] = bids
        orders["asks"] = asks
        orders["timestamp"] = str(int(time.time()))
        return orders
    @classmethod
    def _current_ticker_extractor(cls, data):
        ''' Build a Ticker JSON payload from raw bid/ask data. '''
        assert cls is not None
        assert data is not None
        return Ticker(cls.CCY_DEFAULT, data["ask"], data["bid"]).to_json()
    @classmethod
    def get_current_price(cls, ccy=None, params=None, body=None, header=None):
        ''' Return the fixed mock last price (MOCK_PRICE).

        The prints below only echo supplied arguments (kept from the
        original to preserve observable behavior).
        '''
        assert cls is not None
        if ccy is not None:
            print(ccy)
        if params is not None:
            print(params)
        if body is not None:
            print(body)
        if header is not None:
            print(header)
        data = {"price": cls.MOCK_PRICE}
        return cls._current_price_extractor(data)
    @classmethod
    def get_current_bid(cls, ccy=None, params=None, body=None, header=None):
        ''' Return the fixed mock bid price (MOCK_PRICE). '''
        data = {"bid": cls.MOCK_PRICE}
        if ccy is not None:
            print(ccy)
        if params is not None:
            print(params)
        if body is not None:
            print(body)
        if header is not None:
            print(header)
        return cls._current_bid_extractor(data)
    @classmethod
    def get_current_ask(cls, ccy=None, params=None, body=None, header=None):
        ''' Return the fixed mock ask price (MOCK_PRICE). '''
        data = {"ask": cls.MOCK_PRICE}
        if ccy is not None:
            print(ccy)
        if params is not None:
            print(params)
        if body is not None:
            print(body)
        if header is not None:
            print(header)
        return cls._current_ask_extractor(data)
    @classmethod
    def get_current_ticker(cls, ccy=None, params=None, body=None, header=None):
        ''' Return a mock ticker JSON with ask == bid == MOCK_PRICE. '''
        data = {"ask": cls.MOCK_PRICE, "bid": cls.MOCK_PRICE}
        if ccy is not None:
            print(ccy)
        if params is not None:
            print(params)
        if body is not None:
            print(body)
        if header is not None:
            print(header)
        return cls._current_ticker_extractor(data)
    @classmethod
    def get_current_orders(cls, ccy=None, params=None, body=None, max_qty=5):
        ''' Return a mock order book with one bid and one ask level. '''
        data = {
            "asks": [{
                "price": cls.MOCK_PRICE,
                "quantity": "12.99"
            }],
            "bids": [{
                "price": cls.MOCK_PRICE,
                "quantity": "12.88"
            }]
        }
        if ccy is not None:
            print(ccy)
        if params is not None:
            print(params)
        if body is not None:
            print(body)
        if max_qty is not None:
            print(max_qty)
        return cls._current_orders_extractor(data, max_qty)
| StarcoderdataPython |
8000561 | import re
import sys
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
multipliers = re.findall("\d+",stringe)
limitop = int(len(multipliers)/2)
total = []
for i in range(0,limitop):
total.append(str((int(multipliers[i])*int(multipliers[limitop+i]))))
print (" ".join(total)) | StarcoderdataPython |
3507712 | from manim import *
import networkx as nx
import json
import ast
class Geometry:
    """2-D geometry helpers.

    Points are numpy column vectors of shape (2, 1); a "line" is a 2-tuple
    of two such points.
    """
    def __init__(self):
        pass
    def get_intersection(self, line1, line2):
        """Return the intersection point of the two *infinite* lines.

        Uses the determinant form of the line-line intersection formula.
        Returns None when the lines are parallel (zero denominator).
        """
        xdiff = np.array([line1[0][0, 0] - line1[1][0, 0], line2[0][0, 0] - line2[1][0, 0]]).reshape((2, 1))
        ydiff = np.array([line1[0][1, 0] - line1[1][1, 0], line2[0][1, 0] - line2[1][1, 0]]).reshape((2, 1))
        def det(a, b):
            # 2x2 determinant of two (2, 1) column vectors.
            return a[0, 0] * b[1, 0] - a[1, 0] * b[0, 0]
        div = det(xdiff, ydiff)
        if div == 0:
            return None
        d = np.array([det(*line1), det(*line2)]).reshape((2, 1))
        x = det(d, xdiff) / div
        y = det(d, ydiff) / div
        return np.array([x, y]).reshape((2, 1))
    def get_line_segment_intersection(self, line1, line2):
        """Return the intersection if it lies within line1's segment, else None.

        The original four orientation branches were equivalent to a single
        axis-aligned bounding-box test on line1's endpoints, used here.
        NOTE(review): only line1 is bounds-checked; line2 is treated as an
        infinite line — confirm this asymmetry is intended by callers.
        """
        r = self.get_intersection(line1, line2)
        if r is None:
            return None
        x_lo, x_hi = sorted((line1[0][0, 0], line1[1][0, 0]))
        y_lo, y_hi = sorted((line1[0][1, 0], line1[1][1, 0]))
        if x_lo <= r[0, 0] <= x_hi and y_lo <= r[1, 0] <= y_hi:
            return r
        return None
    def get_angle(self, v1, v2):
        """Return the angle in radians between vectors v1 and v2.

        Inputs are normalized first; the dot product is clipped to [-1, 1]
        to guard arccos against floating-point drift.
        """
        v1_u = v1 / np.linalg.norm(v1)
        v2_u = v2 / np.linalg.norm(v2)
        return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
    # r = d - 2 (d.n) n
    def get_force_reflection(self, line1):
        """Return a unit vector perpendicular (normal) to line1.

        NOTE(review): despite the name and the reflection formula noted
        above, this returns only the normalized normal direction of the
        line, not a reflected vector — confirm callers expect the normal.
        """
        l_dir = np.array([line1[0][0] - line1[1][0], line1[0][1] - line1[1][1]]).reshape((2, 1))
        if l_dir[1, 0] != 0:
            y = -1 * l_dir[0, 0] / l_dir[1, 0]
            p_dir = np.array([1, y]).reshape((2, 1))
        else:
            x = -1 * l_dir[1, 0] / l_dir[0, 0]
            p_dir = np.array([x, 1]).reshape((2, 1))
        p_dir_norm = p_dir / np.linalg.norm(p_dir)
        return p_dir_norm
class Node:
    """A queue entry pairing an id and priority with its rendered TeX label."""
    def __init__(self, id, label, priority):
        self.id = id
        self.priority = priority
        # Rendered as a fraction: label over priority.
        self.n_eq = Tex("$\\frac{%s}{%s}$" % (str(label), str(priority)))
class AnimPriorityQueue:
    """Animated min-priority queue: entries render left-to-right by priority."""
    def __init__(self, q_head, q_screen):
        # q_head: position of the first slot; q_screen: scene used to play animations.
        self.q_head = q_head
        self.q_screen = q_screen
        self.content = []
    def enqueue(self, id, label, priority):
        """Insert a node before the first entry of equal or higher priority."""
        idx = len(self.content)
        for pos, node in enumerate(self.content):
            if node.priority >= priority:
                idx = pos
                break
        # Slide every displaced entry one slot to the right.
        tail = VGroup(*(node.n_eq for node in self.content[idx:]))
        self.q_screen.play(ApplyMethod(tail.shift, RIGHT))
        newcomer = Node(id, label, priority)
        newcomer.n_eq.move_to(self.q_head + idx * RIGHT)
        self.q_screen.play(Write(newcomer.n_eq))
        self.content.insert(idx, newcomer)
    def dequeue(self, h_callback=None):
        """Pop and return the head node (None when empty), sliding the rest left."""
        head = None
        if self.content:
            head = self.content[0]
            if h_callback is not None:
                h_callback(head)
            self.content = self.content[1:]
        remainder = VGroup(*(node.n_eq for node in self.content))
        self.q_screen.play(ApplyMethod(remainder.shift, LEFT))
        return head
    def is_empty(self):
        """True when no entries remain."""
        return not self.content
class AnimQueue:
    """Animated FIFO queue growing rightward from a tail anchor position."""
    def __init__(self, cur_tail, q_screen):
        self.cur_tail = cur_tail
        self.q_screen = q_screen
        self.content = []
    def enqueue(self, val):
        """Render *val* as TeX at the tail slot and append it."""
        entry = Tex("$%s$" % str(val))
        entry.move_to(self.cur_tail)
        self.q_screen.play(Write(entry))
        self.cur_tail += RIGHT  # in-place: advance the tail anchor
        self.content.append(entry)
    def enqueue_anim_elem(self, elem):
        """Animate an existing mobject into the tail slot and append it."""
        self.q_screen.play(ApplyMethod(elem.move_to, self.cur_tail))
        self.cur_tail += RIGHT
        self.content.append(elem)
    def dequeue(self, h_callback=None):
        """Pop and return the head element (None when empty), sliding the rest left."""
        head = None
        if self.content:
            head = self.content[0]
            if h_callback is not None:
                h_callback(head)
            self.content = self.content[1:]
        self.q_screen.play(ApplyMethod(VGroup(*self.content).shift, LEFT))
        self.cur_tail += LEFT
        return head
class AnimStack:
    """Animated LIFO stack; index 0 of ``stack`` is the visual top slot."""
    def __init__(self, top, scr):
        # top: position of the top slot; scr: scene used to play animations.
        self.top = top
        self.scr = scr
        self.stack = []
    def push(self, val):
        """Render *val* as TeX, shift existing entries right, place it on top."""
        if len(self.stack) > 0:
            self.scr.play(ApplyMethod(VGroup(*self.stack).shift, RIGHT))
        n_eq = Tex("$%s$" % str(val))
        n_eq.move_to(self.top)
        self.scr.play(Write(n_eq))
        self.stack.insert(0, n_eq)
    def push_anim_elem(self, elem):
        """Animate an existing mobject into the top slot.

        Bug fix: the element is now stored at index 0 (the top), matching
        ``push``; previously it was appended to the bottom of the list, so
        a later ``pop`` would return the wrong element after a mixed push
        sequence even though the element was animated to the top slot.
        """
        if len(self.stack) > 0:
            self.scr.play(ApplyMethod(VGroup(*self.stack).shift, RIGHT))
        self.scr.play(ApplyMethod(elem.move_to, self.top))
        self.stack.insert(0, elem)
    def pop(self, pop_callback=None):
        """Remove and return the top element (None when empty), sliding the rest left."""
        t = None
        if len(self.stack) > 0:
            t = self.stack[0]
            if pop_callback is not None:
                pop_callback(t)
            self.stack = self.stack[1:]
            if len(self.stack) > 0:
                self.scr.play(ApplyMethod(VGroup(*self.stack).shift, LEFT))
        return t
class LinedCode:
    """Renders source lines as TeX mobjects and animates line highlighting."""
    def __init__(self, code, c_screen, text_scale=0.6):
        self.prev_highlight = []
        self.code_tex = []
        self.c_screen = c_screen
        for row, line in enumerate(code):
            # Each leading space contributes half an indent unit.
            leading = len(line) - len(line.lstrip(' '))
            indent = leading * 0.5
            tex_line = Tex(r"%s" % line)
            tex_line = tex_line.scale(text_scale).to_edge()
            tex_line = tex_line.shift(DOWN * 0.4 * row)
            tex_line = tex_line.shift(RIGHT * 0.4 * indent)
            self.code_tex.append(tex_line)
    def highlight(self, new_lines):
        """Un-highlight the previously colored lines, then color *new_lines* blue."""
        for old in self.prev_highlight:
            self.c_screen.play(ApplyMethod(self.code_tex[old].set_color, WHITE), run_time=0.15)
        self.prev_highlight = []
        for ln in new_lines:
            self.c_screen.play(ApplyMethod(self.code_tex[ln].set_color, BLUE), run_time=0.15)
            self.prev_highlight.append(ln)
class GridNetwork(nx.Graph):
    """A networkx graph of a small grid topology with attached manim mobjects.

    Nodes are (row, col) tuples loaded from a JSON topology file; each node
    carries a Circle mobject, a Tex label, and a numeric id computed as
    row * n_col + col. Edges carry a Line mobject, a weight drawn from a
    preset sequence, and an optional weight label.
    """
    def __init__(self, topo_file, configs, **attr):
        """Build the graph and its mobjects from *topo_file*.

        :param topo_file: Path to a JSON file with "nodes", "edges", and a
            "weighted" flag; node/edge keys are stringified Python tuples.
        :param configs: Display overrides: "radius" (circle radius, default
            0.35), "shift" (global offset, default 0 * RIGHT), "weighted"
            (whether to render edge weight labels).
        """
        super().__init__(**attr)
        radius = configs["radius"] if "radius" in configs else 0.35
        shift = configs["shift"] if "shift" in configs else 0 * RIGHT
        # Fixed weight sequence, consumed one per edge in file iteration order.
        weights = iter([1, 11, 7, 6, 8, 3, 2, 9, 12, 5, 4, 10])
        n_col = 3  # grid width used for id computation and horizontal centering
        with open(topo_file) as json_file:
            data = json.load(json_file)
        for n in data["nodes"]:
            # Node keys are stringified tuples, e.g. "(0, 1)" -> (row, col).
            nv = ast.literal_eval(n)
            self.add_node(nv, circle=None, id=nv[0] * n_col + nv[1], label=None, neighbors=None)
            c = Circle(radius=radius)
            c.move_to(1.5 * DOWN * nv[0] + 1.5 * RIGHT * (nv[1] - n_col / 2) + shift)
            c.set_fill(PINK, opacity=0.5)
            self.nodes[nv]["circle"] = c
            # Label each node with its numeric id.
            n_eq = Tex("$v_%d$" % self.nodes[nv]["id"])
            n_eq.move_to(c.get_center())
            self.nodes[nv]["label"] = n_eq
        for n1 in data["edges"]:
            for n2 in data["edges"][n1]:
                n1v = ast.literal_eval(n1)
                n2v = ast.literal_eval(n2)
                self.add_edge(n1v, n2v, line=None, w=next(weights), w_label=None)
                # Draw the edge line between the facing circle boundaries.
                r1, r2 = self.get_line_coords(n1v, n2v)
                line = Line(r1, r2).set_color(RED)
                self.edges[(n1v, n2v)]["line"] = line
                # Weight labels require both the topology file and configs to opt in.
                if ast.literal_eval(data["weighted"]) and "weighted" in configs and configs["weighted"]:
                    wl = Tex("$%d$" % self.edges[(n1v, n2v)]["w"]).scale(0.8)
                    # Optional per-edge label offsets ("wxs" horizontal, "wys" vertical).
                    if "wxs" in data["edges"][n1][n2]:
                        wl.move_to((r1 + r2) / 2 + data["edges"][n1][n2]["wxs"] * RIGHT * 0.3)
                    if "wys" in data["edges"][n1][n2]:
                        wl.move_to((r1 + r2) / 2 + data["edges"][n1][n2]["wys"] * DOWN * 0.3)
                    self.edges[(n1v, n2v)]["w_label"] = wl
    def get_line_coords(self, src_node, dst_node):
        """Return the two endpoints for the edge line between adjacent nodes.

        Picks the facing circle-boundary points: nodes differing in row
        connect via their vertical edges, nodes differing in column via
        their horizontal edges.
        NOTE(review): if src_node == dst_node, no branch assigns r1/r2 and
        the return raises UnboundLocalError — presumably callers only pass
        distinct grid-adjacent nodes; confirm.
        """
        if src_node[0] < dst_node[0]:
            r1 = self.nodes[src_node]["circle"].get_center() - [0, self.nodes[src_node]["circle"].radius, 0]
            r2 = self.nodes[dst_node]["circle"].get_center() + [0, self.nodes[src_node]["circle"].radius, 0]
        elif src_node[1] < dst_node[1]:
            r1 = self.nodes[src_node]["circle"].get_center() + [self.nodes[src_node]["circle"].radius, 0, 0]
            r2 = self.nodes[dst_node]["circle"].get_center() - [self.nodes[src_node]["circle"].radius, 0, 0]
        elif dst_node[0] < src_node[0]:
            r2 = self.nodes[dst_node]["circle"].get_center() - [0, self.nodes[src_node]["circle"].radius, 0]
            r1 = self.nodes[src_node]["circle"].get_center() + [0, self.nodes[src_node]["circle"].radius, 0]
        elif dst_node[1] < src_node[1]:
            r2 = self.nodes[dst_node]["circle"].get_center() + [self.nodes[src_node]["circle"].radius, 0, 0]
            r1 = self.nodes[src_node]["circle"].get_center() - [self.nodes[src_node]["circle"].radius, 0, 0]
        return r1, r2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.