text stringlengths 38 1.54M |
|---|
import time
from serial import SerialException
from opentrons.util.log import get_logger
log = get_logger(__name__)
class Connection(object):
    """Thin wrapper around a pyserial port used to talk to a Smoothie board.

    Owns opening/closing the port, buffer management, and line-oriented
    read/write with timeouts.
    """

    def __init__(self, sp, port='', baudrate=115200, timeout=0.02):
        # Configure the supplied serial.Serial instance in place and keep it.
        sp.port = port
        sp.baudrate = baudrate
        sp.timeout = timeout
        self.serial_port = sp

    def device(self):
        """Return the underlying serial.Serial object."""
        return self.serial_port

    def name(self):
        """Return the configured port name (e.g. '/dev/ttyACM0')."""
        return str(self.serial_port.port)

    def open(self):
        """(Re)open the port, closing it first if it is already open."""
        if self.serial_port.isOpen():
            self.serial_port.close()
        self.serial_port.open()

    def close(self):
        """Close the underlying port."""
        self.serial_port.close()

    def isOpen(self):
        """Return True when the underlying port is open."""
        return self.serial_port.isOpen()

    def serial_pause(self):
        """Sleep for one serial timeout interval."""
        time.sleep(self.serial_port.timeout)

    def data_available(self):
        """Return the number of bytes waiting in the input buffer."""
        return int(self.serial_port.in_waiting)

    def flush_input(self):
        """Discard buffered input, pausing between flushes until it stays empty."""
        while self.data_available():
            self.serial_port.reset_input_buffer()
            self.serial_pause()

    def wait_for_data(self, timeout=30):
        """Busy-wait until input arrives; raise RuntimeWarning after *timeout* s."""
        end_time = time.time() + timeout
        while end_time > time.time():
            if self.data_available():
                return
        raise RuntimeWarning(
            'No data after {} second(s)'.format(timeout))

    def readline_string(self, timeout=30):
        """Read one non-empty line as str within *timeout* seconds.

        On SerialException the connection is cycled (close/open) and the read
        is retried with the remaining time budget. Raises RuntimeWarning when
        no line arrives in time.
        """
        end_time = time.time() + timeout
        while end_time > time.time():
            self.wait_for_data(timeout=timeout)
            try:
                res = str(self.serial_port.readline().decode().strip())
            except SerialException:
                # Port hiccup: reconnect and retry with whatever time is left.
                self.close()
                self.open()
                return self.readline_string(timeout=end_time - time.time())
            if res:
                return res
        raise RuntimeWarning(
            'No new line from Smoothie after {} second(s)'.format(timeout))

    def write_string(self, data_string):
        """Encode and write *data_string*, then flush the OS write buffer."""
        self.serial_port.write(data_string.encode())
        self.serial_port.flush()
|
#! /usr/bin/env python
"""
Author: LiangLiang ZHENG
Date:
File Description
"""
from __future__ import print_function
import sys
import argparse
class Solution(object):
    def findLHS(self, nums):
        """Return the length of the longest harmonious subsequence of nums.

        A harmonious subsequence is one whose maximum and minimum differ by
        exactly 1. Because a subsequence need not be contiguous, counting
        occurrences suffices: for each value n present, pair it with n + 1.

        :type nums: List[int]
        :rtype: int
        """
        # Local import: the surrounding module never imports `collections`,
        # so the original `collections.Counter(...)` raised NameError.
        from collections import Counter
        counts = Counter(nums)
        best = 0
        for n in counts:
            if counts[n + 1] != 0:
                best = max(best, counts[n] + counts[n + 1])
        return best
def main():
    """Placeholder entry point; the exercise only defines Solution."""
    pass


if __name__ == "__main__":
    main()
|
# Generated by Django 2.1.1 on 2018-09-18 08:28
from django.db import migrations
def set_regions_departments(apps, schema_editor):
    """Data migration: copy each perimeter's single-valued region/department
    into the new list-valued regions/departments fields."""
    Perimeter = apps.get_model("geofr", "Perimeter")
    perimeters = Perimeter.objects.all()
    for perimeter in perimeters:
        if perimeter.region:
            perimeter.regions = [perimeter.region]
        if perimeter.department:
            perimeter.departments = [perimeter.department]
        # One save per row; acceptable for a one-off migration.
        perimeter.save()
class Migration(migrations.Migration):
    """Populate plural regions/departments from the legacy single-value columns."""

    dependencies = [
        ("geofr", "0011_auto_20180918_1027"),
    ]
    operations = [
        # NOTE(review): no reverse function supplied — migration is irreversible.
        migrations.RunPython(set_regions_departments),
    ]
|
from flask import Flask, make_response, request
app = Flask("dummy")
def configure_app(app):
    """Attach an in-memory SQLite database configuration to *app*.

    Sets the SQLAlchemy connection URI, disables modification tracking,
    and enables SQL statement echoing.
    """
    settings = {
        'SQLALCHEMY_DATABASE_URI': 'sqlite:///:memory:',
        'SQLALCHEMY_TRACK_MODIFICATIONS': False,
        'SQLALCHEMY_ECHO': True,
    }
    for key, value in settings.items():
        app.config[key] = value
|
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import initializers
from random import randint
# import tensorflow_addons as tfa
def build_feature_extractor(
        input_seq_len: int,
        batch_size: int,
) -> Tuple[tf.keras.Model, int]:
    """Build a minimal convolutional feature extractor.

    Pipeline: Conv1D(1 filter, k=11) -> MaxPool1D(11) -> Flatten ->
    Dense(512, relu). Inputs are (batch, input_seq_len, 4) float32 —
    presumably one-hot encoded DNA (4 channels); TODO confirm.
    Returns (model, embedding_size).
    """
    print(input_seq_len)  # debug trace of the configured sequence length
    seq_input = tf.keras.layers.Input(shape=(input_seq_len, 4), batch_size=batch_size,
                                      dtype='float32', name='input_sequence')
    current = tf.keras.layers.Conv1D(
        filters=1,
        kernel_size=11,
        strides=1,
        padding='same',
        use_bias=False,
        dilation_rate=1,
    )(seq_input)
    current = tf.keras.layers.MaxPool1D(
        pool_size=11,
        padding='same')(current)
    current = tf.keras.layers.Flatten()(current)
    united_embedding_size = 512
    united_emb_layer = tf.keras.layers.Dense(
        units=united_embedding_size,
        activation='relu',
        # NOTE(review): random seed per build — weights are not reproducible.
        kernel_initializer=initializers.he_normal(seed=randint(0, 100000)),
        name='UnitedEmbeddingLayer')
    final_output = united_emb_layer(current)
    fe_model = tf.keras.Model(
        inputs=seq_input,
        outputs=final_output,
        name='FeatureExtractionModel'
    )
    fe_model.build(input_shape=(batch_size, input_seq_len, 4))
    return fe_model, united_embedding_size
def build_feature_extractor2(
        input_seq_len: int,
        batch_size: int,
) -> Tuple[tf.keras.Model, int]:
    """Alternative feature extractor: 11 stacked conv blocks then Dense(512).

    NOTE(review): `conv_block` is not defined in this file — calling this
    function will fail unless it is provided elsewhere; confirm before use.
    Returns (model, embedding_size).
    """
    seq_input = tf.keras.layers.Input(shape=(input_seq_len, 4), batch_size=batch_size,
                                      dtype='float32', name='input_sequence')
    current = seq_input
    for i in range(11):
        current = conv_block(current, filters=96, kernel_size=3, dilation_rate=3, pool_size=2)
    current = tf.keras.layers.Flatten()(current)
    united_embedding_size = 512
    united_emb_layer = tf.keras.layers.Dense(
        units=united_embedding_size,
        activation='relu',
        kernel_initializer=initializers.he_normal(seed=38),  # fixed seed, reproducible
        name='UnitedEmbeddingLayer')
    final_output = united_emb_layer(current)
    fe_model = tf.keras.Model(
        inputs=seq_input,
        outputs=final_output,
        name='FeatureExtractionModel'
    )
    fe_model.build(input_shape=(batch_size, input_seq_len, 4))
    return fe_model, united_embedding_size
def build_twin_regressor(united_embedding_size: int,
                         batch_size: int,
                         target_size: int) -> tf.keras.Model:
    """Build the regression head of the siamese network.

    Concatenates the left/right embeddings (each of size
    united_embedding_size) and applies a single linear Dense layer
    producing *target_size* outputs.
    """
    left_input = tf.keras.layers.Input(shape=(united_embedding_size,), dtype=tf.float32,
                                       batch_size=batch_size, name='features_left')
    right_input = tf.keras.layers.Input(shape=(united_embedding_size,), dtype=tf.float32,
                                        batch_size=batch_size, name='features_right')
    concatenated_features = tf.keras.layers.Concatenate(
        name='ConcatFeatures'
    )([left_input, right_input])
    regression_layer = tf.keras.layers.Dense(
        units=target_size, input_dim=united_embedding_size * 2, activation=None,
        kernel_initializer=tf.keras.initializers.GlorotNormal(seed=42),
        bias_initializer='zeros',
        name='RegressionLayer'
    )(concatenated_features)
    twin_regression_model = tf.keras.Model(
        inputs=[left_input, right_input],
        outputs=regression_layer,
        name='TwinRegressionModel'
    )
    twin_regression_model.build(input_shape=[(batch_size, united_embedding_size),
                                             (batch_size, united_embedding_size)])
    return twin_regression_model
def build_neural_network(
        seq_len: int,
        batch_size: int,
        target_size: int) -> Tuple[tf.keras.Model, tf.keras.Model,
                                   tf.keras.Model]:
    """Assemble the full siamese regression network.

    A shared feature extractor embeds the left and right sequences; the twin
    regressor maps the pair of embeddings to *target_size* outputs. The model
    is compiled with Adam + MSE. Returns (siamese, extractor, regressor).
    """
    fe_layer, emb_units = build_feature_extractor(input_seq_len=seq_len, batch_size=batch_size)
    left_input = tf.keras.layers.Input(shape=(seq_len, 4), batch_size=batch_size,
                                       dtype='float32', name='left_sequence')
    right_input = tf.keras.layers.Input(shape=(seq_len, 4), batch_size=batch_size,
                                        dtype='float32', name='right_sequence')
    # Weight sharing: the same extractor instance is applied to both branches.
    left_output = fe_layer(left_input)
    right_output = fe_layer(right_input)
    regression_model = build_twin_regressor(emb_units, batch_size, target_size)
    regression_layer = regression_model([left_output, right_output])
    siamese_model = tf.keras.Model(
        inputs=[left_input,
                right_input],
        outputs=regression_layer,
        name='SiameseModel'
    )
    # Previously experimented with RAdam + Lookahead (tensorflow_addons):
    # radam = tf.optimizers.RectifiedAdam(learning_rate=1e-5)
    # ranger = tf.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
    siamese_model.compile(optimizer='Adam', loss=tf.keras.losses.MeanSquaredError())
    return siamese_model, fe_layer, regression_model
import bisect
import hashlib
class ConsistentHashRing(object):
    """Consistent-hash ring mapping keys to nodes via virtual replicas.

    Each node is inserted at `replicas` pseudo-random positions (MD5 of
    "name:i"); a key is served by the first replica clockwise from its hash.
    """

    def __init__(self, replicas=100):
        self.replicas = replicas  # virtual points per node
        self._keys = []           # sorted list of replica hashes
        self._nodes = {}          # replica hash -> node object

    def _hash(self, key):
        """Hash an arbitrary key to an integer position on the ring."""
        digest = hashlib.md5(str.encode(str(key))).hexdigest()
        return int(digest, 16)

    def _repl_iterator(self, nodename):
        """Yield the ring position of every replica of *nodename*."""
        return (self._hash("%s:%s" % (nodename, i))
                for i in range(self.replicas))

    def __setitem__(self, nodename, node):
        """Insert *node* at each replica position; reject duplicate names."""
        for position in self._repl_iterator(nodename):
            if position in self._nodes:
                raise ValueError("Node name %r is already present" % nodename)
            self._nodes[position] = node
            bisect.insort(self._keys, position)

    def __delitem__(self, nodename):
        """Remove a node, given its name."""
        for position in self._repl_iterator(nodename):
            # Raises KeyError for a nonexistent node name.
            del self._nodes[position]
            self._keys.pop(bisect.bisect_left(self._keys, position))

    def __getitem__(self, key):
        """Return the node owning *key*: first replica at/after its hash, wrapping."""
        position = bisect.bisect(self._keys, self._hash(key))
        if position == len(self._keys):
            position = 0
        return self._nodes[self._keys[position]]
|
# Benchmark: time how long the single-threaded parser takes to rebuild the
# database and insert `record_limit` rows.
from scripts.parsing_singlethread import *
# globals
record_limit = 100  # number of records to insert
# Stopwatch is presumably provided by parsing_singlethread — TODO confirm.
stopwatch = Stopwatch()
stopwatch.start()
# truncate database
create_database()
create_domain_table()
# do insertion
insert_all_normal(limit=record_limit)
# stop measuring
stopwatch.stop()
print("Time of execution: ", stopwatch.results())
|
from django.contrib import admin
from .models import TutorProfile, TutorReviews, StudentProfile
# Register your models here.
class TutorProfileAdmin(admin.ModelAdmin):
    """Admin options for TutorProfile.

    NOTE(review): ModelAdmin does not read an inner Meta class — this Meta
    has no effect; the model is bound at admin.site.register() time.
    """
    class Meta:
        model = TutorProfile
class TutorReviewsAdmin(admin.ModelAdmin):
    """Admin options for TutorReviews (inner Meta is a no-op; see register call)."""
    class Meta:
        model = TutorReviews
class StudentProfileAdmin(admin.ModelAdmin):
    """Admin options for StudentProfile (inner Meta is a no-op; see register call)."""
    class Meta:
        model = StudentProfile
# Expose the profile and review models in the Django admin site.
admin.site.register(TutorProfile,TutorProfileAdmin)
admin.site.register(TutorReviews,TutorReviewsAdmin)
admin.site.register(StudentProfile,StudentProfileAdmin)
|
import unittest
from piepline.data_producer import BasicDataset
class TestingBasicDataset(BasicDataset):
    """Minimal concrete BasicDataset for tests: items are returned by index."""
    def _interpret_item(self, item) -> any:
        # `item` indexes into self._items (populated by BasicDataset).
        return self._items[item]
class BasicDatasetTest(unittest.TestCase):
    """Smoke tests for BasicDataset via the TestingBasicDataset shim."""

    def test_init(self):
        """Construction must accept both flat lists and lists of dicts."""
        try:
            TestingBasicDataset(list(range(12)))
            TestingBasicDataset([{'a': i, 'b': i * 2} for i in range(12)])
        except Exception as err:
            self.fail("Basic initialisation failed with error: ['{}']".format(err))

    def test_get_items_test(self):
        """get_items() must return exactly the items passed to the constructor."""
        items = list(range(13))
        dataset = TestingBasicDataset(items)
        self.assertEqual(dataset.get_items(), items)
|
class Car:
    """Simple car record: color, brand, wheel/seat counts, and top speed."""

    # Class-level defaults, kept for backward compatibility (instances always
    # overwrite them in __init__).
    color = ""
    brand = ""
    number_of_wheels = 4
    # NOTE(review): 'seates' is a typo, but renaming the attribute would break
    # existing callers — kept as-is.
    number_of_seates = 4
    maxspeed = 0

    def __init__(self, color, brand, number_of_wheels, number_of_seates, maxspeed):
        self.color = color
        self.brand = brand
        self.number_of_seates = number_of_seates
        self.number_of_wheels = number_of_wheels
        self.maxspeed = maxspeed

    def setcolor(self, x):
        """Set the car's color."""
        self.color = x

    def setbrand(self, x):
        """Set the car's brand."""
        self.brand = x

    def setspeed(self, x):
        """Set the car's maximum speed."""
        self.maxspeed = x

    def printdata(self):
        """Print a human-readable summary of the car."""
        # Fixed 'thsi' -> 'this' typos in the output messages.
        print("Color of this car is : ", self.color)
        print("Brand of this car is : ", self.brand)
        print("Maxspeed of this car is : ", self.maxspeed)

    def __del__(self):
        # Prints a blank line on destruction (behavior kept from original).
        print()
|
#f1 = open('d:\te.txt',encoding='utf-8',mode='r') #OSError: [Errno 22] Invalid argument: 'd:\te.txt'
# Read and print a UTF-8 text file. Forward slashes avoid the '\t' escape
# problem shown in the commented-out line above.
f1 = open('d:/te.txt',encoding='utf-8',mode='r')
content = f1.read()
print(content)
f1.close()
# NOTE(review): the triple-quoted block below is a no-op string literal used
# as a comment (in Chinese). It explains that open() calls into the OS, that
# f1 is a file handle through which all file operations go, that omitting
# encoding falls back to the OS default (gbk on Windows, utf-8 on Linux/mac),
# and that f1.close() releases the handle. Left byte-identical because it is
# technically an expression statement, not a comment.
'''
open 内置函数,open底层调用的是操作系统的接口。
f1,变量,f1,fh,file_handler,f_h,文件句柄。 对文件进行的任何操作,都得通过文件句柄. 的方式。
encoding:可以不写,不写参数,默认编码本:操作系统的默认的编码
windows: gbk。
linux: utf-8.
mac : utf-8.
f1.close() 关闭文件句柄。
'''
import sys
import os
import json
import time
import hmac
import hashlib
import base64
import requests
import numpy as np
import urllib.request
import urllib, time, datetime
import os.path
import time
import hmac
import hashlib
from decimal import *
try:
from urllib import urlencode
from urlparse import urljoin
except ImportError:
from urllib.parse import urlencode
from urllib.parse import urljoin
from coinsuper import get_orderbook, all_order_details, balances
# def filterActive(active):
#     for a in active:
#         del a['createtime']
#         del a['orderid']
#     return active

# Polling loop: every 10 s fetch the OMX/BTC order book, balances, and open
# orders from coinsuper and print a status report. Runs until killed.
ticker, d, a = "OMX/BTC", 1, 1  # d and a appear unused — TODO confirm
timeCnt, execTrades = 0, 0
starttime = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%Z")
while (1):
    try:
        orders = get_orderbook(ticker, 10)
        # Best bid/ask are the first price levels on each side of the book.
        bid, ask = orders['bids'][0]['limitPrice'], orders['asks'][0]['limitPrice']
        midpoint = np.mean([bid, ask])
        bals = balances()
        active = all_order_details()
        #active = filterActive(active_orders)
        #print("active:", active[])
        for i in range(len(active)):
            print("active:", active[i])
        print("PosFeed Version 1 -yungquant")
        print("Ticker:", ticker)
        print("starttime:", starttime)
        print("balances:", bals)
        print("price:", midpoint, "\n")
        time.sleep(10)
        timeCnt += 1
        print("timeCnt:", timeCnt, ",", timeCnt / 6, "minutes\n")
    except:
        # NOTE(review): bare except swallows everything (including
        # KeyboardInterrupt) and retries after 1 s — deliberate keep-alive,
        # but worth narrowing to Exception.
        print("FUUUUUUUUUUCK", sys.exc_info())
        time.sleep(1)
|
from sqlMethods import *
import pandas as pd
from twitter_queries import *
def get_all_entities(con):
    """Return a deduplicated DataFrame of entity spans for all tweets.

    Selects USER_MENTION / URL / MEDIA rows from tweet_entities with columns
    (tweet_id, entity_type, start_index, stop_index), ordered by stop_index
    so spans can be stripped left-to-right.
    """
    sql = """
    SELECT
    tweet_id, entity_type, start_index, stop_index
    FROM
    tweet_entities
    WHERE
    entity_type = 'USER_MENTION'
    OR entity_type = 'URL'
    OR entity_type = 'MEDIA'
    ORDER BY stop_index
    """
    data = execute(con, sql)
    df = pd.DataFrame(data).drop_duplicates()
    return df
#clean an individual tweet
#clean an individual tweet
def clean_tweet(tw,entitiesDf):
    """Return tw['tweet_text'] lowercased, with entity spans and the search
    query removed.

    Entity spans for this tweet (rows of *entitiesDf*, assumed ordered by
    stop_index) are cut out by index; then the originating query string is
    stripped. Prints a before/after trace.
    """
    tweet_id = tw['tweet_id']
    raw_text = tw['tweet_text'].lower()
    #clean entities
    entities = entitiesDf[entitiesDf['tweet_id'] == tweet_id]
    amt = entities.shape[0]
    #Check if there are any entiries
    if(amt == 0):
        clean = raw_text
    else:
        # Copy the text between consecutive entity spans, skipping the spans.
        clean = ""
        startInd = 0
        for entitityId in entities.index[:-1]:
            entity = entities.loc[entitityId]
            clean += raw_text[startInd:entity['start_index']]
            startInd = entity['stop_index']
        # Last span: text before it, then the tail after it.
        clean += raw_text[startInd:entities.iloc[-1]['start_index']]
        clean += raw_text[entities.iloc[-1]['stop_index']:]
    #clean query
    query = tw['twitter_query'].lower()
    clean = clean.replace(query,"")
    print()
    print("QUERY : ", query)
    print('ORIG : ', raw_text)
    print('CLEAN: ',clean )
    return clean
#if nothing passed in for newTweets, clean ALL tweets
def clean_tweets(con,newTweets = []):
entitiesDf = get_all_entities(con)
if(len(newTweets) == 0):
#Pull all tweets from DB
raw_tweets = selectAll(con,'raw_tweets')
else:
#Tweets passed in
raw_tweets = newTweets
clean_tweets = []
for tw in raw_tweets:
clean = clean_tweet(tw,entitiesDf)
dbObj = {
'tweet_id' : tw['tweet_id'],
'clean_text' : clean
}
clean_tweets.append(dbObj)
insertAll(con,'clean_tweets',clean_tweets,updateDuplicates=True)
return clean_tweets
def getTweetsAndClean():
    """Fetch new tweets from Twitter, clean them into the DB, return the cleaned rows."""
    con = getConnection()
    t_obj=authTW()
    tweets = get_tweets(t_obj)
    clean = clean_tweets(con,tweets)
    con.close()
    return clean
def main():
    """Pipeline entry point: fetch new tweets and clean them."""
    getTweetsAndClean()
    #t_obj=authTW()
    #get_tweets(t_obj) #rename to load tweets
    #Classify New tweets
    #do node stuff


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
'''
Created on Dec 13, 2016
@author: ToOro
'''
from technique.web_crawling.util.common import link_crawler
from technique.web_crawling.mongo_cache import MongoCache
from alexa_cb import AlexaCallback
def main():
    """Crawl from the Alexa top-sites seed URL using a MongoDB-backed page cache."""
    scrape_callback = AlexaCallback()
    cache = MongoCache()
    # cache.clear()
    link_crawler(seed_url=scrape_callback.seed_url, cache_callback=cache, scrape_callback=scrape_callback)


if __name__ == '__main__':
    main()
# Interactive loop (prompts are in Swedish): read two integers, print their
# sum, and repeat until the user answers "N".
while True:
    tal1 = int(input("Mata in tal1"))
    tal2 = int(input("Mata in tal2"))
    print(f"summan av {tal1} och {tal2} är {tal1+tal2}")
    fortsatt = input("Vill du fortsätta? J/N")
    # NOTE(review): anything other than exactly "N" continues, including
    # lowercase "n" and invalid input; int() also raises on non-numeric entry.
    if fortsatt == "N":
        break
# TODO: add another loop to handle invalid input (translated from Swedish)
|
# from sqlalchemy import Column, String, create_engine, CHAR, Integer
# from sqlalchemy.orm import sessionmaker
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
#
# class User(Base):
# __tablename__ = "user"
# id = Column(Integer, primary_key=True)
# nameuser = Column(String(200), unique=True)
# undergraduate = Column(String(250))
# graduatestudent = Column(String(100))
# international_ratio = Column(String(250))
# teacher_student = Column(String(100))
# url = Column(String(250))
# address = Column(String(100))
#
#
# engine = create_engine(
# "mysql+pymysql://root:666666@172.18.0.1:3306/mypydb")
# Base.metadata.create_all(engine)
# DBSession = sessionmaker(bind=engine)
#
#
#
# session =DBSession()
#
# obj=User(nameuser="sss")
# session.add(obj)
# session.commit()
# Connectivity smoke test: opens a MySQL connection at import time with
# hard-coded credentials and discards the connection object.
# NOTE(review): credentials in source and an unclosed connection — presumably
# throwaway test code; confirm before reuse.
import pymysql
pymysql.connect(db='center', user='bnu', passwd='bnu', host='172.16.160.203', port=3306)
import psycopg2
import os
# DEFAULTS
DEFAULT_PORT = 2345
DEFAULT_PASSWORD = '123'
DEFAULT_HOST = '127.0.0.1'
# variables
password = os.getenv('POSTGRES_PASSWORD')
if (password is None):
password = DEFAULT_PASSWORD
port = DEFAULT_PORT
host = DEFAULT_HOST
def main(password, host, port):
    """Attempt a connection to a postgres server and report the outcome.

    Connects to the 'postgres' database as user 'postgres' using the
    module-level password/host/port, prints the connection object, and
    closes it.
    """
    # create connection
    try:
        conn = psycopg2.connect(database="postgres", user="postgres",
                                password=password, host=host, port=port)
        print(conn)
        conn.close()
        print ('suceeded - exits')
    except psycopg2.Error:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
        # programming errors are no longer swallowed; only database errors
        # are treated as "server not running".
        print ('connection failed - did you run the server?')


if (__name__ == '__main__'):
    main(password = password, host=host ,port=port)
import subprocess
import os
import sys
import re
import json
import pdb
import datetime
import shlex
from collections import defaultdict, Counter
from collections.abc import Mapping
from itertools import chain
__all__ = ["run", "pipe", "Pipe", "save_stats", "string2cigar", "cigar2string", "guess_sample_name", "nullcontext", "CONSUMES_REF", "CONSUMES_READ"]
CONSUMES_REF = "MDN=X"
CONSUMES_READ = "MIS=X"
class nullcontext(object):
    """A no-op context manager whose as-target is None.

    Backport of contextlib.nullcontext for older Pythons.
    """

    def __enter__(self):
        # Nothing to acquire.
        return None

    def __exit__(self, *exc_info):
        # Returning a falsy value: exceptions are never suppressed.
        pass
#ILLUMINA_FASTQ = re.compile(r"(.+)_S([0-9]{1,2})_L([0-9]{3})_R([12])_001\.fastq(\.gz)?$") # name, s_number, lane, read, gzip
#def illumina_readgroup(filepath):
#basename = os.path.basename(filepath)
#sample = "_".join(basename.split("_")[:-4]) # remove the _Sx_Lxxx_Rx_001.fastq from the name
#with open(filepath) as f:
#identifier = f.readline().split(":")
#flowcell = identifier[2]
#return "@RG\\tID:{}\\tSM:{}".format(flowcell, sample)
def guess_sample_name(fastqs):
    """Derive the sample name shared by a set of fastq paths.

    Tries the Illumina naming convention first, then a generic _R1/_R2
    suffix, taking the text before the matched suffix as the name. Returns
    None when two files disagree on the name (or nothing matches at all).
    """
    illumina = re.compile(r"_S[0-9]{1,2}_L[0-9]{3}_R[12]_001\.fastq(\.gz)?$")
    generic = re.compile(r"_R?[1-2]\.fastq(\.gz)?$")
    name = None
    for pattern in (illumina, generic):
        if name:
            break  # an earlier pattern already produced a name
        for fastq in fastqs:
            hit = pattern.search(fastq)
            if hit is None:
                continue
            candidate = fastq[:hit.start()]
            # A conflicting non-empty name means the files are inconsistent.
            if name and candidate != name:
                return None
            name = candidate
    return name
def string2cigar(cigstr):
    """Parse a SAM CIGAR string into a list of (length, operation) tuples.

    "*" (unavailable CIGAR) yields []. Malformed input terminates the
    process via sys.exit, matching this module's CLI-oriented error style.

    Fixed: the trailing-digits branch was `raise sys.exit(...)` — sys.exit
    already raises SystemExit, so the extra `raise` was dead/confusing code.
    """
    if cigstr == "*":
        return []
    cig = []
    num = ""
    for char in cigstr:
        if char.isnumeric():
            num += char
        else:
            try:
                cig.append((int(num), char))
            except ValueError:
                # Operator with no preceding length (int("") fails).
                sys.exit(f"Malformed cigar string {cigstr}")
            num = ""
    if num:
        # Trailing digits with no operator.
        sys.exit(f"Malformed cigar string {cigstr}")
    return cig
def cigar2string(cig):
    """Render a list of (length, operation) tuples back into a CIGAR string.

    An empty list renders as "*", the SAM placeholder for no alignment.
    """
    parts = []
    for length, op in cig:
        parts.append(str(length))
        parts.append(op)
    return "".join(parts) or "*"
def rekey(mapping):
    """Recursively convert all numeric text keys to integer keys.

    This enables correct numeric ordering when the mapping is re-written
    to file. Non-numeric keys and non-mapping values pass through unchanged.
    """
    result = {}
    for key, value in mapping.items():
        if isinstance(value, Mapping):
            value = rekey(value)
        try:
            key = int(key)
        except ValueError:
            pass  # leave non-numeric keys as-is
        result[key] = value
    return result
def save_stats(path, update):
    """Merge *update* into the JSON stats file at *path* and rewrite it.

    An unreadable/missing file (OSError) starts from *update* alone. Existing
    keys are passed through rekey() so numeric keys sort correctly on output.
    NOTE(review): a corrupt-but-readable JSON file raises JSONDecodeError,
    which is not caught — confirm whether that is intended.
    """
    try:
        with open(path, "rt") as f_in:
            stats = rekey(json.load(f_in))
        stats.update(update)
    except OSError:
        # No existing stats file — start fresh from the update.
        stats = update
    with open(path, "wt") as f_out:
        json.dump(stats, f_out, sort_keys=True, indent=4)
def pretty_duration(seconds):
    """Format a duration in seconds as e.g. '1 hours 2 minutes 3 seconds'.

    Leading zero components are omitted (but minutes are shown whenever
    hours are), matching the original formatting exactly.
    """
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    mins, secs = divmod(remainder, 60)
    parts = []
    if hours:
        parts.append(f"{hours} hours")
    if hours or mins:
        parts.append(f"{mins} minutes")
    parts.append(f"{secs} seconds")
    return " ".join(parts)
class Pipe(object):
    """ Wrapper arond the pipe function that will maintain a record of the
    time taken to run each command. This is stored by command, ie if
    a single command is run several times the time will be recorded as
    the total time of all of the invocations.
    """
    def __init__(self):
        self._durations = Counter()  # executable name -> total seconds

    def __call__(self, *args, **kwargs):
        """Run pipe(*args, **kwargs), accumulating wall-clock time per command."""
        start = datetime.datetime.now()
        ret = pipe(*args, **kwargs)
        stop = datetime.datetime.now()
        # args[0][0] is the executable name in the command argument list.
        self._durations[args[0][0]] += (stop - start).total_seconds()
        return ret

    @property
    def durations(self):
        """Human-readable table of accumulated run times, sorted by command name."""
        padding = max(len(key) for key in self._durations)
        template = f"{{:{padding}}} {{}} "
        return "\n".join(template.format(k, pretty_duration(v)) for k, v in sorted(self._durations.items()))
def pipe(args, exit_on_failure=True, **kwargs):
    """ Runs a main pipeline command. Output is bytes rather than string and
    is expected to be captured via stdout redirection or ignored if not
    needed. The command is echoed to stderr before the command is run.
    """
    args = [str(arg) for arg in args]
    # Echo the exact, shell-quoted command line before running it.
    print(" ".join(shlex.quote(arg) for arg in args), file=sys.stderr, flush=True)
    completedprocess = subprocess.run(args, **kwargs)
    sys.stderr.flush()
    if exit_on_failure and completedprocess.returncode:
        # Propagate the child's exit code as our own.
        sys.exit(completedprocess.returncode)
    return completedprocess
def run(args, exit_on_failure=True):
    """Run a unix command as a subprocess, capturing stdout/stderr as text.

    For auxiliary commands only; main pipeline commands go through pipe().
    On failure (non-zero exit) with exit_on_failure set, the child's stderr
    is echoed and the process exits with the child's return code.
    """
    command = [str(arg) for arg in args]
    result = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    if result.returncode and exit_on_failure:
        for line in result.stderr.splitlines():
            print(line, file=sys.stderr, flush=True)
        sys.exit(result.returncode)
    return result
|
# -*- coding: utf-8 -*-
import numpy as np
import pdb
def read_seq(seq_file, mod="extend"):
    """Read one sequence per line from *seq_file* and one-hot encode each.

    Returns an array stacking the per-sequence encoding matrices.
    NOTE(review): the `mod` parameter is unused — confirm before removing.
    """
    seq_list = []
    seq = ""
    with open(seq_file, "r") as fp:
        for line in fp:
            seq = line[:-1]  # strip the trailing newline
            seq_array = get_seq_concolutional_array(seq)
            seq_list.append(seq_array)
    return np.array(seq_list)
def get_seq_concolutional_array(seq):
    """One-hot encode a protein sequence over the 20-letter alphabet.

    Returns a (len(seq), 20) float array with a 1 at each residue's column.
    Characters outside "ACDEFGHIKLMNPQRSTVWY" (e.g. B, J, O, U, X, Z) leave
    their row as all zeros.
    """
    # seq = seq.replace('U', 'T')
    alphabet = "ACDEFGHIKLMNPQRSTVWY"
    encoded = np.zeros((len(seq), 20))
    for position, residue in enumerate(seq):
        if residue not in alphabet:
            # Unknown residue ('Z' and friends): the zero row stands.
            continue
        try:
            encoded[position][alphabet.index(residue)] = 1
        except ValueError:
            # Unreachable given the membership check above; debugger hook
            # kept from the original.
            pdb.set_trace()
    return encoded
# ------------------------------------主函数---------------------------------------------
from sklearn.model_selection import StratifiedKFold, KFold, StratifiedShuffleSplit
from keras import backend as K
from keras.utils import np_utils
import numpy as np
import os
if __name__ == "__main__":
    # trueSet, falseSet = readfile('data/IE_true.seq', 'data/IE_false.seq', 0)
    # Sanity check: report the index and length of every line in the padded
    # DNA file whose length is not exactly 1000.
    seq_list = []
    seq = ""
    i = 0
    with open("data/DNA_Pading2_PDB14189", "r") as fp:
        for line in fp:
            seq = line[:-1]  # strip the trailing newline
            if len(seq) != 1000:
                print("[" + str(i) + "]:/-[" + str(len(seq)) + "]")
            i += 1
# from numpy import array
# from keras.preprocessing.text import one_hot
# from keras.preprocessing.sequence import pad_sequences
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import Flatten
# from keras.layers.embeddings import Embedding
# # define documents
# docs = ['Well done!',
# 'Good work',
# 'Great effort',
# 'nice work',
# 'Excellent!',
# 'Weak',
# 'Poor effort!',
# 'not good',
# 'poor work',
# 'Could have done better.']
# # define class labels
# labels = array([1,1,1,1,1,0,0,0,0,0])
# # integer encode the documents
# vocab_size = 50
# encoded_docs = [one_hot(d, vocab_size) for d in docs]
# print(encoded_docs)
# # pad documents to a max length of 4 words
# max_length = 4
# padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
# print(padded_docs)
# # define the model
# model = Sequential()
# model.add(Embedding(vocab_size, 8, input_length=max_length))
# model.add(Flatten())
# model.add(Dense(1, activation='sigmoid'))
# # compile the model
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# # summarize the model
# print(model.summary())
# # fit the model
# model.fit(padded_docs, labels, epochs=50, verbose=0)
# # evaluate the model
# loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
# print('Accuracy: %f' % (accuracy*100))
|
# -*- coding: utf-8 -*-
import gzip
import hashlib
import hmac
from StringIO import StringIO
import random
import xlrd
def gzdecode(data):
    """Decompress a gzip byte string and return its contents.

    Python 2 only: relies on StringIO; on Python 3 use io.BytesIO
    (or gzip.decompress) instead.
    """
    compressedstream = StringIO(data)
    gziper = gzip.GzipFile(fileobj=compressedstream)
    data2 = gziper.read()
    return data2
def random_str(len):
    """Return a string of *len* random decimal digits.

    NOTE(review): the parameter shadows the builtin `len`; the name is kept
    only for call-site compatibility. Fixed: the local accumulator no longer
    shadows the builtin `str`, and the string is built with join instead of
    quadratic `+=` concatenation.
    """
    digits = "1234567890"
    return "".join(random.choice(digits) for _ in range(len))
def create_signature(token, str):
    """HMAC-SHA1 sign *str* with key *token*; return the base64 digest, stripped.

    Python 2 only: bytes.encode('base64') does not exist on Python 3.
    NOTE(review): the second parameter shadows the builtin `str`.
    """
    return hmac.new(token, str, hashlib.sha1).digest().encode('base64').rstrip()
def get_request_params_list(param_values_file, testcase):
    """Load request-parameter dicts for *testcase* from an Excel workbook.

    Expected layout of the first sheet: row 0 holds parameter names from
    column 1 onward; every later row starts with a testcase name followed by
    that case's values. Returns a list of {param: value} dicts, one per row
    whose first cell equals *testcase*.
    """
    doc = xlrd.open_workbook(param_values_file)
    table = doc.sheet_by_index(0)
    rows = table.nrows
    col = table.ncols
    keys = []
    param_list = []
    for i in range(rows):
        if i == 0:
            # Header row: parameter names start at column 1.
            keys=table.row_values(i,1,col)
            continue
        if testcase == table.cell_value(i, 0):
            param_dict = {}
            for key in keys:
                # Value columns are offset by 1 (column 0 is the case name).
                param_dict[key] = table.cell_value(i, keys.index(key)+1)
            param_list.append(param_dict)
    return param_list
|
import tensorflow as tf
import numpy as np
# Suppress TensorFlow C++ log noise. (Comments translated from Chinese.)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
a = np.arange(0, 5)  # 1-D arithmetic sequence with step 1
b = tf.convert_to_tensor(a, dtype=tf.int64)  # convert a to a tensor
c = tf.fill([2, 2], 3)
d = tf.constant([1, 5], dtype=tf.int64)  # create a tensor
d_change = tf.cast(d, dtype=tf.float64)  # force-cast to another dtype
print('a: ', a, 'b: ', b)
print('d:', d, 'd_change:', d_change)
# Normal distribution with mean 0.5, stddev 1
e = tf.random.normal([2, 2], mean=0.5, stddev=1)
# Truncated normal: values kept within mean +/- 2*stddev
f = tf.random.truncated_normal([3, 2], mean=0.6, stddev=0.5)
g = tf.random.uniform([4, 2], minval=0, maxval=1)  # uniform random values
print('e:', e, 'f:', f, 'g:', g)
# tf.reduce_min(x)/tf.reduce_max(x): minimum/maximum of x
# tf.reduce_mean(x): mean of all values in x
# tf.matmul(x, y): matrix multiplication
# tf.data.Dataset.from_tensor_slices((x, y)): pair features with labels
# tf.GradientTape().gradient(): derivatives; tf.assign_sub(): in-place subtract; enumerate: index + value
# tf.one_hot(): convert input to a one-hot encoded output
# tf.argmax(x, axis=0/1): index of the max per column/row
# tf.where(tf.greater(x, y), x, y): element-wise select x where x > y, else y
# np.random.RandomState().rand: random numbers in [0, 1)
|
import sys
import re
import string
def do_generate(input_file, output):
    """Convert wiki-extractor <doc> markup into one document per line.

    For each </doc>-terminated document: the opening <doc ...> tag line is
    dropped, the first newline (title/body separator) becomes a tab, and all
    remaining newlines are flattened to spaces. Writes the transformed
    documents to *output*, one per line.
    """
    text = str(input_file.read())
    lines = []
    for document in re.split(r'</doc>\n', text):
        document = re.sub(r'<doc.*>\n', '', document)       # drop opening tag line
        document = re.sub(r'\n', '\t', document, count=1)   # title<TAB>body
        document = re.sub(r'\n', ' ', document)             # flatten body newlines
        lines.append(document)
    output.write('\n'.join(lines) + '\n')
if __name__=='__main__':
    # print help
    if len(sys.argv) != 2:
        print('Usage: python generate_linedoc.py [input_name] (output will be input_name_linedoc)')
        exit(1)
    # do parse: read the dump and write one document per line to <input>_linedoc
    input_file = open(sys.argv[1])
    output = open(sys.argv[1]+'_linedoc', 'w')
    do_generate(input_file, output)
    output.close()
    input_file.close()
|
# Nested-conditional demo: compare num1 against 12 and branch on the key flag.
num1 = 12
key = True
if num1 == 12:
    if key:
        print('Num1 is equal to Twelve and they have the key!')
    else:
        # NOTE(review): "do no" typo in the message — left as-is.
        print('Num1 is equal to Twelve and they do no have the key!')
elif num1 < 12:
    print('Num1 is less than Twelve!')
else:
    # NOTE(review): "eqaul" typo in the message — left as-is.
    print('Num1 is not eqaul to Twelve!')
|
from django.core.exceptions import ValidationError
import os
def validate_file_size(value, max_bytes=2097152):
    """Django validator: reject uploaded files larger than *max_bytes*.

    The default of 2097152 bytes preserves the original 2 MB limit; the new
    keyword parameter lets other fields reuse the validator with a different
    cap. Returns the value unchanged when valid; raises ValidationError
    otherwise.
    """
    filesize = value.size
    if filesize > max_bytes:
        raise ValidationError("The maximum file size that can be uploaded is 2MB")
    return value
def validate_file_extension(value, valid_extensions=('.txt',)):
    """Django validator: allow only files whose extension is in *valid_extensions*.

    Comparison is case-insensitive. The default preserves the original
    txt-only behavior; the new keyword parameter (an immutable tuple, not a
    mutable list) lets callers widen the set. Returns the value when valid —
    consistent with validate_file_size — and raises ValidationError otherwise.
    """
    ext = os.path.splitext(value.name)[1]  # [0] is path+stem, [1] the extension
    if ext.lower() not in valid_extensions:
        raise ValidationError(u'Unsupported file extension. Only txt files are supported')
    return value
|
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Table, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
import random
import string
import httplib2
Base = declarative_base()
# Random 32-character session key (uppercase letters and digits).
# Fixed: `xrange` does not exist on Python 3; `range` behaves identically
# here and also works on Python 2.
secret_key = ''.join(random.choice(string.ascii_uppercase + string.digits)
                     for x in range(32))
class User(Base):
    """Application user account."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(String(32))
    email = Column(String, index=True)  # indexed for lookup by email
class HelperMixin(object):
    """Shared validation helpers for the model classes."""

    @classmethod
    def verify_valid_pic(cls, url):
        """Return *url* if it points at a reachable image resource, else None.

        Issues a GET and inspects the response status code and content type.
        """
        h = httplib2.Http()
        try:
            response_header = h.request(url, 'GET')[0]
            ping_status_code = response_header['status']
            content_type = response_header['content-type']
            if ping_status_code != "200" or 'image' not in content_type:
                # If the resource doesn't exist or isn't image, don't save it
                result = None
            else:
                result = url
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any request failure yields None.
            result = None
        return result

    def validate_object(self):
        """Return True when the object has a non-empty name."""
        if self.name == '':
            return False
        else:
            return True
class Food(HelperMixin, Base):
    """A food type that varieties belong to (e.g. a crop or product)."""
    __tablename__ = 'food'
    id = Column(Integer, primary_key=True)
    name = Column(String(250), nullable=False)
    picture = Column(String(250))
    protected = Column(Boolean, default=False)  # guards against deletion/edits

    @property
    def serialize(self):
        """JSON-friendly representation (name and id only)."""
        return {
            'name': self.name,
            'id': self.id,
        }
# Many-to-many association between varieties and characteristics.
var_char_table = Table('association', Base.metadata,
                       Column('variety_id', Integer, ForeignKey('variety.id')),
                       Column('characteristic_id', Integer,
                              ForeignKey('characteristic.id')),
                       )
class Variety(HelperMixin, Base):
    """A specific variety of a Food, created by a User, with characteristics."""
    __tablename__ = 'variety'
    name = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
    description = Column(String(250))
    food_id = Column(Integer, ForeignKey('food.id'))
    food = relationship(Food)
    picture = Column(String(250))
    # Many-to-many via the association table defined above.
    characteristics = relationship('Characteristic',
                                   secondary=var_char_table)
    user_id = Column(Integer, ForeignKey('user.id'))
    user = relationship(User)

    @property
    def serialize(self):
        """JSON-friendly representation, flattening characteristics to strings."""
        variety_hash = {
            'name': self.name,
            'description': self.description,
            'id': self.id,
            'food': self.food.name,
        }
        variety_hash['characteristics'] = []
        for c in self.characteristics:
            variety_hash['characteristics'].append(c.char)
        return variety_hash
class Characteristic(Base):
    """A descriptive tag (its `char` text) attachable to many varieties."""
    __tablename__ = 'characteristic'
    char = Column(String(80), nullable=False)
    id = Column(Integer, primary_key=True)
engine = create_engine('sqlite:///FoodVarieties.db')
Base.metadata.create_all(engine)
|
# Source : https://github.com/mission-peace/interview/blob/master/python/dynamic/longest_increasing_subsequence.py
# Find a subsequence in given array in which the subsequence's elements are in sorted order, lowest to highest, and in which the subsequence is as long as possible.
# Time Complexity: O(N^2), Space Complexity: O(N)
def longest_increasing_subsequence(arr):
    """Return the length of the longest strictly increasing subsequence of arr.

    Brute-force recursion over include/exclude decisions: O(2^n) time,
    O(n) stack space. Fixed: the original initialized `longest` to -inf and
    never iterated for len(arr) < 2, so empty input returned -inf + 1 and a
    single element returned -inf + 1 instead of 0 and 1 respectively.
    """
    if not arr:
        return 0
    longest = 0  # recursion results are always >= 0, so 0 is a safe floor
    for idx in range(len(arr) - 1):
        longest_so_far = longest_increasing_subsequence_recursive(arr, idx + 1, arr[idx])
        longest = max(longest, longest_so_far)
    # +1 accounts for the chosen starting element itself.
    return longest + 1


def longest_increasing_subsequence_recursive(arr, next, curr_val):
    """Length of the longest increasing subsequence within arr[next:] all of
    whose elements exceed curr_val.

    NOTE(review): the parameter `next` shadows the builtin; the name is kept
    for call-site compatibility.
    """
    if next == len(arr):
        return 0
    with_next = 0
    if arr[next] > curr_val:
        # Take arr[next] and continue above it.
        with_next = 1 + longest_increasing_subsequence_recursive(arr, next + 1, arr[next])
    # Skip arr[next] and keep the current floor.
    without_next = longest_increasing_subsequence_recursive(arr, next + 1, curr_val)
    return max(with_next, without_next)
if __name__ == '__main__':
    # Demo runs using the recursive implementation.
    print (longest_increasing_subsequence([3, 4, -1, 0, 6, 2, 3]))
    print (longest_increasing_subsequence([2, 5, 1, 8, 3]))
|
import numpy as np
import tensorflow as tf
import cv2
import os
from style_transfer.model import build_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def preprocess_img(img, target_shape=None):
    """Prepare a BGR OpenCV image for the network.

    Optionally resizes to *target_shape*, converts BGR -> RGB, casts to
    float32, and prepends a batch axis. Returns shape (1, H, W, 3).
    """
    if target_shape is not None:
        img = cv2.resize(img, target_shape)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    batched = np.expand_dims(rgb.astype(np.float32), axis=0)
    return batched
def get_stylized_image(content, style_weights):
    """Stylize a BGR image with a transformation network and return BGR uint8.

    Builds a fresh model, loads *style_weights*, runs inference on the
    preprocessed content image, and clears the Keras session afterwards.
    NOTE(review): tf.reset_default_graph is TF1-style API — confirm the
    TensorFlow version in use.
    """
    tf.reset_default_graph()
    transformation_model = build_model(input_shape=(None, None, 3))
    transformation_model.load_weights(style_weights)
    content_image = preprocess_img(content)
    gen = transformation_model.predict(content_image)
    gen = np.squeeze(gen)        # drop the batch axis
    gen = gen.astype(np.uint8)
    gen = cv2.cvtColor(gen, cv2.COLOR_RGB2BGR)  # back to OpenCV's BGR order
    tf.keras.backend.clear_session()
    return gen
|
# Generated by Django 3.1.6 on 2021-08-09 05:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces InputData (height/weight/BMI per
    # SignUp) and removes the CheckStatus / EProgram / Index / IndexStatus /
    # Record models.  Foreign keys into the deleted models are removed first
    # so the DeleteModel operations can run; do not reorder the operations.

    dependencies = [
        ('BMIList', '0007_checkstatus_eprogram_indexstatus_record'),
    ]

    operations = [
        migrations.CreateModel(
            name='InputData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Date', models.DateTimeField(auto_now_add=True, null=True)),
                ('Height', models.FloatField(default='')),
                ('Weight', models.FloatField(default='')),
                ('BMITotal', models.FloatField(default='')),
                ('BMIResult', models.TextField(default='')),
                ('SignUp', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='BMIList.signup')),
            ],
        ),
        migrations.RemoveField(
            model_name='checkstatus',
            name='SignUp',
        ),
        migrations.RemoveField(
            model_name='eprogram',
            name='SignUp',
        ),
        migrations.DeleteModel(
            name='Index',
        ),
        migrations.RemoveField(
            model_name='indexstatus',
            name='SignUp',
        ),
        migrations.RemoveField(
            model_name='record',
            name='CheckStatus',
        ),
        migrations.RemoveField(
            model_name='record',
            name='Eprogram',
        ),
        migrations.DeleteModel(
            name='CheckStatus',
        ),
        migrations.DeleteModel(
            name='EProgram',
        ),
        migrations.DeleteModel(
            name='IndexStatus',
        ),
        migrations.DeleteModel(
            name='Record',
        ),
    ]
|
import inspect
import numbers
import math
import numpy
import FreeCAD
import Part
import Mesh
import FreeCADGui as Gui
from forbiddenfruit import curse
def print(x):
    # Shadows the builtin on purpose: route all print() calls in this module
    # to the FreeCAD report view instead of stdout.
    FreeCAD.Console.PrintMessage(str(x) + "\n")
def document():
    """Return the currently active FreeCAD document."""
    return FreeCAD.activeDocument()
def vector(*arguments, angle=None, length=1):
    """Build a FreeCAD.Vector.

    With `angle`, returns a 2-D vector of the given `length` at that angle.
    A Part.Point first argument is converted to a vector of its coordinates.
    Otherwise the positional arguments are forwarded to FreeCAD.Vector.
    """
    if angle is not None:
        return FreeCAD.Vector(length * math.cos(angle), length * math.sin(angle))
    if arguments and type(arguments[0]) is Part.Point:
        point = arguments[0]
        return FreeCAD.Vector(point.X, point.Y, point.Z)
    return FreeCAD.Vector(*arguments)
def width(argument):
    """A plain number is its own width; range-like objects (centered/bounds)
    delegate to their .width() method."""
    return argument if isinstance(argument, numbers.Number) else argument.width()
def minimum(argument):
    """A plain number starts at 0; range-like objects (centered/bounds)
    delegate to their .minimum() method."""
    return 0 if isinstance(argument, numbers.Number) else argument.minimum()
def box (*arguments, origin = vector()):
    """Axis-aligned box.

    Takes either one size (a cube) or three sizes; each size may be a plain
    number or a range-like object (centered/bounds).  The box is translated
    so each axis starts at that argument's minimum(), offset by `origin`.

    Raises:
        ValueError: if given a number of arguments other than 1 or 3.  (The
            original raised the undefined name `InputError`, which would have
            surfaced as a NameError instead of the intended error.)
    """
    if len (arguments) == 1:
        arguments = arguments*3
    if len (arguments) != 3:
        raise ValueError ("box() must take either 1 or 3 arguments")
    result = Part.makeBox (*[width (argument) for argument in arguments])
    result.translate (origin + vector (*[minimum (argument) for argument in arguments]))
    return result
def fancy_extrude(input, direction, range_argument=1):
    """Extrude `input` along `direction` by width(range_argument), then shift
    the result so the extrusion starts at minimum(range_argument)."""
    extruded = input.extrude(direction * width(range_argument))
    extruded.translate(direction * minimum(range_argument))
    return extruded
class centered():
    """A 1-D span of a given width centered on a point (default: origin)."""

    def __init__(self, width, on=0):
        self._width = width
        self.on = on

    def width(self):
        """Total extent of the span."""
        return self._width

    def minimum(self):
        """Lowest coordinate covered by the span."""
        return self.on - self._width / 2
class bounds():
    """A 1-D span given by explicit minimum and maximum coordinates."""

    def __init__(self, min, max):
        self.min = min
        self.max = max

    def width(self):
        """Extent of the span (max - min)."""
        return self.max - self.min

    def minimum(self):
        """Lowest coordinate of the span."""
        return self.min
def arc_center(endpoints, radius):
    """Center of the circle of the given radius through both endpoints; the
    sign of `radius` selects which of the two candidate centers."""
    half_chord = (endpoints[1] - endpoints[0]) / 2
    # Right triangle: radius^2 = half_chord^2 + center_offset^2.
    center_offset = math.sqrt(radius ** 2 - half_chord.Length ** 2)
    perpendicular = vector(-half_chord[1], half_chord[0]).normalized()
    return endpoints[0] + half_chord + perpendicular * (center_offset * numpy.sign(radius))
def arc_midpoint(endpoints, radius, direction=1):
    """Midpoint of the arc of the given radius spanning the two endpoints.

    `direction` (with the sign of `radius`) picks which side of the chord
    the arc bulges toward.
    """
    half_chord = (endpoints[1] - endpoints[0]) / 2
    center_offset = math.sqrt(radius ** 2 - half_chord.Length ** 2)
    perpendicular = vector(-half_chord[1], half_chord[0]).normalized()
    return endpoints[0] + half_chord + perpendicular * (
        center_offset * numpy.sign(radius) - radius * direction)
def point_circle_tangent(point, circle, direction=1):
    """Point where a tangent line from `point` touches `circle`.

    `circle` is a (center, radius) pair; `direction` (and the sign of the
    radius) selects which of the two tangent points is returned.
    """
    center, radius = circle
    delta = point - center
    distance = delta.Length
    base_angle = math.atan2(delta[1], delta[0])
    signed_radius = radius * direction
    flip = numpy.sign(signed_radius)
    magnitude = abs(signed_radius)
    # Angle between the center->point line and the center->tangent-point line.
    angle_offset = math.acos(magnitude / distance)
    tangent_angle = base_angle + angle_offset * flip
    return center + vector(magnitude * math.cos(tangent_angle),
                           magnitude * math.sin(tangent_angle))
def circle_circle_tangent_segment (circle_1, circle_2, direction_1 = 1, direction_2 = 1):
    """Return the two points where a common tangent line touches the circles.

    Each circle is a (center, radius) pair; the direction arguments (via the
    sign they give each radius) choose which common tangent is used.  Note:
    equal signed radii make the homothety-center division below divide by
    zero -- callers must avoid that case.
    """
    center_1, radius_1 = circle_1
    center_2, radius_2 = circle_2
    radius_1 = radius_1*direction_1
    radius_2 = radius_2*direction_2
    # Homothety center: where the tangent line crosses the line through the
    # two circle centers (external/internal depending on the radius signs).
    center = (center_1*radius_2 - center_2*radius_1)/(radius_2 - radius_1)
    flip_1 = 1
    flip_2 = 1
    # NOTE(review): when both signed radii share a sign, the smaller circle's
    # radius is mirrored before computing its tangent point -- presumably to
    # keep both returned points on the same side of the tangent line; confirm
    # against callers before changing.
    if numpy.sign (radius_1) == numpy.sign (radius_2):
        if abs (radius_1) < abs (radius_2):
            flip_1 = -1
        else:
            flip_2 = -1
    return point_circle_tangent (center, (center_1, - flip_1*radius_1)), point_circle_tangent (center, (center_2, flip_2*radius_2))
def show(shape, name, invisible=False):
    """Add `shape` to the GUI under `name`, dispatching on mesh vs. part.

    When `invisible`, the created object is immediately hidden.  Note the
    GUI document name "Something" is hard-coded.
    """
    shower = Mesh.show if type(shape) is Mesh.Mesh else Part.show
    shower(shape, name)
    if invisible:
        Gui.getDocument("Something").getObject(name).Visibility = False
def show_invisible(shape, name):
    """Convenience wrapper: show `shape` and immediately hide it."""
    show(shape, name, invisible=True)
# (mutating_name, applied_name) pairs: for every in-place FreeCAD operation
# on the left we synthesize a non-mutating "applied" variant that works on a
# copy of the object and returns the copy.
operations_to_make_applied_version_of = [
    ("translate", "translated"),
    ("scale", "scaled"),
    ("rotate", "rotated"),
    ("reverse", "reversed"),
]
applied_operations = {}
for operation_name, applied_name in operations_to_make_applied_version_of:
    # Factory function: binds operation_name at call time so the inner
    # closure is not bitten by Python's late binding of loop variables.
    def applied(operation_name, applied_name):
        def applied (self,*arguments,**keyword_arguments):
            # Copy, apply the mutating operation to the copy, return the copy.
            result = self.copy()
            #print (operation_name)
            getattr(result, operation_name) (*arguments)
            return result
        return applied
    applied_operations [applied_name] = applied(operation_name, applied_name)
def curse_freecad_types():
    """Monkey-patch FreeCAD's built-in types (via forbiddenfruit.curse) with
    the non-mutating "applied" operations and a few geometry helpers.

    Patches every class exposed by the Part module, plus Part.Shape and
    FreeCAD.Vector individually.  Call once at startup.
    """
    for value in vars (Part).values():
        if inspect.isclass (value):
            part_class = value
            # Attach translated/scaled/rotated/reversed copies-of-operations.
            for operation_name, applied_name in operations_to_make_applied_version_of:
                curse (part_class, applied_name, applied_operations [applied_name])
            curse (part_class, "to_face", lambda part: Part.Face (part))
            curse (part_class, "fancy_extrude", fancy_extrude)
            # Reorient a sketch from the XY plane into the XZ / YZ planes.
            curse (part_class, "as_xz", lambda part: part.rotated(vector(), vector (1, 0, 0), 90))
            curse (part_class, "as_yz", lambda part: part.rotated(vector(), vector (0, 1, 0), 90).rotated(vector(), vector (1, 0, 0), 90))
    curse (Part.Shape, "to_wire", lambda part: Part.Wire (part.Edges))
    curse (FreeCAD.Vector, "copy", lambda v: v + vector())
    # normalize() mutates in place, so normalized() works on a copy.
    curse (FreeCAD.Vector, "normalized", lambda v: v.copy().normalize())
    curse (FreeCAD.Vector, "angle", lambda v: math.atan2(v[1],v[0]))
    curse (FreeCAD.Vector, "rotated", lambda v, amount: vector (angle = v.angle() + amount, length = v.Length))
|
import battlecode as bc
import random
import sys
import traceback
import Units.sense_util as sense_util
import Units.movement as movement
import Units.explore as explore
import Units.Ranger as Ranger
import Units.variables as variables
import Units.clusters as clusters
import time
# Radius used to group nearby fights into one battle location -- presumably
# in the engine's squared-distance units; confirm against sense calls.
battle_radius = 10
def timestep(unit):
    """Run one turn of worker logic for `unit`.

    Mars workers just replicate (late game) and mine.  Earth workers may be
    promoted to the single "saviour" (builds/boards an escape rocket after
    round 225), otherwise they replicate up to the cap and then act according
    to their current role (miner/builder/blueprinter/boarder/repairer), or
    spread out when idle.  All shared state lives in the `variables` module.
    """
    #print(building_assignment)
    # last check to make sure the right unit type is running this
    gc = variables.gc
    info = variables.info
    karbonite_locations = variables.karbonite_locations
    blueprinting_queue = variables.blueprinting_queue
    blueprinting_assignment = variables.blueprinting_assignment
    building_assignment = variables.building_assignment
    current_roles = variables.current_worker_roles
    num_enemies = variables.num_enemies
    planet = gc.planet()
    if planet == bc.Planet.Earth:
        battle_locs = variables.earth_battles
        diagonal = variables.earth_diagonal
    else:
        battle_locs = variables.mars_battles
        diagonal = variables.mars_diagonal
    earth_start_map = variables.earth_start_map
    unit_types = variables.unit_types
    if unit.unit_type != unit_types["worker"]:
        # prob should return some kind of error
        return
    # make sure unit can actually perform actions ie. not in garrison
    if not unit.location.is_on_map():
        return
    my_location = unit.location.map_location()
    # NOTE(review): `is` comparison on the planet enum -- works only if
    # variables.mars is the same singleton object the engine returns.
    if my_location.planet is variables.mars:
        if gc.round()>700:
            try_replicate = replicate(gc, unit)
            if try_replicate:
                return
        mine_mars(gc,unit,my_location)
        return
    # Promote the first worker near a factory after round 225 to "saviour":
    # it is dedicated to blueprinting, building, and boarding a rocket.
    if gc.round() > 225 and not variables.saviour_worker and near_factory(my_location):
        variables.saviour_worker = True
        variables.saviour_worker_id = unit.id
    if variables.saviour_worker_id is not None and variables.saviour_worker_id == unit.id:
        if variables.saviour_blueprinted:
            try:
                corr_rocket = gc.unit(variables.saviour_blueprinted_id)
                if not corr_rocket.structure_is_built():
                    if gc.can_build(unit.id, variables.saviour_blueprinted_id):
                        gc.build(unit.id, variables.saviour_blueprinted_id)
                else:
                    if gc.can_load(variables.saviour_blueprinted_id, unit.id):
                        gc.load(variables.saviour_blueprinted_id, unit.id)
                        # Boarded: clear all saviour bookkeeping.
                        variables.saviour_worker_id = None
                        variables.saviour_worker = False
                        variables.saviour_blueprinted = False
                        variables.saviour_blueprinted_id = None
                        variables.num_unsuccessful_savior = 0
            except:
                # Rocket gone (destroyed or launched): reset saviour state so
                # another worker can take over.
                variables.saviour_worker_id = None
                variables.saviour_worker = False
                variables.saviour_blueprinted = False
                variables.saviour_blueprinted_id = None
                variables.num_unsuccessful_savior = 0
        else:
            # No rocket blueprint yet: try every adjacent passable tile.
            blueprinted = False
            for dir in variables.directions:
                map_loc = my_location.add(dir)
                map_loc_coords = (map_loc.x, map_loc.y)
                if map_loc_coords in variables.passable_locations_earth and variables.passable_locations_earth[map_loc_coords]:
                    if gc.can_blueprint(unit.id, variables.unit_types["rocket"], dir):
                        gc.blueprint(unit.id, variables.unit_types["rocket"], dir)
                        variables.saviour_blueprinted = True
                        new_blueprint = gc.sense_unit_at_location(map_loc)
                        variables.saviour_blueprinted_id= new_blueprint.id
                        variables.all_building_locations[variables.saviour_blueprinted_id] = map_loc
                        blueprinted = True
                        break
                    elif variables.num_unsuccessful_savior > 5:
                        # Repeated failures: sacrifice whatever friendly unit
                        # is blocking the tile and blueprint in its place.
                        if gc.has_unit_at_location(map_loc):
                            in_the_way_unit = gc.sense_unit_at_location(map_loc)
                            gc.disintegrate_unit(in_the_way_unit.id)
                            if gc.can_blueprint(unit.id, variables.unit_types["rocket"], dir):
                                gc.blueprint(unit.id, variables.unit_types["rocket"], dir)
                                variables.saviour_blueprinted = True
                                new_blueprint = gc.sense_unit_at_location(map_loc)
                                variables.saviour_blueprinted_id = new_blueprint.id
                                variables.all_building_locations[variables.saviour_blueprinted_id] = map_loc
                                blueprinted = True
                                break
            if not blueprinted:
                variables.num_unsuccessful_savior+=1
    # Look up this worker's current role.
    my_role = "idle"
    for role in current_roles:
        if unit.id in current_roles[role]:
            my_role = role
    #print()
    #print("on unit #",unit.id, "position: ",my_location, "role: ",my_role)
    #print("KARBONITE: ",gc.karbonite()
    current_num_workers = info[0]
    max_num_workers = get_replication_cap(gc,karbonite_locations, info, num_enemies)
    worker_spacing = 8
    #print("REPLICATION CAP: ",max_num_workers)
    # replicates if unit is able to (cooldowns, available directions etc.)
    if current_num_workers < max_num_workers:
        try_replicate = replicate(gc,unit)
        if try_replicate:
            return
    # runs this block every turn if unit is miner
    if my_role == "miner":
        start_time = time.time()
        mine(gc,unit,my_location,earth_start_map,karbonite_locations,current_roles, building_assignment, battle_locs)
        #print("mining time: ",time.time() - start_time)
    # if unit is builder
    elif my_role == "builder":
        start_time = time.time()
        build(gc,unit,my_location,earth_start_map,building_assignment,current_roles)
        #print("building time: ",time.time() - start_time)
    # if unit is blueprinter
    elif my_role == "blueprinter":
        start_time = time.time()
        blueprint(gc,unit,my_location,building_assignment,blueprinting_assignment,current_roles)
        #print("blueprinting time: ",time.time() - start_time)
    # if unit is boarder
    elif my_role == "boarder":
        board(gc,unit,my_location,current_roles)
    # if unit is repairer
    elif my_role == "repairer":
        repair(gc,unit,my_location,current_roles)
    else:
        # Idle: drift away from clustered friendly units.
        nearby= gc.sense_nearby_units_by_team(my_location, worker_spacing, variables.my_team)
        away_from_units = sense_util.best_available_direction(gc,unit,nearby)
        #print(unit.id, "at", unit.location.map_location(), "is trying to move to", away_from_units)
        movement.try_move(gc,unit,away_from_units)
def near_factory(my_location):
    """True when one of our factories sits in the diffs_20 neighborhood of
    `my_location` (Earth only)."""
    gc = variables.gc
    center = (my_location.x, my_location.y)
    for coords in explore.coord_neighbors(center, diff=explore.diffs_20):
        if not variables.passable_locations_earth.get(coords):
            continue
        map_loc = bc.MapLocation(bc.Planet.Earth, coords[0], coords[1])
        if not (gc.can_sense_location(map_loc) and gc.has_unit_at_location(map_loc)):
            continue
        occupant = gc.sense_unit_at_location(map_loc)
        if occupant.unit_type == variables.unit_types["factory"]:
            return True
    return False
# returns whether unit is a miner or builder, currently placeholder until we can use team-shared data to designate unit roles
def designate_roles():
    """Assign every worker a role and recruit workers onto structures.

    On Mars every worker simply becomes a miner.  On Earth the function
    first counts blueprints/factories/rockets, refreshes bookkeeping for
    dead workers, destroyed buildings and depleted deposits, then walks
    each worker through the recruitment rules: pull non-building workers
    onto understaffed construction sites, promote one to blueprinter when a
    new site is wanted, and finally give still-idle workers a default role
    (miner / boarder / repairer).  All shared state lives in `variables`.
    """
    my_units = variables.my_units
    unit_types = variables.unit_types
    current_roles = variables.current_worker_roles
    if variables.curr_planet == bc.Planet.Mars:
        workers = []
        worker_id_list = []
        for my_unit in my_units:
            if not my_unit.location.is_on_map():
                continue
            elif my_unit.unit_type == unit_types["worker"]:
                workers.append(my_unit)
                worker_id_list.append(my_unit.id)
        ## DESIGNATION FOR ALREADY ASSIGNED WORKERS ##
        # On Mars there is nothing to build: everyone mines.
        for worker in workers:
            if worker.id not in current_roles["miner"]:
                current_roles["miner"].append(worker.id)
    else:
        gc = variables.gc
        blueprinting_queue = variables.blueprinting_queue
        blueprinting_assignment = variables.blueprinting_assignment
        building_assignment = variables.building_assignment
        current_roles = variables.current_worker_roles
        karbonite_locations = variables.karbonite_locations
        unit_types = variables.unit_types
        invalid_building_locations = variables.invalid_building_locations
        all_building_locations = variables.all_building_locations
        blueprint_count = 0
        factory_count = 0
        rocket_count = 0
        rocket_ready_for_loading = False
        please_move = False
        min_workers_per_building = 3
        recruitment_radius = 20
        workers = []
        worker_id_list = []
        earth = variables.earth
        start_map = variables.earth_start_map
        my_units = variables.my_units
        # Census pass: count structures and collect our workers.
        for my_unit in my_units:
            if not my_unit.location.is_on_map():
                continue
            if my_unit.unit_type == unit_types["factory"]: # count ALL factories
                if not my_unit.structure_is_built():
                    blueprint_count += 1
                factory_count += 1
            elif my_unit.unit_type == unit_types["rocket"]:
                if my_unit.structure_is_built() and len(my_unit.structure_garrison()) < my_unit.structure_max_capacity():
                    rocket_ready_for_loading = True
                    #print("UNITS IN GARRISON",unit.structure_garrison())
                if not my_unit.structure_is_built():
                    if my_unit.id not in building_assignment.keys():
                        building_assignment[my_unit.id] = []
                    blueprint_count += 1
                rocket_count += 1
            elif my_unit.unit_type == unit_types["worker"]:
                workers.append(my_unit)
                worker_id_list.append(my_unit.id)
        #print("part 1",time.time()-start_time)
        # Drop state belonging to dead workers / destroyed buildings and
        # refresh the karbonite map before assigning anyone.
        update_for_dead_workers(gc,current_roles,blueprinting_queue,blueprinting_assignment,building_assignment)
        update_building_assignment(gc,building_assignment,blueprinting_assignment)
        update_deposit_info(gc,karbonite_locations)
        #print("part 2",time.time()-start_time)
        max_num_builders = 5
        max_num_blueprinters = 2 #len(blueprinting_queue)*2 + 1 # at least 1 blueprinter, 2 blueprinters per cluster
        num_miners_per_deposit = 2 #approximate, just to cap miner count as deposit number decreases
        closest_workers_to_blueprint = {} # dictionary mapping blueprint_id to a list of worker id sorted by distance to the blueprint
        workers_in_recruitment_range = {}
        # For each understaffed construction site, precompute candidate
        # workers sorted by distance and how many sit within recruitment range.
        for building_id in building_assignment:
            assigned_workers = building_assignment[building_id]
            blueprint_location = gc.unit(building_id).location.map_location()
            workers_per_building = get_workers_per_building(gc,start_map,blueprint_location)
            if len(assigned_workers) < workers_per_building:
                workers_dist_to_blueprint_sorted = sorted(workers,key=lambda unit:sense_util.distance_squared_between_maplocs(unit.location.map_location(), blueprint_location))
                closest_worker_ids = []
                for worker_unit in workers_dist_to_blueprint_sorted:
                    if worker_unit.id in current_roles["blueprinter"] or worker_unit.id in current_roles["builder"]:
                        continue
                    if building_id not in workers_in_recruitment_range:
                        worker_unit_loc = worker_unit.location.map_location()
                        if sense_util.distance_squared_between_maplocs(worker_unit_loc, blueprint_location) > recruitment_radius:
                            workers_in_recruitment_range[building_id] = len(closest_worker_ids)
                    closest_worker_ids.append(worker_unit.id)
                closest_workers_to_blueprint[building_id] = closest_worker_ids
        #print("closest workers to blueprint",closest_workers_to_blueprint)
        #print("workers in recruitment range",workers_in_recruitment_range)
        closest_workers_to_site = {} # dictionary mapping blueprint_id to a list of worker id sorted by distance to the blueprint
        for assigned_blueprinting_site in blueprinting_queue:
            assigned_location = assigned_blueprinting_site.map_location
            workers_dist_to_site_sorted = sorted(workers,key=lambda unit:sense_util.distance_squared_between_maplocs(unit.location.map_location(), assigned_location))
            closest_worker_ids = list(map(lambda unit: unit.id, workers_dist_to_site_sorted))
            for blueprinter_id in current_roles["blueprinter"]:
                if blueprinter_id in closest_worker_ids:
                    closest_worker_ids.remove(blueprinter_id)
            closest_workers_to_site[assigned_blueprinting_site] = closest_worker_ids
        #print("blueprinting_assignment",blueprinting_assignment)
        #print("building_assignment",building_assignment)
        #print("blueprinting_queue",blueprinting_queue)
        ######################
        ## ROLE DESIGNATION ##
        ######################
        for worker in workers:
            worker_location = worker.location.map_location()
            open_slots_to_build = False
            unit_build_override = False
            assigned_building_id = None
            my_role = "idle"
            role_revised = False
            ## DESIGNATION FOR ALREADY ASSIGNED WORKERS ##
            for role in current_roles.keys():
                if worker.id in current_roles[role]:
                    my_role = role
                    #print("worker id",worker.id,"is_role_assigned",is_role_assigned)
                    break
            # recruit nearby workers to finish building
            if my_role != "blueprinter" and my_role != "builder":
                for building_id in building_assignment:
                    assigned_workers = building_assignment[building_id]
                    assigned_location = gc.unit(building_id).location.map_location()
                    workers_per_building = get_workers_per_building(gc,start_map,assigned_location)
                    #print("workers per building",workers_per_building)
                    num_open_slots_to_build = workers_per_building - len(assigned_workers)
                    if num_open_slots_to_build > 0:
                        closest_worker_list = closest_workers_to_blueprint[building_id]
                        if building_id in workers_in_recruitment_range:
                            num_workers_in_range = workers_in_recruitment_range[building_id]
                        else:
                            num_workers_in_range = len(closest_worker_list)
                        # Already minimally staffed and nobody close: skip.
                        if len(assigned_workers) > min_workers_per_building and num_workers_in_range == 0:
                            continue
                        if num_open_slots_to_build <= num_workers_in_range:
                            recruitable_workers = closest_worker_list[:num_open_slots_to_build]
                        else:
                            optimal_number = max(min_workers_per_building,num_workers_in_range)
                            recruitable_workers = closest_worker_list[:optimal_number]
                        if worker.id in recruitable_workers:
                            if my_role != "idle" and worker.id in current_roles[my_role]:
                                current_roles[my_role].remove(worker.id)
                            current_roles["builder"].append(worker.id)
                            building_assignment[building_id].append(worker.id)
                            role_revised = True
                            my_role = "builder"
                            break
            # recruit nearby worker to place down a blueprint
            if my_role != "blueprinter" and not role_revised:
                building_in_progress_count = len(building_assignment.keys()) + len(blueprinting_assignment.keys())
                if building_in_progress_count < building_in_progress_cap(gc):
                    # if it finds a nice location for building, put it in queue
                    if len(blueprinting_assignment) < blueprinting_queue_limit(gc):
                        # Rockets take priority over factories.
                        if can_blueprint_rocket(gc,rocket_count):
                            best_location_tuple = get_optimal_building_location(gc,start_map,worker_location,unit_types["rocket"],karbonite_locations,blueprinting_queue,blueprinting_assignment)
                            #print("time for building location",time.time() - inside_time)
                            if best_location_tuple is not None:
                                best_location = bc.MapLocation(earth, best_location_tuple[0], best_location_tuple[1])
                                if my_role != "idle" and worker.id in current_roles[my_role]:
                                    current_roles[my_role].remove(worker.id)
                                current_roles["blueprinter"].append(worker.id)
                                new_site = BuildSite(best_location,unit_types["rocket"])
                                blueprinting_assignment[worker.id] = new_site
                                # Reserve the adjacent tiles around the site.
                                nearby_sites = adjacent_locations(best_location)
                                for site in nearby_sites:
                                    site_coord = (site.x,site.y)
                                    if site_coord not in variables.passable_locations_earth or not variables.passable_locations_earth[site_coord]: continue
                                    if invalid_building_locations[site_coord]:
                                        invalid_building_locations[site_coord] = False
                                my_role = "blueprinter"
                                #blueprinting_queue.append(new_site)
                        elif can_blueprint_factory(gc,factory_count):
                            best_location_tuple = get_optimal_building_location(gc,start_map,worker_location,unit_types["factory"],karbonite_locations,blueprinting_queue,blueprinting_assignment)
                            #print("time for building location",time.time() - inside_time)
                            if best_location_tuple is not None:
                                best_location = bc.MapLocation(earth, best_location_tuple[0], best_location_tuple[1])
                                #print(worker.id,"can build a factory")
                                if my_role != "idle" and worker.id in current_roles[my_role]:
                                    current_roles[my_role].remove(worker.id)
                                current_roles["blueprinter"].append(worker.id)
                                new_site = BuildSite(best_location,unit_types["factory"])
                                blueprinting_assignment[worker.id] = new_site
                                best_location_coords = (best_location.x, best_location.y)
                                # Reserve spacing tiles around the factory.
                                nearby_sites = factory_spacing_locations(best_location)
                                for site in nearby_sites:
                                    site_coord = (site.x,site.y)
                                    if site_coord not in variables.passable_locations_earth or not variables.passable_locations_earth[site_coord]: continue
                                    if invalid_building_locations[site_coord]:
                                        invalid_building_locations[site_coord] = False
                                my_role = "blueprinter"
                                #blueprinting_queue.append(new_site)
                            #print(worker.id," just added to building queue",best_location)
                            #print(worker.id,"cannot build a rocket or factory")
                        #print(worker.id,"cannot build a rocket or factory")
            ## DESIGNATION FOR UNASSIGNED WORKERS ##
            if my_role != "idle":
                continue
            num_miners = len(current_roles["miner"])
            num_blueprinters = len(current_roles["blueprinter"])
            num_builders = len(current_roles["builder"])
            num_boarders = len(current_roles["boarder"])
            num_repairers = len(current_roles["repairer"])
            # early game miner production
            if variables.my_karbonite < 100 and num_miners < 2:
                new_role = "miner"
            # keep roughly num_miners_per_deposit miners per known deposit
            elif num_miners_per_deposit * len(karbonite_locations) > num_miners:
                new_role = "miner"
            elif rocket_ready_for_loading:
                new_role = "boarder"
            else:
                new_role = "repairer"
            current_roles[new_role].append(worker.id)
def get_workers_per_building(gc, start_map, building_location):
    """Cap of builders to assign to one structure: the number of passable
    tiles adjacent to it, never more than 6."""
    hard_cap = 6
    open_tiles = 0
    for neighbor in adjacent_locations(building_location):
        coord = (neighbor.x, neighbor.y)
        # (0, 0) is excluded just as in the original -- presumably a map
        # corner quirk; confirm before removing.
        if coord == (0, 0) or coord not in variables.passable_locations_earth:
            continue
        if variables.passable_locations_earth[coord]:
            open_tiles += 1
    return min(open_tiles, hard_cap)
def update_for_dead_workers(gc,current_roles,blueprinting_queue,blueprinting_assignment,building_assignment):
    """Drop workers that died since last turn from every role list and from
    any building/blueprinting assignment they held.

    `blueprinting_queue` is accepted for interface compatibility but is not
    modified here.  (Removed the original's unused `build_site` local: a dead
    blueprinter's site is simply discarded -- if it was meant to be re-queued,
    append it to blueprinting_queue before the del below.)
    """
    live_unit_ids = variables.list_of_unit_ids
    for role in current_roles.keys():
        # Iterate over a copy: we remove from the live list as we go.
        for worker_id in current_roles[role][:]:
            if worker_id not in live_unit_ids:
                current_roles[role].remove(worker_id)
                if role == "builder":
                    for building_id in building_assignment:
                        if worker_id in building_assignment[building_id]:
                            building_assignment[building_id].remove(worker_id)
                            break
                elif role == "blueprinter":
                    if worker_id in blueprinting_assignment:
                        del blueprinting_assignment[worker_id]
def repair(gc, unit, my_location, current_roles):
    """Repair the closest damaged built factory, walking toward it when out
    of range.  With nothing to fix, the unit leaves the repairer role."""
    best = None
    best_dist = float('inf')
    best_loc = None
    for candidate in variables.my_units:
        if candidate.unit_type != variables.unit_types["factory"]:
            continue
        if not candidate.structure_is_built() or candidate.health >= candidate.max_health:
            continue
        candidate_loc = candidate.location.map_location()
        candidate_dist = sense_util.distance_squared_between_maplocs(my_location, candidate_loc)
        if candidate_dist < best_dist:
            best, best_dist, best_loc = candidate, candidate_dist, candidate_loc
    if best is None:
        # Nothing damaged: give up the repairer role.
        current_roles["repairer"].remove(unit.id)
        return
    if gc.can_repair(unit.id, best.id):
        gc.repair(unit.id, best.id)
    else:
        try_move_smartly(unit, my_location, best_loc)
def try_move_smartly(unit, map_loc1, map_loc2):
    """Step `unit` from map_loc1 toward map_loc2: straight-line direction for
    short hops, otherwise the precomputed BFS table (falling back to the
    straight-line direction when no entry exists)."""
    short_hop = (2 * variables.bfs_fineness ** 2) + 1
    if sense_util.distance_squared_between_maplocs(map_loc1, map_loc2) < short_hop:
        step = map_loc1.direction_to(map_loc2)
    else:
        # BFS table is keyed by exact origin and a coarsened target cell.
        origin = (map_loc1.x, map_loc1.y)
        coarse_target = (
            int(map_loc2.x / variables.bfs_fineness), int(map_loc2.y / variables.bfs_fineness))
        if (origin, coarse_target) in variables.precomputed_bfs:
            step = variables.precomputed_bfs[(origin, coarse_target)]
        else:
            step = map_loc1.direction_to(map_loc2)
    movement.try_move(variables.gc, unit, step)
def board(gc, my_unit, my_location, current_roles):
    """Walk to and load into the nearest finished rocket with free space.

    With no such rocket the unit leaves the boarder role.
    """
    best_rocket = None
    best_dist = float('inf')
    for candidate in variables.my_units:
        if candidate.unit_type != variables.unit_types["rocket"]:
            continue
        if not candidate.structure_is_built():
            continue
        if len(candidate.structure_garrison()) >= candidate.structure_max_capacity():
            continue
        dist = sense_util.distance_squared_between_maplocs(my_location, candidate.location.map_location())
        if dist < best_dist:
            best_dist = dist
            best_rocket = candidate
    if best_rocket is None:
        current_roles["boarder"].remove(my_unit.id)
        return
    rocket_location = best_rocket.location.map_location()
    if my_location.is_adjacent_to(rocket_location):
        if gc.can_load(best_rocket.id, my_unit.id):
            gc.load(best_rocket.id, my_unit.id)
            current_roles["boarder"].remove(my_unit.id)
    else:
        #print(unit.id, 'moving toward rocket')
        try_move_smartly(my_unit, my_location, rocket_location)
# parameters: amount of karbonite on the map, factory number ( diff behavior before and after our first factory),
def get_replication_cap(gc, karbonite_locations, info, num_enemies):
    """Maximum number of workers we want alive right now.

    `info` is the per-unit-type count array (presumably info[1:4] are our
    combat units and info[5] our factories -- confirm against `variables`).
    Heavily outnumbered or factory-poor teams keep a small fixed cap;
    otherwise the cap scales with round number and remaining deposits,
    capped at 15.
    """
    outnumbered = num_enemies > 2 * sum(info[1:4]) / 3
    if outnumbered or info[5] <= 1:
        return 6
    scaling = float(500 + gc.round()) / 7000 * len(karbonite_locations)
    return min(3 + scaling, 15)
def replicate(gc, unit):
    """Try to replicate this worker into every adjacent direction it can.

    Returns True when at least one replication was issued.  Note there is
    deliberately no break: the engine's can_replicate check (cooldown /
    karbonite) is what stops additional copies after the first succeeds.
    """
    did_replicate = False
    if variables.my_karbonite >= variables.unit_types["worker"].replicate_cost():
        for direction in variables.directions:
            if gc.can_replicate(unit.id, direction):
                gc.replicate(unit.id, direction)
                did_replicate = True
    return did_replicate
# FOR EARTH ONLY
def update_deposit_info(gc, karbonite_locations):
    """Refresh the cached Earth karbonite map from what our units can see.

    Depleted deposits are removed and visible amounts updated in place;
    coordinates outside every unit's vision are left untouched.
    """
    planet = variables.earth
    # Snapshot the keys: we delete entries while iterating.
    for coord in list(karbonite_locations.keys()):
        map_location = bc.MapLocation(planet, coord[0], coord[1])
        if not gc.can_sense_location(map_location):
            continue
        remaining = gc.karbonite_at(map_location)
        if remaining == 0:
            del karbonite_locations[coord]
        else:
            karbonite_locations[coord] = remaining
# returns map location of closest karbonite deposit
def get_closest_deposit(gc, unit, position, karbonite_locations, in_vision_range=False):
    """Map location of the nearest known karbonite deposit on Earth.

    First scans the diffs_50 neighborhood of `position`; only when nothing
    is found there does it fall back to scanning every known deposit.
    Returns MapLocation(earth, -1, -1) when no deposit is known at all.
    """
    planet = variables.earth
    best_dist = float('inf')
    best = bc.MapLocation(planet, -1, -1)
    here = (position.x, position.y)
    found_nearby = False
    for coord in explore.coord_neighbors(here, diff=explore.diffs_50, include_self=True):
        if coord not in karbonite_locations:
            continue
        found_nearby = True
        dist = sense_util.distance_squared_between_coords(here, coord)
        if dist < best_dist:
            best_dist = dist
            best = bc.MapLocation(planet, coord[0], coord[1])
    if not found_nearby:
        # Fall back to the full deposit map.
        for coord in karbonite_locations.keys():
            dist = sense_util.distance_squared_between_coords(here, coord)
            if dist < best_dist:
                best_dist = dist
                best = bc.MapLocation(planet, coord[0], coord[1])
    return best
def mine(gc,my_unit,my_location,start_map,karbonite_locations,current_roles, building_assignment, battle_locs):
    """One turn of Earth miner behavior for `my_unit`.

    Heads for the closest known deposit, registering any newly seen enemies
    as battle locations on the way; flees from attackers, harvests when
    adjacent, and drops the miner role after harvesting or when no deposits
    remain.
    """
    start_time = time.time()
    closest_deposit = get_closest_deposit(gc,my_unit,my_location,karbonite_locations)
    #print("closest deposit time",time.time() - start_time)
    #check to see if there even are deposits
    if start_map.on_map(closest_deposit):
        direction_to_deposit = my_location.direction_to(closest_deposit)
        #print(unit.id, "is trying to mine at", direction_to_deposit)
        enemy_units = gc.sense_nearby_units_by_team(my_location, my_unit.vision_range, sense_util.enemy_team(gc))
        dangerous_types = [variables.unit_types["knight"], variables.unit_types["ranger"], variables.unit_types["mage"]]
        dangerous_enemies = []
        # only adds enemy units that can attack
        for unit in enemy_units:
            enemy_loc = unit.location.map_location()
            # Register this sighting as a new battle location if no existing
            # one is nearby.
            add_loc = evaluate_battle_location(gc, enemy_loc, battle_locs)
            if add_loc:
                battle_locs[(enemy_loc.x, enemy_loc.y)] = clusters.Cluster(allies=set(),enemies=set([unit.id]))
            if unit.unit_type in dangerous_types:
                dangerous_enemies.append(unit)
        if len(dangerous_enemies) > 0:
            # Flee: pick the direction that best escapes the attackers.
            dir = sense_util.best_available_direction(gc, my_unit, dangerous_enemies)
            movement.try_move(gc, my_unit, dir)
        elif my_location.is_adjacent_to(closest_deposit) or my_location == closest_deposit:
            # mine if adjacent to deposit
            if gc.can_harvest(my_unit.id,direction_to_deposit):
                gc.harvest(my_unit.id,direction_to_deposit)
                # Harvested: leave the miner role so roles are re-evaluated.
                current_roles["miner"].remove(my_unit.id)
                #print(unit.id," just harvested!")
        else:
            # move toward deposit
            try_move_smartly(my_unit, my_location, closest_deposit)
            #movement.try_move(gc,my_unit,direction_to_deposit)
    else:
        current_roles["miner"].remove(my_unit.id)
        #print(unit.id," no deposits around")
def evaluate_battle_location(gc, loc, battle_locs):
    """
    Chooses whether or not to add this enemy's location as a new battle location.

    Returns True only when no existing battle location lies within the
    diffs_10 neighborhood of `loc`, so nearby fights are not double-counted.
    (Returns early on the first hit; the original kept scanning the whole
    neighborhood after the answer was already known.)
    """
    loc_coords = (loc.x, loc.y)
    locs_near = explore.coord_neighbors(loc_coords, include_self=True, diff=explore.diffs_10)
    for near_coords in locs_near:
        if near_coords in battle_locs:
            return False
    return True
def pick_closest_building_assignment(gc, unit, building_assignment):
    """Return the assigned build site closest to `unit`, or None when there
    are no sites."""
    unit_loc = unit.location.map_location()
    best_site = None
    best_dist = float('inf')
    for site in building_assignment.values():
        site_dist = sense_util.distance_squared_between_maplocs(unit_loc, site.get_map_location())
        if site_dist < best_dist:
            best_dist = site_dist
            best_site = site
    return best_site
def mine_mars(gc,unit,my_location):
    """One turn of Mars miner behavior: harvest the closest visible deposit,
    walking toward it when needed; with nothing visible, spread away from
    nearby friendly units."""
    all_locations = gc.all_locations_within(my_location,unit.vision_range)
    planet = variables.mars
    start_map = variables.mars_start_map
    worker_spacing = 8
    current_distance = float('inf')
    # Sentinel off-map location: start_map.on_map() below rejects it when no
    # deposit was found.
    closest_deposit = bc.MapLocation(planet,-1,-1)
    for deposit_location in all_locations:
        if gc.karbonite_at(deposit_location) == 0:
            continue
        distance_to_deposit = sense_util.distance_squared_between_maplocs(my_location, deposit_location)
        #keep updating current closest deposit to unit
        if distance_to_deposit < current_distance:
            current_distance = distance_to_deposit
            closest_deposit = deposit_location
    #check to see if there even are deposits
    if start_map.on_map(closest_deposit):
        direction_to_deposit = my_location.direction_to(closest_deposit)
        #print(unit.id, "is trying to mine at", direction_to_deposit)
        if my_location.is_adjacent_to(closest_deposit) or my_location == closest_deposit:
            # mine if adjacent to deposit
            if gc.can_harvest(unit.id,direction_to_deposit):
                gc.harvest(unit.id,direction_to_deposit)
                #print(unit.id," just harvested on Mars!")
        else:
            # move toward deposit
            try_move_smartly(unit, my_location, closest_deposit)
            #movement.try_move(gc,unit,direction_to_deposit)
    else:
        # No visible karbonite: drift away from clustered friendly units.
        nearby = gc.sense_nearby_units_by_team(my_location, worker_spacing, variables.my_team)
        away_from_units = sense_util.best_available_direction(gc,unit,nearby)
        #print(unit.id, "at", unit.location.map_location(), "is trying to move to", away_from_units)
        movement.try_move(gc,unit,away_from_units)
# updates building assignments in case buildings are destroyed before they are built
def update_building_assignment(gc,building_assignment,blueprinting_assignment):
    """
    Drop assignments for buildings destroyed before completion and
    re-evaluate the sites their footprints had blocked.

    NOTE(review): variables.invalid_building_locations stores True for sites
    that MAY be built on (see get_optimal_building_location) -- the name is
    inverted; confirm before renaming.
    """
    invalid_building_locations = variables.invalid_building_locations
    my_unit_ids = {unit.id for unit in gc.my_units()}
    for building_id in list(building_assignment.keys()):
        if building_id in my_unit_ids:
            continue
        # Building no longer exists: forget its workers, then re-open the
        # surrounding sites where nothing else blocks construction.
        del building_assignment[building_id]
        removed_building_location = variables.all_building_locations[building_id]
        for site in factory_spacing_locations(removed_building_location):
            site_coords = (site.x, site.y)
            if site_coords not in variables.passable_locations_earth or not variables.passable_locations_earth[site_coords]:
                continue
            if invalid_building_locations[site_coords]:
                continue  # already marked buildable
            # A site stays blocked while any structure, or any pending
            # blueprint assignment, is within factory spacing of it.
            blocked = any(
                other.unit_type == variables.unit_types["factory"] or
                other.unit_type == variables.unit_types["rocket"]
                for other in gc.sense_nearby_units(site, variables.factory_spacing)
            )
            if not blocked:
                blocked = any(
                    sense_util.distance_squared_between_maplocs(site, assigned_site.map_location) < variables.factory_spacing
                    for assigned_site in blueprinting_assignment.values()
                )
            # BUG FIX: the original unconditionally set this flag to True at
            # the end of the loop body -- the `continue` statements were in
            # the INNER loops, so the False written for blocked sites was
            # always overwritten.
            invalid_building_locations[site_coords] = not blocked
def assign_unit_to_build(gc,my_unit,my_location,start_map,building_assignment):
    """
    Attach my_unit to the nearest blueprint that still has worker capacity.

    Returns the chosen blueprint unit, or None when every blueprint is
    fully staffed. Mutates building_assignment by appending my_unit.id to
    the chosen blueprint's worker list.
    """
    understaffed = []
    for blueprint_id, workers in building_assignment.items():
        blueprint = gc.unit(blueprint_id)
        capacity = get_workers_per_building(gc, start_map, blueprint.location.map_location())
        if len(workers) < capacity:
            understaffed.append(blueprint)
    chosen = min(
        understaffed,
        key=lambda b: sense_util.distance_squared_between_maplocs(my_location, b.location.map_location()),
        default=None,
    )
    if chosen is not None:
        building_assignment[chosen.id].append(my_unit.id)
    return chosen
def build(gc,my_unit,my_location,start_map,building_assignment,current_roles):
    """
    Make my_unit work on its assigned structure: (re)assign it when needed,
    hammer when adjacent, otherwise walk toward the blueprint. Drops the
    unit from the "builder" role when nothing is left to build.
    """
    assigned_building = None
    needs_assignment = True
    # Look for an existing assignment for this worker.
    for building_id in building_assignment:
        if my_unit.id in building_assignment[building_id] and building_id in variables.list_of_unit_ids:
            assigned_building = gc.unit(building_id)
            if assigned_building.structure_is_built():
                # The structure finished without this worker; retire the
                # entry and pick a new blueprint.
                del building_assignment[building_id]
                assigned_building = assign_unit_to_build(gc,my_unit,my_location,start_map,building_assignment)
                needs_assignment = False
                break
            else:
                needs_assignment = False
    if needs_assignment:
        assigned_building = assign_unit_to_build(gc,my_unit,my_location,start_map,building_assignment)
    if assigned_building is None:
        # No blueprints available anywhere: stop being a builder.
        current_roles["builder"].remove(my_unit.id)
        return
    assigned_location = assigned_building.location.map_location()
    if my_location.is_adjacent_to(assigned_location):
        if gc.can_build(my_unit.id, assigned_building.id):
            gc.build(my_unit.id, assigned_building.id)
            if assigned_building.structure_is_built():
                current_roles["builder"].remove(my_unit.id)
                # BUG FIX: the original did `del building_assignment[building_id]`,
                # where building_id is a stale loop variable (or unbound when the
                # unit was freshly assigned); key by the building actually built.
                del building_assignment[assigned_building.id]
        return
    # Not adjacent yet: path toward the blueprint.
    try_move_smartly(my_unit, my_location, assigned_location)
def adjacent_locations(location):
    """Return the passable Earth MapLocations in the 8 squares around location."""
    offsets = ((0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1))
    passable = variables.passable_locations_earth
    neighbors = []
    for dx, dy in offsets:
        coords = (location.x + dx, location.y + dy)
        # dict.get(...) is falsy both for missing and for impassable squares.
        if passable.get(coords):
            neighbors.append(bc.MapLocation(location.planet, coords[0], coords[1]))
    return neighbors
def factory_spacing_locations(location):
    """Return passable Earth MapLocations at the factory-spacing offsets from location."""
    passable = variables.passable_locations_earth
    return [
        bc.MapLocation(location.planet, location.x + dx, location.y + dy)
        for dx, dy in variables.factory_spacing_diff
        if passable.get((location.x + dx, location.y + dy))
    ]
def is_valid_blueprint_location(gc,start_map,location,blueprinting_queue,blueprinting_assignment):
    """
    A location can take a new blueprint when it is on the map, passable,
    and far enough from existing structures, pending blueprint sites, and
    the enemy's initial positions.
    """
    blueprint_spacing = 10
    nearby = gc.sense_nearby_units(location, blueprint_spacing)
    if not start_map.on_map(location) or location in variables.impassable_terrain_earth:
        return False
    for other in nearby:
        if other.unit_type == variables.unit_types["factory"] or other.unit_type == variables.unit_types["rocket"]:
            return False
    for assigned_site in blueprinting_assignment.values():
        if sense_util.distance_squared_between_maplocs(location, assigned_site.map_location) < blueprint_spacing:
            return False
    for enemy_loc in variables.init_enemy_locs:
        if sense_util.distance_squared_between_maplocs(location, enemy_loc) < 50:
            return False
    return True
# generates locations to build factories that are close to karbonite deposits
def get_optimal_building_location(gc, start_map, center, building_type, karbonite_locations, blueprinting_queue, blueprinting_assignment):
    """
    Pick coordinates near `center` for a new structure.

    Rockets take the first open square adjacent to center. Factories score
    every still-available square within diffs_20 of center by the karbonite
    held in its neighborhood (squares sitting on karbonite are skipped) and
    take the highest-scoring one; when no karbonite is nearby, any valid
    square adjacent to center is used. Returns (x, y) coordinates or None.
    """
    potential_locations = []
    karbonite_adjacent_locations = {}
    no_deposits_located = True
    center_coords = (center.x, center.y)
    if building_type == variables.unit_types["rocket"]:
        # Rockets: first passable, still-available neighbor of center.
        for default_location_coords in explore.coord_neighbors(center_coords, include_self = True):
            default_location = bc.MapLocation(variables.curr_planet, default_location_coords[0],
                                              default_location_coords[1])
            if default_location_coords in variables.passable_locations_earth and variables.passable_locations_earth[default_location_coords] and variables.invalid_building_locations[default_location_coords]:
                return default_location_coords
    # Score candidate squares by the karbonite in their neighborhoods.
    for location_coords in explore.coord_neighbors(center_coords, diff=explore.diffs_20, include_self=True):
        location = bc.MapLocation(variables.curr_planet, location_coords[0], location_coords[1])
        if location_coords in variables.passable_locations_earth and variables.passable_locations_earth[location_coords] and variables.invalid_building_locations[location_coords]:
            # print("optimal building location time",time.time() - start_time)
            if location_coords in karbonite_locations:
                if karbonite_locations[location_coords] > 0:
                    # Never build directly on a deposit.
                    continue
            for adjacent_location in explore.coord_neighbors(location_coords):
                if adjacent_location in karbonite_locations:
                    karbonite_value = karbonite_locations[adjacent_location]
                else:
                    karbonite_value = 0
                if location_coords not in karbonite_adjacent_locations:
                    karbonite_adjacent_locations[location_coords] = karbonite_value
                else:
                    karbonite_adjacent_locations[location_coords] += karbonite_value
            # print("par t2 location time",time.time() - start_time)
            if karbonite_adjacent_locations[location_coords] > 0:
                no_deposits_located = False
    if len(karbonite_adjacent_locations) == 0:
        return None
    elif no_deposits_located:
        # No karbonite anywhere nearby: fall back to any valid square by center.
        for default_location_coords in explore.coord_neighbors(center_coords, include_self = True):
            default_location = bc.MapLocation(variables.curr_planet, default_location_coords[0],
                                              default_location_coords[1])
            if is_valid_blueprint_location(gc, start_map, default_location, blueprinting_queue,
                                           blueprinting_assignment):
                return default_location_coords
    # Otherwise take the square with the most adjacent karbonite.
    return max(list(karbonite_adjacent_locations.keys()), key=lambda loc: karbonite_adjacent_locations[loc])
"""
# generates locations to build factories that are close to karbonite deposits
def get_optimal_building_location(gc,start_map,center,karbonite_locations,blueprinting_queue,blueprinting_assignment):
potential_locations = []
karbonite_adjacent_locations = {}
no_deposits_located = True
for location in gc.all_locations_within(center,20):
start_time = time.time()
if (location.x, location.y) in variables.passable_locations_earth and variables.passable_locations_earth[(location.x, location.y)] and variables.invalid_building_locations[(location.x,location.y)]:
#print("optimal building location time",time.time() - start_time)
loc_key = (location.x,location.y)
if loc_key in karbonite_locations:
if karbonite_locations[loc_key] > 0:
continue
start_time = time.time()
for adjacent_location in adjacent_locations(location):
if location == adjacent_location: continue
adj_key = (adjacent_location.x,adjacent_location.y)
if adj_key in karbonite_locations:
karbonite_value = karbonite_locations[adj_key]
else:
karbonite_value = 0
if loc_key not in karbonite_adjacent_locations:
karbonite_adjacent_locations[loc_key] = karbonite_value
else:
karbonite_adjacent_locations[loc_key] += karbonite_value
#print("par t2 location time",time.time() - start_time)
if karbonite_adjacent_locations[loc_key] > 0:
no_deposits_located = False
if len(karbonite_adjacent_locations) == 0:
return None
elif no_deposits_located:
for default_location in adjacent_locations(center):
if is_valid_blueprint_location(gc,start_map,default_location,blueprinting_queue,blueprinting_assignment):
return (default_location.x,default_location.y)
return max(list(karbonite_adjacent_locations.keys()),key=lambda loc:karbonite_adjacent_locations[loc])
"""
# function to flexibly determine when a good time to expand factories
def can_blueprint_factory(gc,factory_count):
    """Allow a new factory blueprint unless the late-game lull or the cap forbids it."""
    late_game_lull = gc.round() > 250 and variables.num_enemies < 5
    return (not late_game_lull) and factory_count < get_factory_limit()
def can_blueprint_rocket(gc,rocket_count):
    """Rockets unlock once Mars is reachable, the tech is researched, and round 180 has passed."""
    if variables.num_passable_locations_mars <= 0:
        return False
    if variables.research.get_level(variables.unit_types["rocket"]) <= 0:
        return False
    return gc.round() > 180
def blueprinting_queue_limit(gc):
    """Maximum number of sites allowed in the blueprint queue (fixed; gc reserved for tuning)."""
    limit = 1
    return limit
def get_factory_limit():
    """Scale the factory cap with stockpiled karbonite, never dropping below 4."""
    scaled = int(variables.my_karbonite / 30)
    return scaled if scaled > 4 else 4
def get_rocket_limit():
    """Hard cap on concurrently planned rockets."""
    rocket_cap = 3
    return rocket_cap
def get_closest_site(my_unit,my_location,blueprinting_queue):
    """Return the queued build site nearest my_location, or None when the queue is empty."""
    return min(
        blueprinting_queue,
        key=lambda site: sense_util.distance_squared_between_maplocs(my_location, site.map_location),
        default=None,
    )
# controls how many buildings we can have in progress at a time, can modify this to scale with karbonite number, round # or number of units (enemy or ally)
def building_in_progress_cap(gc):
    """How many structures may be under construction at once (fixed; gc reserved for tuning)."""
    cap = 2
    return cap
def blueprint(gc,my_unit,my_location,building_assignment,blueprinting_assignment,current_roles):
    """
    Carry out a worker's blueprint assignment: lay the blueprint when
    adjacent to the assigned site, step off the site when standing on it,
    otherwise walk toward it. Workers with no assignment are removed from
    the "blueprinter" role; a successful blueprint converts the worker
    into a "builder" on the new structure.
    """
    directions = variables.directions
    #print('BLUEPRINTING')
    # assign this unit to build a blueprint, if nothing to build just move away from other factories
    if my_unit.id not in blueprinting_assignment:
        # print(my_unit.id,"currently has no assigned site")
        current_roles["blueprinter"].remove(my_unit.id)
    # build blueprint in assigned square
    if my_unit.id in blueprinting_assignment:
        assigned_site = blueprinting_assignment[my_unit.id]
        # if my_unit.id in blueprinting_assignment:
        #print("unit",my_unit.id,"blueprinting at",blueprinting_assignment[my_unit.id])
        #print(unit.id, "is assigned to building in", assigned_site.map_location)
        direction_to_site = my_location.direction_to(assigned_site.map_location)
        if my_location.is_adjacent_to(assigned_site.map_location):
            if gc.can_blueprint(my_unit.id, assigned_site.building_type, direction_to_site):
                gc.blueprint(my_unit.id, assigned_site.building_type, direction_to_site)
                new_blueprint = gc.sense_unit_at_location(assigned_site.map_location)
                variables.all_building_locations[new_blueprint.id] = assigned_site.map_location
                # update shared data structures
                building_assignment[new_blueprint.id] = [my_unit.id] # initialize new building
                #print("building_assignment",building_assignment)
                #print("blueprinting assignment before",blueprinting_assignment)
                del blueprinting_assignment[my_unit.id]
                current_roles["blueprinter"].remove(my_unit.id)
                current_roles["builder"].append(my_unit.id)
                #print("blueprinting assignment after",blueprinting_assignment)
                #print(my_unit.id, " just created a blueprint!")
            else:
                pass
                #print(my_unit.id, "can't build but is right next to assigned site")
        elif my_location == assigned_site.map_location:
            # when unit is currently on top of the queued building site
            d = random.choice(variables.directions)
            movement.try_move(gc,my_unit,d)
        else:
            # move toward queued building site
            next_direction = my_location.direction_to(assigned_site.map_location)
            movement.try_move(gc,my_unit,next_direction)
            """
            try_move_smartly(my_unit,my_location,assigned_site.map_location)
            """
class BuildSite:
    """A queued construction request: where to build and what to build."""

    def __init__(self, map_location, building_type):
        self.map_location = map_location
        self.building_type = building_type

    def get_map_location(self):
        return self.map_location

    def get_building_type(self):
        return self.building_type

    def _describe(self):
        """Shared rendering for __str__ and __repr__."""
        return "{map_location : %s, building_type : %s }" % (self.map_location, self.building_type)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()

    def __eq__(self, other):
        return (self.map_location == other.map_location
                and self.building_type == other.building_type)

    def __hash__(self):
        # Location-only hash: equal sites share a location, so this stays
        # consistent with __eq__ (building_type is deliberately ignored).
        return self.map_location.x + self.map_location.y
|
import ssl
import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
class Top10Notices:
    """Scrape the ten newest notice titles and links from university sites.

    Every ``*_top10_notice`` method returns ``[titles, links]`` -- two
    parallel lists of at most MAX_NOTICES strings.
    """

    MAX_NOTICES = 10

    @staticmethod
    def _take_top(pairs):
        """Collect up to MAX_NOTICES (title, link) pairs into [titles, links].

        BUG FIX: the originals checked their counters only inside the inner
        link loop, so the outer container loop kept appending one extra
        notice per container after the limit had been reached.
        """
        titles, links = [], []
        for text, href in pairs:
            titles.append(text + ' ')
            links.append(href)
            if len(titles) >= Top10Notices.MAX_NOTICES:
                break
        return [titles, links]

    @staticmethod
    def _fetch_soup(url, context=None):
        """Download url and return it parsed with BeautifulSoup."""
        source_code = urllib.request.urlopen(url, context=context)
        return BeautifulSoup(source_code.read(), "html.parser")

    def nsu_top10_notice(self):
        soup = self._fetch_soup('http://www.northsouth.edu/nsu-announcements/?anaunc_start=0')
        pairs = ((link.text, 'http://www.northsouth.edu/' + link.get('href'))
                 for item in soup.find_all('div', {'class': 'post-scroller-item'})
                 for link in item.find_all('a'))
        return self._take_top(pairs)

    def aiub_top10_notice(self):
        soup = self._fetch_soup('http://www.aiub.edu/')
        pairs = ((link.text, 'http://www.aiub.edu' + link.get('href'))
                 for li in soup.find_all('div', {'class': 'bs-callout'})
                 for link in li.find_all('a'))
        return self._take_top(pairs)

    def bracu_top10_notice(self):
        # BRACU serves a certificate that urllib rejects by default.
        context = ssl._create_unverified_context()
        soup = self._fetch_soup('http://www.bracu.ac.bd/#announcement', context=context)
        pairs = ((link.text, 'http://www.bracu.ac.bd' + link.get('href'))
                 for div in soup.find_all('div', {'class': 'calender-item clearfix'})
                 for link in div.find_all('a'))
        return self._take_top(pairs)

    def ewu_top10_notice(self):
        url = 'https://www.ewubd.edu/news/'
        soup = self._fetch_soup(url)
        # EWU exposes no per-notice link; every entry points at the news page.
        headlines = (div.find('h3')
                     for div in soup.find_all('div', {'class': 'news-wrap news-wrap-height'}))
        pairs = ((h.text, url) for h in headlines)
        return self._take_top(pairs)

    def iub_top10_notice(self):
        soup = self._fetch_soup('http://www.iub.edu.bd/')
        link_div = soup.find('div', {'class': 'col-lg-5 resources'})
        pairs = ((link.text, link.get('href')) for link in link_div.find_all('a'))
        return self._take_top(pairs)

    def iubat_top10_notice(self):
        # IUBAT renders its notices with JavaScript, so a real browser is needed.
        driver = webdriver.Firefox()
        try:
            driver.get('https://iubat.edu/notice/')
            WebDriverWait(driver, 60).until(
                EC.presence_of_element_located((By.CLASS_NAME, "vc_column-inner"))
            )
            soup = BeautifulSoup(driver.page_source, 'html.parser')
        finally:
            driver.quit()
        pairs = ((link.text, link.get('href'))
                 for link in soup.find_all('a', {'class': 'vc_gitem-link'}))
        return self._take_top(pairs)

    def uiu_top10_notice(self):
        soup = self._fetch_soup('http://www.uiu.ac.bd/notices/')
        anchors = (h2.find('a') for h2 in soup.find_all('h2', {'class': 'entry-title'}))
        pairs = ((a.text, a.get('href')) for a in anchors)
        return self._take_top(pairs)

    def _seu_notices(self, url):
        """Shared scraper for the two SEU domains."""
        soup = self._fetch_soup(url)
        pairs = ((link.text, url + link.get('href'))
                 for link in soup.find_all('a', {'rel': 'facebox'}))
        return self._take_top(pairs)

    def seu_top10_notice(self):
        return self._seu_notices('https://www.seu.edu.bd/notice_board.php')

    def seu_top10_notice007(self):
        return self._seu_notices('http://www.seu.ac.bd/notice_board.php')

    def uniList(self):
        """Names of the supported universities, in menu order."""
        return ['North South University(NSU)', 'American International University-Bangladesh(AIUB)', 'BRAC University(BRACU)', 'East West University(EWU)', 'Independent University, Bangladesh(IUB)', 'International University of Business Agriculture and Technology (IUBAT)', 'United International University(UIU)', 'Southeast University(SEU)']
|
# Generated by Django 2.2.15 on 2020-08-15 06:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a ``github`` URL field to the ``details`` model."""

    dependencies = [
        ('projects', '0009_auto_20200810_0726'),
    ]

    operations = [
        migrations.AddField(
            model_name='details',
            name='github',
            # Default points at the tutorial the project was built from.
            field=models.URLField(default='https://data-flair.training/blogs/django-crud-example/'),
        ),
    ]
|
from django.db import models
# Create your models here.
class WeatherReports(models.Model):
    """Hourly weather measurements for one city on one date.

    ``values`` maps each hour (0-23) to a dict with the keys of
    DEFAULT_VALUE, every measurement an int.
    """

    class Meta:
        db_table = 'weather_reports'
        verbose_name = 'Погодные сводки'

    # Template for one hour of measurements; always copied, never shared.
    DEFAULT_VALUE = {'pressure': 0, 'temperature': 0, 'humidity': 0, 'wind_speed': 0}

    # NOTE(review): verbose_name says 'Страна' (country) for a city FK -- confirm.
    city = models.ForeignKey('api.Cities', verbose_name=u'Страна', db_constraint=False, related_name='city_weather',
                             db_index=False, on_delete=models.CASCADE, to_field='id', blank=False, null=False)
    date = models.DateTimeField(verbose_name=u'Дата', blank=False, null=False)
    values = models.JSONField(verbose_name=u'Параметры по часам', default=dict)

    def save(self, *args, **kwargs):
        """Fill ``values`` with defaults when absent; reject malformed dicts."""
        if self.values is None:
            self.values = self.get_default_values_dict()
        elif not self.is_valid_struct_of_values():
            raise ValueError("invalid structure of dict for field 'values' of model WeatherReports")
        super(WeatherReports, self).save(*args, **kwargs)

    def is_valid_struct_of_values(self):
        """True when values is {0..23: {<DEFAULT_VALUE keys>: int}}.

        NOTE(review): a JSON round-trip turns int keys into strings, so rows
        re-loaded from the DB will fail this check -- confirm intended.
        """
        if isinstance(self.values, dict) and list(self.values.keys()) == list(range(0, 24)):
            try:
                is_valid = all(value.keys() == self.DEFAULT_VALUE.keys() and
                               all(isinstance(v, int) for v in value.values())
                               for value in self.values.values())
                return is_valid
            except AttributeError:
                pass
        return False

    def get_default_values_dict(self):
        """Build a fresh 24-hour dict of zeroed measurements.

        BUG FIX: the original placed the SAME DEFAULT_VALUE dict object
        behind every hour, so mutating one hour silently mutated all 24
        (and the class-level template itself).
        """
        return {i: dict(self.DEFAULT_VALUE) for i in range(0, 24)}
class Countries(models.Model):
    """Lookup table of country names."""

    class Meta:
        db_table = 'countries'
        verbose_name = 'Страны'

    name = models.CharField(max_length=50, verbose_name=u'Название страны', blank=False, null=False)
class Cities(models.Model):
    """A city within a country; referenced by WeatherReports.city."""

    class Meta:
        db_table = 'cities'
        verbose_name = 'Города'

    # NOTE(review): verbose_name says 'Город' (city) for a country FK -- confirm.
    country = models.ForeignKey('api.Countries', verbose_name=u'Город', related_name='country_cities',
                                db_constraint=False, db_index=False, on_delete=models.CASCADE, to_field='id',
                                blank=False, null=False)
    name = models.CharField(max_length=50, verbose_name=u'Название города', blank=False, null=False)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import time
import signal
from seleniumwebtests import swt
def signal_handler(signum, frame):
    """SIGINT handler: shut the selenium test runner down cleanly, then exit.

    Fixes: the parameter was named `signal`, shadowing the imported module;
    print is now called as a function, valid on both Python 2 and 3.
    """
    print("\nKilling...")
    swt.end()
    sys.exit(0)
def main(options=None):
    """Configure and run the selenium web tests.

    Args:
        options: optional dict of runner options; defaults to empty.

    BUG FIX: the original default `options={}` was a shared mutable dict,
    silently carried over between calls if the runner mutated it.
    """
    signal.signal(signal.SIGINT, signal_handler)
    swt.set_options(options if options is not None else {})
    swt.run()
if __name__ == "__main__":
    # Script entry point: run the suite with default options.
    main()
|
#!/usr/bin/python3
"""File I/O"""
import json
def from_json_string(my_str):
    """Return the Python object described by the JSON document my_str."""
    decoder = json.JSONDecoder()
    return decoder.decode(my_str)
|
import tflearn.datasets.oxflower17 as oxflower17
import numpy as np
class BatchDatset:
    """Batched reader over the Oxford Flowers-17 dataset.

    Images are mean-centered per image over the spatial axes at load time;
    the pre-centering means are kept in ``image_mean``.
    """

    def __init__(self):
        print("Initializing Batch Dataset Reader...")
        self._read_images()
        self.batch_offset = 0
        self.epochs_completed = 0

    def _read_images(self):
        """Load images/labels and subtract each image's spatial mean."""
        self.images, self.annotations = oxflower17.load_data()
        self.image_mean = np.mean(self.images, axis=(1, 2), keepdims=True)
        # FIX: reuse the mean computed above instead of recomputing the
        # identical reduction a second time.
        self.images -= self.image_mean

    def get_records(self):
        """Return the full (images, annotations) arrays."""
        return self.images, self.annotations

    def reset_batch_offset(self, offset=0):
        """Restart sequential batching at `offset`."""
        self.batch_offset = offset

    def next_batch(self, batch_size):
        """Return the next sequential batch, reshuffling at each epoch end."""
        start = self.batch_offset
        self.batch_offset += batch_size
        if self.batch_offset > len(self.images):
            # Finished an epoch: reshuffle and restart from the beginning.
            self.epochs_completed += 1
            print("****************** Epochs completed: " + str(self.epochs_completed) + "******************")
            perm = np.arange(self.images.shape[0])
            np.random.shuffle(perm)
            self.images = self.images[perm]
            self.annotations = self.annotations[perm]
            # Start next epoch
            start = 0
            self.batch_offset = batch_size
        end = self.batch_offset
        return self.images[start:end], self.annotations[start:end]

    def get_random_batch(self, batch_size):
        """Return `batch_size` examples sampled uniformly with replacement."""
        indexes = np.random.randint(0, int(self.images.shape[0]), size=[batch_size]).tolist()
        return self.images[indexes], self.annotations[indexes]
# [DP-Sequence-Action-Groups]
# https://leetcode.com/problems/largest-sum-of-averages/
# 813. Largest Sum of Averages
# https://www.youtube.com/watch?v=IPdShoUE9z8
# Related: 312. Burst Balloons
# We partition a row of numbers A into at most K adjacent (non-empty)
# groups, then our score is the sum of the average of each group. What is
# the largest score we can achieve?
#
# Note that our partition must use every number in A, and that scores are
# not necessarily integers.
#
# Example:
# Input:
# A = [9,1,2,3,9]
# K = 3
# Output: 20
# Explanation:
# The best choice is to partition A into [9], [1, 2, 3], [9]. The answer is
# 9 + (1 + 2 + 3) / 3 + 9 = 20.
# We could have also partitioned A into [9, 1], [2], [3, 9], for example.
# That partition would lead to a score of 5 + 2 + 6 = 13, which is worse.
#
#
# Note:
#
# 1 <= A.length <= 100.
# 1 <= A[i] <= 10000.
# 1 <= K <= A.length.
# Answers within 10^-6 of the correct answer will be accepted as correct.
class Solution(object):
    def largestSumOfAverages(self, A, K):
        """
        :type A: List[int]
        :type K: int
        :rtype: float

        Layered DP over prefix sums: layer[i] holds the best score for
        splitting the first i elements into the current number of groups,
        refined once per extra group up to K.
        """
        n = len(A)
        # prefix[i] = sum of the first i elements, so sum(A[j:i]) is
        # prefix[i] - prefix[j].
        prefix = [0]
        for val in A:
            prefix.append(prefix[-1] + val)

        # One group: the plain average of the prefix. Index 0 is a sentinel.
        layer = [float(prefix[i]) / i if i else float('-inf') for i in range(n + 1)]
        for groups in range(2, K + 1):
            refined = [float('-inf')] * (n + 1)
            for i in range(groups, n + 1):
                # Last group covers A[j:i]; the rest is the previous layer.
                refined[i] = max(
                    layer[j] + float(prefix[i] - prefix[j]) / (i - j)
                    for j in range(groups - 1, i)
                )
            layer = refined
        return layer[n]
|
from rest_framework import permissions
from users.models import Student
class IsTeacher(permissions.BasePermission):
    """Grants access only to users whose role is teacher ("TE")."""

    def has_permission(self, request, view):
        # getattr guards AnonymousUser, which has no `role` attribute and
        # made the original raise AttributeError instead of denying access.
        return bool(request.user and getattr(request.user, "role", None) == "TE")
class IsTeacherOrIsStudentReadOnly(permissions.BasePermission):
    """The owning teacher gets full access; enrolled students get read-only."""

    def has_object_permission(self, request, view, obj):
        student = Student.objects.filter(user=request.user).first()
        # Single students.all() evaluation (the original queried it twice,
        # and its outer truthiness check was redundant: membership in an
        # empty queryset is simply False).
        if request.method in permissions.SAFE_METHODS and student in obj.students.all():
            return True
        return obj.teacher == request.user
class IsTeacherOrReadOnly(permissions.BasePermission):
    """Anyone may read; only the owning teacher may write."""

    def has_object_permission(self, request, view, obj):
        is_read = request.method in permissions.SAFE_METHODS
        return is_read or obj.teacher == request.user
class StudentReadOnly(permissions.BasePermission):
    """Read-only access for everyone; all writes are denied."""

    def has_object_permission(self, request, view, obj):
        return request.method in permissions.SAFE_METHODS
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 16:29:40 2020
@author: tnye
"""
# Imports
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Read in dataframes
rt_df = pd.read_csv('/Users/tnye/tsuquakes/data/misc/melgar_hayes2017.csv')
sd_df = pd.read_csv('/Users/tnye/tsuquakes/data/misc/ye2016.csv')

# Obtain origin times from the dfs
rt_datetimes = np.array(rt_df['origin time'])
rt_USGSID = np.array(rt_df['#USGS ID'])
rt_types = np.array(rt_df['type'])
rt_mag = np.array(rt_df['Mw'])
rt_depths = np.array(rt_df['depth(km)'])
sd_dates = np.array(sd_df['Date'])
sd_times = np.array(sd_df['Time'])

# Obtain rise time and stress drops
all_rise_times = np.array(rt_df['rise time(s)'])
all_apparent_stress = np.array(sd_df['σa(MPa)'])
all_energy_stress2 = np.array(sd_df['ΔσE2.0(MPa)'])
# BUG FIX: this previously re-read the 2.0 column, so the 2.5 series was a
# duplicate of the 2.0 series. Column name assumed to be 'ΔσE2.5(MPa)' --
# confirm against the ye2016.csv headers.
all_energy_stress25 = np.array(sd_df['ΔσE2.5(MPa)'])
all_energy_stress3 = np.array(sd_df['ΔσE3.0(MPa)'])

# Initialize origin lists
rt_origins = np.array([])
sd_origins = np.array([])
# Truncate rise-time origins to minute precision (drop seconds/fractions)
for origin in rt_datetimes:
    no_fraction = origin.split('.')[0]
    pieces = no_fraction.split(':')
    rt_origins = np.append(rt_origins, pieces[0] + ':' + pieces[1])

# Build matching minute-precision origin strings for the stress-drop df
for i, date in enumerate(sd_dates):
    d = date.split('-')
    t = sd_times[i].split(':')
    sd_origins = np.append(sd_origins, d[0] + '-' + d[1] + '-' + d[2] + 'T' + t[0] + ':' + t[1])
# Find common events between both datasets (matched on minute-precision
# origin strings built above); only megathrust (type "i") events are kept.
rise_times = []
apparent_stress = []
energy_stress2 = []
energy_stress25 = []
energy_stress3 = []
common_events = []
common_depths = []
common_mag = []
common_IDs = []
for i, element in enumerate(rt_origins):
    # Only select megathrust events
    if rt_types[i] == "i":
        if element in sd_origins:
            common_events.append(element.split('T')[0])
            common_depths.append(rt_depths[i])
            common_mag.append(rt_mag[i])
            common_IDs.append(rt_USGSID[i])
            # Find indexes of rise times and stress drops for common events
            rt_ind = i
            # The first match in the stress-drop table is taken.
            sd_ind = np.where(sd_origins == element)[0][0]
            # Find rise times and stress drops for common events
            rise_times.append(all_rise_times[rt_ind])
            apparent_stress.append(all_apparent_stress[sd_ind])
            energy_stress2.append(all_energy_stress2[sd_ind])
            energy_stress25.append(all_energy_stress25[sd_ind])
            energy_stress3.append(all_energy_stress3[sd_ind])
###################### Plot stress drop vs rise time ##########################

# (series, y-axis label, output file stem) for each stress measure.
# BUG FIXES vs the original loop: the 2.5/3.0 branches used `ylabel == ...`
# (a comparison, not an assignment), the 3.0 filename was misspelled
# '.ong', savefig appended a second '.png', and the elif dispatch compared
# whole lists (which misfires when two series hold equal values).
stress_series = [
    (apparent_stress, 'Apparent Stress(MPa)', 'RTvsAS'),
    (energy_stress2, 'Energy-Based Stress Drop 2.0(MPa)', 'RTvsES2'),
    (energy_stress25, 'Energy-Based Stress Drop 2.5(MPa)', 'RTvsES2_5'),
    (energy_stress3, 'Energy-Based Stress Drop 3.0(MPa)', 'RTvsES3'),
]

for stress, ylabel, figstem in stress_series:
    ########################### Find line of best fit #########################
    # Fit a line in log-log space and report its R^2
    coefficients = np.polyfit(np.log10(rise_times), np.log10(stress), 1)
    polynomial = np.poly1d(coefficients)
    log10_y_fit = polynomial(np.log10(rise_times))
    r2 = r2_score(np.log10(stress), log10_y_fit)

    ############################### Make Plot #################################
    fig = plt.figure(figsize=(10, 15))
    ax = plt.gca()
    color_map = plt.cm.get_cmap('plasma').reversed()
    im = ax.scatter(rise_times, stress, c=common_depths, cmap=color_map)
    for i, event in enumerate(common_events):
        ax.annotate(f'{common_IDs[i]}', (rise_times[i], stress[i]), size=6)
    plt.plot(rise_times, 10**log10_y_fit)
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.set_xlabel('Rise Time (s)')
    ax.set_ylabel(ylabel)

    # Set up text box with the fit parameters
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    textstr = '\n'.join((
        f'log10(sd) = log10(rt) * {coefficients[0]} + {coefficients[1]}',
        r'R2 = %.2f' % (r2, )))
    plt.text(9, 1.7, textstr, fontsize=8, bbox=props)

    # Set up horizontal colorbar keyed to event depth
    divider = make_axes_locatable(ax)
    cax = divider.new_vertical(size='2.5%', pad=0.8, pack_start=True)
    fig.add_axes(cax)
    cbar = fig.colorbar(im, cax=cax, orientation='horizontal')
    cbar.set_label('Depth(km)')
    cbar.ax.invert_yaxis()

    # Save fig
    plt.savefig(f'/Users/tnye/tsuquakes/plots/misc/{figstem}.png', dpi=300)
    plt.close()
|
# coding=utf-8
from pyspark import SparkContext, SparkConf
import json
import nltk
def fit(line):
    """Keep only nouns, proper nouns, and base-form verbs from a GloVe line.

    Returns [(word, vector)] when the word's POS tag matches, else []
    (shaped for RDD.flatMap).
    """
    vals = line.rstrip().split(' ')
    word = vals[0]
    tagged = nltk.pos_tag([word])[0]
    if tagged[1] in ['NN', 'NNP', 'VB']:
        # BUG FIX: on Python 3, map() returns a lazy iterator that
        # json.dumps cannot serialize later; materialize the vector.
        return [(word, list(map(float, vals[1:])))]
    return []
# def fit2(line):
# #筛选处理后的glove语料库中的名词,专有名词和动词原形
# obj = json.loads(line)
# word = obj['key']
# f_word = nltk.pos_tag([word])[0]
# if f_word[1] in ['NN','NNP','VB']:
# return [(word,obj['value'])]
# else:
# return []
def list2file(l):
    """Write (key, vector) pairs to newvectors.txt, one JSON object per line.

    Args:
        l: iterable of (word, vector) pairs, e.g. from rdd.collect().
    """
    # Context manager guarantees the file is closed even if a write fails.
    with open('newvectors.txt', 'w') as f:
        for key, value in l:
            f.write(json.dumps({'key': key, 'value': value}) + '\n')
# Spark job configuration: run locally under the app name "test".
appName = "test"
master = "local"
conf = SparkConf().setAppName(appName).setMaster(master)
sc = SparkContext(conf=conf)
# Filter the GloVe vectors down to nouns / proper nouns / base-form verbs
# and persist the survivors to newvectors.txt as JSON lines.
rdd = sc.textFile('vectors.txt')
rdd = rdd.flatMap(fit)
l = rdd.collect()
list2file(l)
|
from django.test import TestCase, RequestFactory, Client
from django.contrib.auth.models import AnonymousUser, User
import public_gate.views as views
class SimpleTest(TestCase):
    """Sanity check that the test runner itself works."""

    def test_basic_addition(self):
        """Verify trivial arithmetic: 1 + 1 must equal 2."""
        self.assertEqual(2, 1 + 1)
class HomeBasicTests(TestCase):
    """Smoke tests for the public_gate views: each request should return 200."""

    def setUp(self):
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        # user fixture for the login test below
        self.user = User.objects.create_user(
            username='John Doe',
            email='',
            password='test')
        # full test client (exercises URL routing and middleware)
        self.c = Client()

    def test_home_responds(self):
        # Create an instance of a GET request and call the view directly.
        request = self.factory.get('/public_gate/home/')
        response = views.home(request)
        return self.assertEqual(response.status_code, 200)

    def test_plists_responds(self):
        # Create an instance of a GET request and call the view directly.
        request = self.factory.get('/public_gate/property_list/')
        response = views.property_lists(request)
        return self.assertEqual(response.status_code, 200)

    def test_add_plist_select_responds(self):
        # Create an instance of a GET request and call the view directly.
        request = self.factory.get('/public_gate/property_list/add/')
        response = views.add_property_list(request)
        return self.assertEqual(response.status_code, 200)

    def test_login(self):
        # POST the login form through the full client stack.
        # NOTE(review): the user was created as 'John Doe' but the form posts
        # 'John+Doe' — the test client does not URL-decode form data, so this
        # likely never authenticates; with follow=True the final page still
        # returns 200 either way. Confirm the intended username and field name.
        response = self.c.post("/login/", dict(login="John+Doe", password="test"), follow=True)
        return self.assertEqual(response.status_code, 200)
import gym
import numpy as np
from gym import wrappers
# module-level CartPole environment, shared by get_action and the loops below
env = gym.make('CartPole-v1')
# strategy identifiers for get_action
RANDOM_ACTION = 1
WEIGHT_BASED_ACTION = 2


def get_action(strategy, observation, weights):
    """Choose a CartPole action under the given strategy.

    RANDOM_ACTION samples the environment's action space;
    WEIGHT_BASED_ACTION pushes right (1) when the linear score
    dot(observation, weights) is positive, otherwise left (0).
    Any other strategy yields None.
    """
    if strategy == WEIGHT_BASED_ACTION:
        return 1 if np.dot(observation, weights) > 0 else 0
    if strategy == RANDOM_ACTION:
        # relies on the module-level `env` created above
        return env.action_space.sample()
    return None
# --- Random search over linear policies for CartPole ---
bestLength = 0
bestWeights = np.zeros(4)
episode_lengths = []
# run 100 times with random initial weights. Each time, run 100 games to get average best length
for i in range(100):
    # weight is from -1.0 to 1.0 to weight each parameter of the observation
    # observation: cart position, cart velocity, pole Angle, velocity of pole at tip
    # see this link: https://github.com/openai/gym/wiki/CartPole-v0
    new_weights = np.random.uniform(-1.0, 1.0, 4)
    length = []
    # run for 100 games with different settings
    for j in range(100):
        observation = env.reset()
        done = False
        cnt = 0
        # run one game until it ends.
        while not done:
            # env.render()
            cnt += 1
            # act according to the linear policy defined by new_weights
            action = get_action(WEIGHT_BASED_ACTION, observation, new_weights)
            observation, reward, done, _ = env.step(action=action)
            if done:
                break
        length.append(cnt)
    # compute average game length of 100 games
    average_length = float(sum(length) / len(length))
    # keep the weights with the best average episode length so far
    if average_length > bestLength:
        bestLength = average_length
        bestWeights = new_weights
    episode_lengths.append(average_length)
    # progress report every 10 candidate weight vectors
    if i % 10 == 0:
        print('Best length is:', bestLength)
# --- Replay one episode with the best weights, recording a video ---
done = False
cnt = 0
env = wrappers.Monitor(env, "MovieFile2", force=True)
observation = env.reset()
while not done:
    # env.render()
    cnt += 1
    # act greedily with the best weights found by the random search
    action = get_action(WEIGHT_BASED_ACTION, observation, bestWeights)
    observation, reward, done, _ = env.step(action=action)
    if done:
        break
print('game lasted ', cnt, 'moves')
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# ****************************************************************************
# This module is part of the package GTW.OMP.SRM.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# GTW.OMP.SRM.Crew_Member
#
# Purpose
# Crew member of a `Boat_in_Regatta`
#
# Revision Dates
# 19-Apr-2010 (CT) Creation
# 13-Oct-2010 (CT) Derive from `Link2` instead of `Link1`
# 1-Dec-2010 (CT) `key` added
# 9-Feb-2011 (CT) `right.ui_allow_new` set to `True`
# 18-Nov-2011 (CT) Import `unicode_literals` from `__future__`
# 8-Aug-2012 (CT) Add `example`
# 12-May-2013 (CT) Replace `auto_cache` by `rev_ref_attr_name`
# 26-Aug-2014 (CT) Add `key.ui_rank`
# ««revision-date»»···
#--
from _GTW import GTW
from _MOM.import_MOM import *
import _GTW._OMP._PAP.Person
import _GTW._OMP._SRM.Boat_in_Regatta
import _GTW._OMP._SRM.Entity
from _TFL.I18N import _, _T, _Tn
_Ancestor_Essence = GTW.OMP.SRM.Link2
class Crew_Member (_Ancestor_Essence) :
    """Crew member of a `Boat_in_Regatta` (link between a regatta entry and
    the sailor crewing on it).
    """

    class _Attributes (_Ancestor_Essence._Attributes) :

        _Ancestor = _Ancestor_Essence._Attributes

        ### Primary attributes

        class left (_Ancestor.left) :
            """`Boat_in_Regatta` the crew member sails on."""

            role_type = GTW.OMP.SRM.Boat_in_Regatta

        # end class left

        class right (_Ancestor.right) :
            """Person which sails as crew member on `boat_in_regatta`"""

            # NOTE(review): the role type is `Sailor` (which presumably links
            # to a Person), not `Person` directly — confirm docstring wording
            role_type = GTW.OMP.SRM.Sailor
            rev_ref_attr_name = "_crew"
            rev_ref_singular = True
            ui_allow_new = True

        # end class right

        ### Non-primary attributes

        class key (A_Int) :
            """The crew members of a boat will be sorted by `key`, if
            defined, by order of creation otherwise.
            """

            kind = Attr.Optional
            Kind_Mixins = (Attr.Sticky_Mixin, )
            default = 0
            example = 7
            ui_rank = 10

        # end class key

        class role (A_String) :
            """Role of crew member."""

            kind = Attr.Optional
            example = _ ("trimmer")
            max_length = 32
            # offer completions after the first typed character
            completer = Attr.Completer_Spec (1)

        # end class role

    # end class _Attributes

# end class Crew_Member
# export the module's public names when imported as part of the package
if __name__ != "__main__" :
    GTW.OMP.SRM._Export ("*")
### __END__ GTW.OMP.SRM.Crew_Member
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.stats import chisquare
from sympy import *
import os
#This is just needed for later, you can skip the latex part
# Verbatim Mathematica-exported LaTeX document with the error-propagation
# calculation used in opgave 5 below; it is written out to calculations.tex.
message = r""" %% AMS-LaTeX Created with the Wolfram Language : www.wolfram.com
\documentclass{article}
\usepackage{amsmath, amssymb, graphics, setspace}
\newcommand{\mathsym}[1]{{}}
\newcommand{\unicode}[1]{{}}
\newcounter{mathematicapage}
\begin{document}
\begin{doublespace}
\noindent\(\pmb{x\text{:=}\frac{y^2+b}{a};}\\
\pmb{\sigma _y\text{:=}0.05;}\\
\pmb{\sigma _a\text{:=}0.1;}\\
\pmb{\sigma _b\text{:=}0.4;}\\
\pmb{\sigma _x=D[x,y]^2*\sigma _y{}^2+D[x,a]^2*\sigma _a{}^2+D[x,b]^2*\sigma _b{}^2}\)
\end{doublespace}
\begin{doublespace}
\noindent\(\pmb{\frac{\text{{``}0.16{''}}}{a^2}+\frac{\text{{``}0.01{''}} y^2}{a^2}+\frac{\text{{``}0.01{''}} \left(b+y^2\right)^2}{a^4}}\\
\pmb{D[x,y]^2}\)
\end{doublespace}
\begin{doublespace}
\noindent\(\pmb{y\text{:=}1.23;}\\
\pmb{a\text{:=}0.7;}\\
\pmb{b\text{:=}6.7;}\\
\pmb{\text{Out}[38]}\)
\end{doublespace}
\begin{doublespace}
\noindent\(3.16672\)
\end{doublespace}
\begin{doublespace}
\noindent\(\pmb{\text{Sqrt}[\text{Out}[54]]}\)
\end{doublespace}
\begin{doublespace}
\noindent\(1.77953\)
\end{doublespace}
\end{document}
"""
# files in the working directory; CSV data files are picked out below
currentDir=os.listdir()
extension = '.csv'
#Import function for data in directory
def importCSV(directory, ext):
    """Load every file from `directory` (a list of filenames) whose name
    ends with `ext` into the module-level dict `csv`, keyed by filename
    without the extension.

    Side effect: rebinds the global `csv` (used throughout this script)
    and prints the list of imported files.
    """
    global csv
    csv = {}
    files = []
    for name in directory:
        # endswith() instead of a substring test: a name like
        # 'data.csv.bak' must not be treated as a CSV file
        if name.endswith(ext):
            # strip exactly the extension (generalizes the old hard-coded [:-4])
            csv[name[:-len(ext)]] = np.genfromtxt(name, delimiter=",")
            files.append(name)
    print('The following files have been imported: \n' + str(files)+'\n')
#functions for different opgaver
def f1(t, f, tau):
    """Exponentially damped sine: exp(-t/tau) * sin(2*pi*f*t)."""
    decay = np.exp(-t / tau)
    oscillation = np.sin(2 * np.pi * f * t)
    return decay * oscillation
def f2(x, a, b):
    """Quadratic model a*x**2 + b."""
    return b + a * x ** 2
def f3(x, c, d, e, f):
    """Quadratic plus sinusoid: c*x**2 + d*sin(e*x + f)."""
    quadratic = c * x ** 2
    sinusoid = d * np.sin(e * x + f)
    return quadratic + sinusoid
def f4(x, a, b):
    """Power-law model a * x**b."""
    power_term = x ** b
    return a * power_term
#Imports csv files in the current directory
importCSV(currentDir,extension)
print('####################################')
print('Welcome to opgave 1')
print('####################################\n')
# model parameters for the damped oscillation f1
f = 0.629
tau = 19.0
# columns of exc1.csv: 0=time, 1=signal, 2=uncertainty (row 0 skipped,
# presumably a header — confirm against the data file)
time = csv['exc1'][1:,0]
signal = csv['exc1'][1:,1]
error = csv['exc1'][1:,2]
print('Here we have f = '+str(f)+' and t = '+str(tau)+'\n')
print('generating plot!')
plt.figure(1)
line2, = plt.plot(time,f1(time,f,tau), label = "Theoretical Model")
line1 = plt.errorbar(time, signal,yerr = error, fmt='o', label = 'Raw Data')
plt.title('Signal as a function of time')
plt.xlabel('Time [s]')
plt.ylabel('Signal [mV]')
# two separate legends so both the data and the model get their own box
first_legend = plt.legend(handles = [line1], loc=1)
ax = plt.gca().add_artist(first_legend)
second_legend = plt.legend(handles = [line2], loc=4)
plt.savefig('foo.png')
#plt.show()
print('####################################')
print('Welcome to opgave 2')
print('####################################\n')
x = csv['exc2'][1:,0]
y = csv['exc2'][1:,1]
yeps = csv['exc2'][1:,2]
# NOTE(review): ydata is computed but never used below
ydata = csv['exc2'][1:,1]+csv['exc2'][1:,2]
print('We want to fit the data to the model y=a x^2 + b')
# weighted least squares: yeps used as per-point sigma
popt, pcov = curve_fit(f2, x, y, sigma=yeps)
print('The best values for a & b \n')
print(popt)
print()
print('The best values for the variance on a & b\n')
print(np.diag(pcov))
#plt.figure(2)
#plt.plot(x,y,'ro')
#plt.plot(x,f2(x,*popt))
#plt.plot(x,f2(x,2.1,0.45))
#plt.show()
print()
print('####################################')
print('Welcome to opgave 3')
print('####################################\n')
print('We want to fit the data to the model y=c*x^2 + d sin(e*x+f)')
popt1, pcov1 = curve_fit(f3, x, y, sigma=yeps)
print('The best values for c, d, e & f\n')
print(popt1)
print('The best values for the variance on c, d, e & f\n')
print(np.diag(pcov1))
print()
print('####################################')
print('Welcome to opgave 4')
print('####################################\n')
# NOTE(review): chisquare compares x (the abscissa) against the fitted
# model — the observed values should presumably be y, not x; verify.
print('Here we calculate the chisquare for opgave 2: ' +str(chisquare(x,f2(x,*popt))))
print('\n')
print('here we calculate it for opgave 3: ' + str(chisquare(x,f3(x,*popt1))))
print('####################################')
print('Welcome to opgave 5')
print('####################################\n')
# Write the LaTeX error-propagation worksheet; 'with' guarantees the file
# is closed (the original called `f.close` without parentheses, so the
# close never actually happened)
with open('calculations.tex', 'w') as f:
    f.write(message)
# measured values and their uncertainties for the error-propagation example
y=1.23
a=0.7
b=6.7
sigma_y=0.05
sigma_a=0.1
sigma_b=0.4
# NOTE(review): sigma_x is hard-coded (taken from the Mathematica output in
# `message`) rather than computed here — confirm it matches the inputs above
sigma_x=1.8
print('Lets use the error propagation law to calculate x with errors\n')
print('if for whatever reason you want to see the calculations i have generated a latex document with the calculations.')
print('we have: \n y = 1.23, sigma_y = 0.05 \n a = 0.7, sigma_a = 0.1 \n b = 6.7, sigma_b = 0.4')
x = (y**2+b)/a
print('we get that sigma_x = ' + str(sigma_x))
print('Therefore x = ' +str(round(x,1)) +'+-' + str(sigma_x)+'\n')
print('####################################')
print('Welcome to opgave 6')
print('####################################\n')
print('There is no text for this part! Please wait for the plots to be generated')
# fixed model parameters for the residual analysis (not fitted here)
a = 0.2
b = 1.9
x = csv['exc6'][1:,0]
y = csv['exc6'][1:,1]
e = csv['exc6'][1:,2]
model=f4(x,a,b)
# residuals: model minus data
res = model - y
plt.figure(3)
plt.errorbar(x,res,yerr=e,fmt='ro')
# zero line for reference
plt.plot([0, 18], [0, 0], 'k--', lw=2)
plt.title('Residual plot')
plt.xlabel('x')
plt.ylabel('Residual')
# inset histogram of the residuals (note: `a` is rebound to the inset axes)
a = plt.axes([0.65, 0.6, 0.2, 0.2])
# NOTE(review): `normed=1` was removed in matplotlib 3.x — use density=True
n, bins, patches = plt.hist(res, 30, normed=1)
plt.title('Histogram')
plt.xticks([])
plt.yticks([])
print('####################################')
print('Welcome to opgave 7')
print('####################################\n')
x = csv['exc7'][1:,0]
y = csv['exc7'][1:,1]
mean = np.mean(x)
std = np.std(x)
weightedList=[]
print('The mean of the values are ' + str(mean))
print('The std of the values are ' + str(std) + '\n')
#plt.figure(4)
#n, bins, patches = plt.hist(x,50,normed=1, color='g')
#plt.axvline(x.mean(), color='k', linestyle='dashed', linewidth=3)
print('We need to remove the values that are less than 3x'+ str(round(std,1)) +
      ' and the values that are greater that 3x' +str(round(std,1))+ '\n')
# 3-sigma outlier filter around the mean
for i in x:
    if i > mean-3*std and i < mean+3*std:
        weightedList.append(i)
#print(weightedList)
#f = open('test.csv','w')
#for r in weightedList:
#    f.write(str(r)+'\n')
#f.close
# statistics recomputed on the filtered sample
newmean = np.mean(weightedList)
newstd = np.std(weightedList)
print('The new mean and new std are in order ' + str(newmean) + ' and ' + str(newstd))
print('\n\n\n#########################')
print('This is the end, thanks for running this script')
print('Regards Christopher Carman')
print('#########################')
plt.show()
|
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.schema import (
CheckConstraint,
Column,
ForeignKey,
Table,
UniqueConstraint,
)
from sqlalchemy.types import JSON, Integer, String
from iheroes_api.infra.database.models.user import User
from iheroes_api.infra.database.sqlalchemy import metadata
# SQLAlchemy Core table for heroes; every hero belongs to one user.
Hero = Table(
    "hero",
    metadata,
    Column("id", Integer, primary_key=True),
    # owning user; no ondelete behavior is specified here
    Column("user_id", Integer, ForeignKey(User.c.id), nullable=False),
    Column(
        "name", String(100), nullable=False, default="Unknown", server_default="Unknown"
    ),
    Column("nickname", String(100), nullable=False),
    # PostgreSQL enum of power tiers
    Column("power_class", ENUM("S", "A", "B", "C", name="power_class"), nullable=False),
    # free-form JSON; presumably validated at the application layer
    Column("location", JSON, nullable=False),
    # DB-side length guards mirroring the String(100) column types above
    CheckConstraint("length(name) >= 1 AND length(name) <= 100", name="name_length"),
    CheckConstraint(
        "length(nickname) >= 1 AND length(nickname) <= 100", name="nickname_length"
    ),
    # the (name, nickname) pair must be unique across heroes
    UniqueConstraint("name", "nickname", name="name_nickname"),
)
|
class PriorityQueueADT:
    """Abstract interface for a min-priority queue.

    Concrete subclasses must implement add, min, remove_min and __len__;
    is_empty is provided in terms of __len__.
    """

    def add(self, key, value):
        """Insert a (key, value) entry; smaller keys have higher priority."""
        raise NotImplementedError

    def min(self):
        """Return (without removing) the entry with the smallest key."""
        raise NotImplementedError

    def remove_min(self):
        """Remove and return the entry with the smallest key."""
        raise NotImplementedError

    def is_empty(self):
        """Return True when the queue holds no entries."""
        return not len(self)

    def __len__(self):
        """Return the number of stored entries."""
        raise NotImplementedError
import string
import random
import numpy as np
# Generate letter-stream test files of increasing size: one pass with a
# uniform letter distribution (r == 0) and two with a normal distribution.
for r in range(3):
    for size in [100, 1000, 10000, 100000, 1000000, 10000000]:
        if r == 0:
            # uniform: each letter equally likely (the original wrapped the
            # single choice in a pointless ''.join)
            stream_list = [random.choice(string.ascii_lowercase) for _ in range(size)]
        else:
            # normal: letter index ~ N(13, 4.5); indices outside a..z fall
            # back to a uniformly random letter.
            # astype(int): the `np.int` alias was removed in NumPy 1.24,
            # so the original astype(np.int) crashes on modern NumPy.
            d = np.random.normal(13, 4.5, size).astype(int)
            stream_list = [string.ascii_lowercase[i]
                           if 0 <= i < len(string.ascii_lowercase)
                           else random.choice(string.ascii_lowercase)
                           for i in d]
        with open(str(size) + "-" + ("Uniform" if r == 0 else "Normal") + " Distribution" + ".txt", "w") as file01:
            # single batched write instead of one write() per character;
            # the produced bytes are identical
            file01.write(''.join(stream_list))
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import chainer
from chainer import training
from chainer.training import extensions
import dataset
from models.vgg16 import VGG16
from models.generators import FCN32s, FCN16s, FCN8s
from models.discriminators import (
LargeFOV, LargeFOVLight, SmallFOV, SmallFOVLight, SPPDiscriminator)
from updater import GANUpdater, NonAdversarialUpdater
from extensions import TestModeEvaluator
import utils
def parse_args(generators, discriminators, updaters):
    """Build and evaluate the command line for the adversarial-segmentation
    trainer.

    The keys of the three dicts become the valid choices for the
    --generator, --discriminator and --updater options respectively.
    """
    p = argparse.ArgumentParser(description='Semantic Segmentation using Adversarial Networks')
    # model architecture selection
    p.add_argument('--generator', choices=generators.keys(), default='fcn32s',
                   help='Generator(segmentor) architecture')
    p.add_argument('--discriminator', choices=discriminators.keys(), default='largefov',
                   help='Discriminator architecture')
    p.add_argument('--updater', choices=updaters.keys(), default='gan',
                   help='Updater')
    # pretrained weights
    p.add_argument('--initgen_path', default='pretrained_model/vgg16.npz',
                   help='Pretrained model of generator')
    p.add_argument('--initdis_path', default=None,
                   help='Pretrained model of discriminator')
    # optimization / runtime settings
    p.add_argument('--batchsize', '-b', type=int, default=1,
                   help='Number of images in each mini-batch')
    p.add_argument('--iteration', '-i', type=int, default=100000,
                   help='Number of sweeps over the dataset to train')
    p.add_argument('--gpu', '-g', type=int, default=-1,
                   help='GPU ID (negative value indicates CPU)')
    p.add_argument('--out', '-o', default='snapshot',
                   help='Directory to output the result')
    p.add_argument('--resume', '-r', default='',
                   help='Resume the training from snapshot')
    # reporting intervals (in iterations)
    p.add_argument('--evaluate_interval', type=int, default=1000,
                   help='Interval of evaluation')
    p.add_argument('--snapshot_interval', type=int, default=10000,
                   help='Interval of snapshot')
    p.add_argument('--display_interval', type=int, default=10,
                   help='Interval of displaying log to console')
    return p.parse_args()
def load_pretrained_model(initmodel_path, initmodel, model, n_class, device):
    """Load pretrained weights from `initmodel_path` into `initmodel`, then
    copy the matching parameters over to `model` and return it.

    NOTE(review): `n_class` and `device` are accepted but never used here —
    kept for call-site compatibility; confirm whether they can be dropped.
    """
    print('Initializing the model')
    chainer.serializers.load_npz(initmodel_path, initmodel)
    # copy whatever layers align between the init model and the target model
    utils.copy_chainermodel(initmodel, model)
    return model
def make_optimizer(model, lr=1e-10, momentum=0.99):
    """Create a MomentumSGD optimizer bound to `model`, with L2 weight decay."""
    opt = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    opt.setup(model)
    # 0.0005 weight decay, registered under the name 'hook_dec'
    opt.add_hook(chainer.optimizer.WeightDecay(0.0005), 'hook_dec')
    return opt
def main():
    """Entry point: build datasets, models and optimizers per the CLI
    arguments, then run the (adversarial or standard) training loop.
    """
    # registry of generator architectures: (model, initmodel, learning_rate)
    generators = {
        'fcn32s': (FCN32s, VGG16, 1e-10),  # (model, initmodel, learning_rate)
        'fcn16s': (FCN16s, FCN32s, 1e-12),
        'fcn8s': (FCN8s, FCN16s, 1e-14),
    }
    # registry of discriminators: (model, initmodel, learning_rate, L_bce_weight)
    discriminators = {
        'largefov': (LargeFOV, LargeFOV, 0.1, 1.0),  # (model, initmodel, learning_rate, L_bce_weight)
        'largefov-light': (LargeFOVLight, LargeFOVLight, 0.1, 1.0),
        'smallfov': (SmallFOV, SmallFOV, 0.1, 0.1),
        'smallfov-light': (SmallFOVLight, SmallFOVLight, 0.2, 1.0),
        'sppdis': (SPPDiscriminator, SPPDiscriminator, 0.1, 1.0),
    }
    updaters = {
        'gan': GANUpdater,
        'standard': NonAdversarialUpdater
    }
    args = parse_args(generators, discriminators, updaters)

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# iteration: {}'.format(args.iteration))

    # dataset
    train = dataset.PascalVOC2012Dataset('train')
    val = dataset.PascalVOC2012Dataset('val')
    n_class = len(train.label_names)
    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    val_iter = chainer.iterators.SerialIterator(val, args.batchsize, repeat=False, shuffle=False)

    # Set up a neural network to train and an optimizer
    if args.updater=='gan':
        # adversarial setup: generator + discriminator, one optimizer each
        gen_cls, initgen_cls, gen_lr = generators[args.generator]
        dis_cls, initdis_cls, dis_lr, L_bce_weight = discriminators[args.discriminator]
        print('# generator: {}'.format(gen_cls.__name__))
        print('# discriminator: {}'.format(dis_cls.__name__))
        print('')
        # Initialize generator
        if args.initgen_path:
            gen, initgen = gen_cls(n_class), initgen_cls(n_class)
            gen = load_pretrained_model(args.initgen_path, initgen, gen, n_class, args.gpu)
        else:
            gen = gen_cls(n_class)
        # Initialize discriminator
        if args.initdis_path:
            dis, initdis = dis_cls(n_class), initdis_cls(n_class)
            dis = load_pretrained_model(args.initdis_path, initdis, dis, n_class, args.gpu)
        else:
            dis = dis_cls(n_class)
        if args.gpu >= 0:
            chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
            gen.to_gpu()  # Copy the model to the GPU
            dis.to_gpu()
        opt_gen = make_optimizer(gen, gen_lr)
        opt_dis = make_optimizer(dis, dis_lr)
        # the GAN updater expects dicts keyed by 'gen'/'dis'
        model={'gen':gen,'dis':dis}
        optimizer={'gen': opt_gen, 'dis': opt_dis}
    elif args.updater=='standard':
        # non-adversarial setup: a single segmentation model
        model_cls, initmodel_cls, lr = generators[args.generator]
        L_bce_weight = None
        print('# model: {}'.format(model_cls.__name__))
        print('')
        if args.initgen_path:
            model, initmodel = model_cls(n_class), initmodel_cls(n_class)
            model = load_pretrained_model(args.initgen_path, initmodel, model, n_class, args.gpu)
        else:
            model = model_cls(n_class)
        if args.gpu >= 0:
            chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
            model.to_gpu()  # Copy the model to the GPU
        optimizer = make_optimizer(model, lr)

    # Set up a trainer
    updater = updaters[args.updater](
        model=model,
        iterator=train_iter,
        optimizer=optimizer,
        device=args.gpu,
        L_bce_weight=L_bce_weight,
        n_class=n_class,)
    trainer = training.Trainer(updater, (args.iteration, 'iteration'), out=args.out)

    evaluate_interval = (args.evaluate_interval, 'iteration')
    snapshot_interval = (args.snapshot_interval, 'iteration')
    display_interval = (args.display_interval, 'iteration')

    # periodic evaluation on the validation set (in test mode)
    # NOTE(review): triggered on snapshot_interval, not evaluate_interval —
    # confirm whether that is intended.
    trainer.extend(
        TestModeEvaluator(
            val_iter, updater, device=args.gpu),
        trigger=snapshot_interval,
        invoke_before_training=False)
    trainer.extend(
        extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
        trigger=snapshot_interval)
    if args.updater=='gan':
        # snapshot both networks and report GAN-specific metrics
        trainer.extend(extensions.snapshot_object(
            gen, 'gen_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
        trainer.extend(extensions.snapshot_object(
            dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
        trainer.extend(extensions.LogReport(trigger=display_interval))
        trainer.extend(extensions.PrintReport([
            'iteration',
            'gen/loss', 'validation/gen/loss',
            'dis/loss',
            'gen/accuracy', 'validation/gen/accuracy',
            'gen/iu', 'validation/gen/iu',
            'elapsed_time',
        ]), trigger=display_interval)
    elif args.updater=='standard':
        trainer.extend(extensions.snapshot_object(
            model, 'model_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
        trainer.extend(extensions.LogReport(trigger=display_interval))
        trainer.extend(extensions.PrintReport([
            'iteration',
            'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy',
            'main/iu', 'validation/main/iu',
            'elapsed_time',
        ]), trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=1))

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    print('\nRun the training')
    trainer.run()
# standard script entry point
if __name__ == '__main__':
    main()
|
import time
from multiprocessing import Process
def ask_user():
    """Prompt for a name, greet the user, and report how long that took."""
    started = time.time()
    name = input("Enter your name")
    print(f"Hello, {name}")
    print(f"ask_usr {time.time() - started}")
def do_math():
    """CPU-bound demo task: square 0..1,999,999 and report elapsed time."""
    started = time.time()
    print("Start calculation...")
    # result is deliberately discarded — this exists only to burn CPU
    _ = [n ** 2 for n in range(2000000)]
    print(f"Calculataion time {time.time() - started}")
# Guard required for multiprocessing: on platforms that use the 'spawn'
# start method (Windows, macOS default since Python 3.8) each child
# re-imports this module, and without the guard would recursively
# re-launch processes instead of just picking up its target function.
if __name__ == "__main__":
    start = time.time()
    process1 = Process(target=do_math)
    process2 = Process(target=ask_user)
    # run both tasks concurrently in separate processes
    process1.start()
    process2.start()
    # wait for both to finish before reporting total wall time
    process1.join()
    process2.join()
    print(f"Total time taken by 2 process: {time.time() - start}")
# coding: cp949
# Tutorial: basic `if` syntax. An indented block may not stand on its own —
# it must follow if/for/while.
money = True
#if money:
#print("택시를 타고 가라") # an `if` must be followed by at least one indented statement
#if money:
#    print("택시를 타고 가라") # indentation may use either spaces or tabs
#if money:
#    print("현금이 있는것으로 확인 되었음") # statements sharing the same indentation
#    print("택시타고,,") # form a single statement block
#if money:
#    print("현금이 있는것으로 확인 되었음") # indentation must be consistent within a block
#    print("택시타고,,")
#else:
#    print("걸어 가라")
if money:
    print("현금이 있는것으로 확인 되었음")
    print("택시타고,,")
else:
    print("현금이 없네요")
    print("걸어가세요")
print("프로그램을 종료합니다") # top-level statement, independent of the if/else blocks
|
import model
import view
import pygame
"""
This is controler.
"""
game_engine = model.GameEngine()
graphical_view = view.GraphicalView(game_engine)
while game_engine.running:
#pass event to model and view
for event in pygame.event.get():
graphical_view.notify(event)
game_engine.notify(event)
game_engine.update()
graphical_view.update()
|
"""Contains the base class for flippers."""
import copy
from mpf.core.device_monitor import DeviceMonitor
from mpf.devices.driver import ReconfiguredDriver
from mpf.core.system_wide_device import SystemWideDevice
from mpf.devices.switch import ReconfiguredSwitch
@DeviceMonitor(_enabled="enabled")
class Flipper(SystemWideDevice):
"""Represents a flipper in a pinball machine. Subclass of Device.
Contains several methods for actions that can be performed on this flipper,
like :meth:`enable`, :meth:`disable`, etc.
Flippers have several options, including player buttons, EOS swtiches,
multiple coil options (pulsing, hold coils, etc.)
Args:
machine: A reference to the machine controller instance.
name: A string of the name you'll refer to this flipper object as.
"""
config_section = 'flippers'
collection = 'flippers'
class_label = 'flipper'
def __init__(self, machine, name):
"""Initialise flipper."""
super().__init__(machine, name)
self.main_coil = None
self.hold_coil = None
self.switch = None
self.eos_switch = None
self._enabled = False
def _initialize(self):
if "debounce" not in self.config['switch_overwrite']:
self.config['switch_overwrite']['debounce'] = "quick"
if "debounce" not in self.config['eos_switch_overwrite']:
self.config['eos_switch_overwrite']['debounce'] = "quick"
self.platform = self.config['main_coil'].platform
self.switch = ReconfiguredSwitch(self.config['activation_switch'],
self.config['switch_overwrite'],
False)
self._reconfigure_drivers()
if self.config['eos_switch']:
self.eos_switch = ReconfiguredSwitch(self.config['eos_switch'],
self.config['eos_switch_overwrite'],
False)
self.debug_log('Platform Driver: %s', self.platform)
if self.config['power_setting_name']:
self.machine.events.add_handler("machine_var_{}".format(self.config['power_setting_name']),
self._power_changed)
self.debug_log('Platform Driver: %s', self.platform)
if self.config['include_in_ball_search']:
self.config['playfield'].ball_search.register(
self.config['ball_search_order'], self._ball_search, self.name)
def _reconfigure_drivers(self):
self.main_coil = self._reconfigure_driver(self.config['main_coil'], self.config['main_coil_overwrite'])
if self.config['hold_coil']:
self.hold_coil = self._reconfigure_driver(self.config['hold_coil'], self.config['hold_coil_overwrite'])
def _reconfigure_driver(self, driver, overwrite_config):
if self.config['power_setting_name']:
overwrite_config = copy.deepcopy(overwrite_config)
pulse_ms = driver.config.get(
"pulse_ms", overwrite_config.get("pulse_ms", self.machine.config['mpf']['default_pulse_ms']))
settings_factor = self.machine.settings.get_setting_value(self.config['power_setting_name'])
overwrite_config['pulse_ms'] = int(pulse_ms * settings_factor)
self.info_log("Configuring driver %s with a pulse time of %s ms for flipper",
driver.name, overwrite_config['pulse_ms'])
return ReconfiguredDriver(driver, overwrite_config)
def _power_changed(self, **kwargs):
del kwargs
self._reconfigure_drivers()
def enable(self, **kwargs):
"""Enable the flipper by writing the necessary hardware rules to the hardware controller.
The hardware rules for coils can be kind of complex given all the
options, so we've mapped all the options out here. We literally have
methods to enable the various rules based on the rule letters here,
which we've implemented below. Keeps it easy to understand. :)
Note there's a platform feature saved at:
self.machine.config['platform']['hw_enable_auto_disable']. If True, it
means that the platform hardware rules will automatically disable a coil
that has been enabled when the trigger switch is disabled. If False, it
means the hardware platform needs its own rule to disable the coil when
the switch is disabled. Methods F and G below check for that feature
setting and will not be applied to the hardware if it's True.
Two coils, using EOS switch to indicate the end of the power stroke:
Rule Type Coil Switch Action
A. Enable Main Button active
D. Enable Hold Button active
E. Disable Main EOS active
One coil, using EOS switch (not implemented):
Rule Type Coil Switch Action
A. Enable Main Button active
H. PWM Main EOS active
Two coils, not using EOS switch:
Rule Type Coil Switch Action
B. Pulse Main Button active
D. Enable Hold Button active
One coil, not using EOS switch:
Rule Type Coil Switch Action
C. Pulse/PWM Main button active
Use EOS switch for safety (for platforms that support mutiple switch
rules). Note that this rule is the letter "i", not a numeral 1.
I. Enable power if button is active and EOS is not active
"""
del kwargs
# prevent duplicate enable
if self._enabled:
return
self._enabled = True
self.debug_log('Enabling flipper with config: %s', self.config)
# Apply the proper hardware rules for our config
if not self.config['hold_coil']: # single coil
self._enable_single_coil_rule()
elif not self.config['use_eos']: # two coils, no eos
self._enable_main_coil_pulse_rule()
self._enable_hold_coil_rule()
else: # two coils, cutoff main on EOS
self._enable_main_coil_eos_cutoff_rule()
self._enable_hold_coil_rule()
# todo detect bad EOS and program around it
def disable(self, **kwargs):
"""Disable the flipper.
This method makes it so the cabinet flipper buttons no longer control
the flippers. Used when no game is active and when the player has
tilted.
"""
del kwargs
self.debug_log("Disabling")
self.main_coil.clear_hw_rule(self.switch)
if self.eos_switch and self.config['use_eos']:
self.main_coil.clear_hw_rule(self.eos_switch)
if self.hold_coil:
self.hold_coil.clear_hw_rule(self.switch)
self._enabled = False
def _enable_single_coil_rule(self):
self.debug_log('Enabling single coil rule')
self.main_coil.set_pulse_on_hit_and_enable_and_release_rule(self.switch)
def _enable_main_coil_pulse_rule(self):
self.debug_log('Enabling main coil pulse rule')
self.main_coil.set_pulse_on_hit_and_release_rule(self.switch)
def _enable_hold_coil_rule(self):
self.debug_log('Enabling hold coil rule')
# TODO: why are we pulsing the hold coil?
self.hold_coil.set_pulse_on_hit_and_enable_and_release_rule(self.switch)
def _enable_main_coil_eos_cutoff_rule(self):
self.debug_log('Enabling main coil EOS cutoff rule')
self.main_coil.set_pulse_on_hit_and_enable_and_release_and_disable_rule(
self.switch, self.eos_switch)
def sw_flip(self, include_switch=False):
"""Activate the flipper via software as if the flipper button was pushed.
This is needed because the real flipper activations are handled in
hardware, so if you want to flip the flippers with the keyboard or OSC
interfaces, you have to call this method.
Note this method will keep this flipper enabled until you call
sw_release().
"""
if include_switch:
self.machine.switch_controller.process_switch(
name=self.config['activation_switch'].name,
state=1,
logical=True)
if self.config['hold_coil']:
self.config['main_coil'].pulse()
self.config['hold_coil'].enable()
else:
self.config['main_coil'].enable()
def sw_release(self, include_switch=False):
"""Deactive the flipper via software as if the flipper button was released.
See the documentation for sw_flip() for details.
"""
if include_switch:
self.machine.switch_controller.process_switch(
name=self.config['activation_switch'].name,
state=0,
logical=True)
# disable the flipper coil(s)
self.config['main_coil'].disable()
if self.config['hold_coil']:
self.config['hold_coil'].disable()
def _ball_search(self, phase, iteration):
del phase
del iteration
self.sw_flip()
self.machine.delay.add(self.config['ball_search_hold_time'],
self.sw_release,
'flipper_{}_ball_search'.format(self.name))
return True
|
"""
Functions for generating distance restraints from
evolutionary couplings and secondary structure predictions
Authors:
Thomas A. Hopf
Anna G. Green (docking restraints)
"""
from pkg_resources import resource_filename
from evcouplings.utils.config import read_config_file
from evcouplings.utils.constants import AA1_to_AA3
from evcouplings.utils.system import verify_resources
def _folding_config(config_file=None):
    """
    Load CNS folding configuration

    Parameters
    ----------
    config_file: str, optional (default: None)
        Path to configuration file; when None, the default
        configuration shipped with the package
        (cns_templates/restraints.yml) is used.

    Returns
    -------
    dict
        Loaded configuration
    """
    path = config_file
    # fall back to the configuration bundled with the package
    if path is None:
        path = resource_filename(__name__, "cns_templates/restraints.yml")

    # fail early with a clear message if the file is missing or empty
    verify_resources(
        "Folding config file does not exist or is empty", path
    )

    return read_config_file(path)
def _docking_config(config_file=None):
    """
    Load docking configuration

    Parameters
    ----------
    config_file: str, optional (default: None)
        Path to configuration file. If None,
        loads default configuration included
        with package.

    Returns
    -------
    dict
        Loaded configuration
    """
    if config_file is None:
        # get path of config within package
        config_file = resource_filename(
            __name__, "cns_templates/haddock_restraints.yml"
        )

    # check if config file exists and read
    # (message fixed: this loader handles the docking, not folding, config)
    verify_resources(
        "Docking config file does not exist or is empty", config_file
    )

    return read_config_file(config_file)
def secstruct_dist_restraints(residues, output_file,
                              restraint_formatter, config_file=None,
                              secstruct_column="sec_struct_3state"):
    """
    Create .tbl file with distance restraints
    based on secondary structure prediction

    Logic based on choose_CNS_constraint_set.m,
    lines 519-1162

    Parameters
    ----------
    residues : pandas.DataFrame
        Table containing positions (column i), residue
        type (column A_i), and secondary structure for
        each position
    output_file : str
        Path to file in which restraints will be saved
    restraint_formatter : function
        Function called to create string representation of restraint
    config_file : str, optional (default: None)
        Path to config file with folding settings. If None,
        will use default settings included in package
        (restraints.yml).
    secstruct_column : str, optional (default: sec_struct_3state)
        Column name in residues dataframe from which secondary
        structure will be extracted (has to be H, E, or C).
    """
    def _range_equal(start, end, char):
        """
        Check if secondary structure substring consists
        of one secondary structure state
        """
        range_str = "".join(
            [secstruct[pos] for pos in range(start, end + 1)]
        )
        return range_str == len(range_str) * char

    # get configuration (default or user-supplied)
    cfg = _folding_config(config_file)["secstruct_distance_restraints"]

    # extract amino acids and secondary structure into dictionary
    # (keyed by residue position i)
    secstruct = dict(zip(residues.i, residues[secstruct_column]))
    aa = dict(zip(residues.i, residues.A_i))

    i_min = residues.i.min()
    i_max = residues.i.max()

    weight = cfg["weight"]

    with open(output_file, "w") as f:
        # go through secondary structure elements
        for sse, name in [("E", "strand"), ("H", "helix")]:
            # get distance restraint subconfig for current
            # secondary structure state
            sse_cfg = cfg[name]

            # define distance constraints based on increasing
            # sequence distance, and test if the secondary structure
            # element reaches out that far. Specific distance restraints
            # are defined in config file for each sequence_dist
            for seq_dist, atoms in sorted(sse_cfg.items()):
                # now look at each position and the secondary
                # structure upstream to define the appropriate restraints
                for i in range(i_min, i_max - seq_dist + 1):
                    j = i + seq_dist

                    # test if upstream residues all have the
                    # same secondary structure state
                    if _range_equal(i, j, sse):
                        # go through all atom pairs and put constraints on them
                        for (atom1, atom2), (dist, range_) in atoms.items():
                            # can't put CB restraint if residue is a glycine
                            # (glycine has no beta carbon)
                            if ((atom1 == "CB" and aa[i] == "G") or
                                    (atom2 == "CB" and aa[j] == "G")):
                                continue

                            # write distance restraint
                            r = restraint_formatter(
                                i, atom1, j, atom2,
                                dist=dist,
                                lower=range_,
                                upper=range_,
                                weight=weight,
                                comment=AA1_to_AA3[aa[i]] + " " + AA1_to_AA3[aa[j]]
                            )
                            f.write(r + "\n")
def secstruct_angle_restraints(residues, output_file,
                               restraint_formatter, config_file=None,
                               secstruct_column="sec_struct_3state"):
    """
    Create .tbl file with dihedral angle restraints
    based on secondary structure prediction

    Logic based on make_cns_angle_constraints.pl

    Parameters
    ----------
    residues : pandas.DataFrame
        Table containing positions (column i), residue
        type (column A_i), and secondary structure for
        each position
    output_file : str
        Path to file in which restraints will be saved
    restraint_formatter : function
        Function called to create string representation of restraint
    config_file : str, optional (default: None)
        Path to config file with folding settings. If None,
        will use default settings included in package
        (restraints.yml).
    secstruct_column : str, optional (default: sec_struct_3state)
        Column name in residues dataframe from which secondary
        structure will be extracted (has to be H, E, or C).
    """
    def _phi(pos, sse):
        # phi dihedral of residue pos+1: C(pos), N(pos+1), CA(pos+1), C(pos+1)
        sse_cfg = cfg[sse]["phi"]
        return restraint_formatter(
            pos, "C",
            pos + 1, "N",
            pos + 1, "CA",
            pos + 1, "C",
            **sse_cfg
        )

    def _psi(pos, sse):
        # psi dihedral of residue pos: N(pos), CA(pos), C(pos), N(pos+1)
        sse_cfg = cfg[sse]["psi"]
        return restraint_formatter(
            pos, "N",
            pos, "CA",
            pos, "C",
            pos + 1, "N",
            **sse_cfg
        )

    # get configuration (default or user-supplied)
    cfg = _folding_config(config_file)["secstruct_angle_restraints"]

    # extract secondary structure into position -> state dictionary
    # (removed unused amino-acid dictionary that was never read)
    secstruct = dict(zip(residues.i, residues[secstruct_column]))

    i_min = residues.i.min()
    i_max = residues.i.max()

    with open(output_file, "w") as f:
        # go through all positions
        for i in range(i_min, i_max - 1):
            # check if two subsequent identical secondary structure states
            # helix
            if secstruct[i] == "H" and secstruct[i + 1] == "H":
                f.write(_phi(i, "helix") + "\n")
                f.write(_psi(i, "helix") + "\n")
            # strand
            elif secstruct[i] == "E" and secstruct[i + 1] == "E":
                f.write(_phi(i, "strand") + "\n")
                f.write(_psi(i, "strand") + "\n")
def ec_dist_restraints(ec_pairs, output_file,
                       restraint_formatter, config_file=None):
    """
    Create .tbl file with distance restraints
    based on evolutionary couplings

    Logic based on choose_CNS_constraint_set.m,
    lines 449-515

    Parameters
    ----------
    ec_pairs : pandas.DataFrame
        Table with EC pairs that will be turned
        into distance restraints
        (with columns i, j, A_i, A_j)
    output_file : str
        Path to file in which restraints will be saved
    restraint_formatter : function
        Function called to create string representation of restraint
    config_file : str, optional (default: None)
        Path to config file with folding settings. If None,
        will use default settings included in package
        (restraints.yml).
    """
    # get configuration (default or user-supplied)
    cfg = _folding_config(config_file)["pair_distance_restraints"]

    with open(output_file, "w") as f:
        # create distance restraints per EC row in table
        for idx, ec in ec_pairs.iterrows():
            i, j, aa_i, aa_j = ec["i"], ec["j"], ec["A_i"], ec["A_j"]
            # up to three restraints per EC, one per restraint type
            for type_ in ["c_alpha", "c_beta", "tertiary_atom"]:
                tcfg = cfg[type_]
                # check if we want this type of restraint first
                if not tcfg["use"]:
                    continue

                # restraint weighting: currently only support none,
                # or fixed numerical value
                if isinstance(tcfg["weight"], str):
                    # TODO: implement restraint weighting functions eventually
                    raise NotImplementedError(
                        "Restraint weighting functions not yet implemented: " +
                        tcfg["weight"]
                    )
                else:
                    weight = tcfg["weight"]

                # determine which atoms to put restraint on
                # can be residue-type specific dict or fixed value
                atoms = tcfg["atoms"]
                if isinstance(atoms, dict):
                    atom_i = atoms[aa_i]
                    atom_j = atoms[aa_j]
                else:
                    atom_i = atoms
                    atom_j = atoms

                # skip if we would put a CB restraint on glycine residues;
                # this should be generalized to skip any invalid selection eventually
                if ((aa_i == "G" and atom_i == "CB") or
                        (aa_j == "G" and atom_j == "CB")):
                    continue

                # write restraint
                r = restraint_formatter(
                    i, atom_i, j, atom_j,
                    dist=tcfg["dist"],
                    lower=tcfg["lower"],
                    upper=tcfg["upper"],
                    weight=weight,
                    comment=AA1_to_AA3[aa_i] + " " + AA1_to_AA3[aa_j]
                )
                f.write(r + "\n")
def docking_restraints(ec_pairs, output_file,
                       restraint_formatter, config_file=None):
    """
    Create .tbl file with distance restraints
    for docking

    Parameters
    ----------
    ec_pairs : pandas.DataFrame
        Table with EC pairs that will be turned
        into distance restraints
        (with columns i, j, A_i, A_j, segment_i, segment_j)
    output_file : str
        Path to file in which restraints will be saved
    restraint_formatter : function
        Function called to create string representation of restraint
    config_file : str, optional (default: None)
        Path to config file with docking settings. If None,
        will use default settings included in package
        (haddock_restraints.yml).
    """
    # get configuration (default or user-supplied)
    cfg = _docking_config(config_file)["docking_restraints"]

    with open(output_file, "w") as f:
        # create distance restraints per EC row in table
        for idx, ec in ec_pairs.iterrows():
            i, j, aa_i, aa_j, segment_i, segment_j = (
                ec["i"], ec["j"], ec["A_i"], ec["A_j"], ec["segment_i"], ec["segment_j"]
            )
            # extract chain names based on segment names
            # A_1 -> A, B_1 -> B
            chain_i = segment_i[0]
            chain_j = segment_j[0]
            # write i to j restraint (restraint is between chains,
            # so the chain letter takes the place of the atom name)
            r = restraint_formatter(
                i, chain_i, j, chain_j,
                dist=cfg["dist"],
                lower=cfg["lower"],
                upper=cfg["upper"],
            )
            f.write(r + "\n")
|
import nacl.encoding
import nacl.signing
# Generate an Ed25519 signing keypair for Bob.
bob_priv_key = nacl.signing.SigningKey.generate()
bob_pub_key = bob_priv_key.verify_key
# Hex-encode the public key so it can be printed / shared as text.
bob_pub_key_hex = bob_pub_key.encode(encoder=nacl.encoding.HexEncoder)
print(f"Bob Public Key: {bob_pub_key_hex}")
# Sign a message; the returned SignedMessage carries signature + message.
signed = bob_priv_key.sign(b"Some important message")
print(signed)
# For Linked List problems that need nodes to be rearranged,
# use this implementation of a linked list, which has a head as well as a tail pointer.
# The append method has different uses, so practice it.
# Linked List Implementation
# for problems where nodes have to be rearranged
class Node:
    """A singly linked list node: a data payload plus a next pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None

    def set_data(self, data):
        """Replace the stored payload."""
        self.data = data

    def get_data(self):
        """Return the stored payload."""
        return self.data

    def set_next(self, node):
        """Link this node to its successor."""
        self.next = node

    def get_next(self):
        """Return the successor node (None at the end of a list)."""
        return self.next
class LinkedList:
    """Singly linked list tracking head and tail so appends are O(1)."""

    def __init__(self):
        self.head = None
        self.tail = None

    def get_head(self):
        """Return the first node (None if empty)."""
        return self.head

    def set_head(self, node):
        """Point the list's head at an existing node chain."""
        self.head = node

    def get_tail(self):
        """Return the last node (None if empty)."""
        return self.tail

    def set_tail(self, node):
        """Point the list's tail at an existing node."""
        self.tail = node

    def append(self, node):
        """Attach node at the tail; it becomes the head if the list is empty."""
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
# Build a small list by hand: 0 -> 1 -> 0 -> 2
ll = LinkedList()
node1 = Node(0)
node2 = Node(1)
node3 = Node(0)
node4 = Node(2)
node1.next = node2
node2.next = node3
node3.next = node4
ll.set_head(node1)
ll.set_tail(node4)
# append goes through the tail pointer: list is now 0 -> 1 -> 0 -> 2 -> 1
ll.append(Node(1))
# sort_list(ll)
|
import copy
class Solution:
    def letterCombinations(self, digits: str):
        """Return all letter combinations a phone-keypad digit string maps to.

        Parameters
        ----------
        digits : str
            String of digits '2'-'9' (other characters are ignored).

        Returns
        -------
        list of str
            All combinations, one letter per digit, in the same order the
            original implementation produced (earlier-digit letter varies
            fastest). Empty list if no valid digits are given (previously
            this returned None, which broke callers expecting a list).
        """
        keys = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6': 'mno',
                '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
        words = [keys[d] for d in digits if d in keys]
        if not words:
            return []
        # Cross-product expansion: extend every existing prefix with each
        # letter of the next digit. Iterating letters in the outer loop
        # keeps the original output order (prefix varies fastest).
        combos = ['']
        for letters in words:
            combos = [prefix + ch for ch in letters for prefix in combos]
        return combos
# Quick manual check of the solution.
so = Solution()
print(so.letterCombinations('923'))
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Exhaust_Fan_Enable_Command import Exhaust_Fan_Enable_Command
class AHU_Exhaust_Fan_Enable_Command(Exhaust_Fan_Enable_Command):
    """Brick 1.0.2 point class: AHU exhaust fan enable command (rdfalchemy subject)."""
    # RDF type IRI in the Brick 1.0.2 namespace for this point class.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').AHU_Exhaust_Fan_Enable_Command
|
# -*- encoding: UTF-8
import unittest
import z3
import libirpy
import libirpy.unittest
import libirpy.solver as solver
import libirpy.util as util
import nickel.unwindings as ni
import datatypes as dt
import spec
import spec.label as l
import state
import ctx
from prototypes import proto
# Alias so the test classes below can simply subclass TestCase.
TestCase = libirpy.unittest.IrpyTestCase
def flatten(lst):
    """Flatten one level of tuples in lst and return the result as a tuple.

    Non-tuple items are kept as-is; tuple items are expanded in place.
    """
    result = []
    for item in lst:
        result.extend(item if isinstance(item, tuple) else (item,))
    return tuple(result)
def spec_args(args):
    """Normalize prototype arguments for use with the Python spec functions.

    z3 `concat` expressions (values packed from several words) are split
    back into their component words, reversed so the low word comes first;
    all other arguments pass through unchanged.

    Returns a list of normalized arguments.
    """
    # Renamed the accumulator: the original local `spec_args` shadowed
    # this function's own name.
    normalized = []
    for arg in args:
        if z3.is_expr(arg) and arg.sexpr().startswith('(concat'):
            normalized.append(arg.children()[::-1])
        else:
            normalized.append(arg)
    return normalized
class NistarMeta(type):
    """Metaclass that auto-generates one test_<syscall> method per entry in
    the prototypes table, unless the class already defines it by hand."""
    def __new__(cls, name, parents, dct):
        cls._add_syscalls(name, parents, dct)
        return super(NistarMeta, cls).__new__(cls, name, parents, dct)
    @classmethod
    def _add_syscalls(cls, name, parents, dct):
        # One generated test per syscall prototype.
        for syscall in proto.keys():
            cls._add_syscall(dct, syscall)
    @classmethod
    def _add_syscall(cls, dct, syscall):
        # Respect hand-written tests: only add when absent.
        if 'test_{}'.format(syscall) in dct:
            return
        # syscall is bound as a default argument to avoid the
        # late-binding closure pitfall in the loop above.
        dct['test_{}'.format(syscall)] = lambda self, syscall=syscall: \
            self._syscall_generic(syscall)
class Nistar(TestCase):
    """Refinement tests: prove each syscall implementation (LLVM IR driven
    through libirpy) agrees with its Python specification when started
    from equivalent states. (Python 2 test suite.)"""
    __metaclass__ = NistarMeta
    def setUp(self):
        self.ctx = ctx.newctx()
        self.kernelstate = state.NistarState()
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self._set_name()
        # Precondition: implementation and spec states are equivalent.
        self._pre = spec.state_equiv(self.ctx, self.kernelstate)
        self.solver.add(self._pre)
        self.ctx.add_assumption(spec.impl_invariants(self.ctx))
    def tearDown(self):
        if isinstance(self.solver, solver.Solver):
            del self.solver
    def _test(self, name):
        # Run the spec and the implementation on the same arguments,
        # then prove return values and resulting states agree.
        args = getattr(proto, name)()
        inv = []
        sargs = spec_args(args)
        for arg in args:
            if hasattr(arg, 'packing_invariants'):
                inv.append(arg.packing_invariants())
        kret, ks = getattr(spec, name)(self.kernelstate, *sargs)
        iret = self.ctx.call("@{}".format(name), *flatten(args))
        if iret is None:
            iret = 0
        if self.ctx.assumptions:
            print "WARN: Adding {} assumptions".format(len(self.ctx.assumptions))
            for i in self.ctx.assumptions:
                print i
            self.solver.add(z3.And(*self.ctx.assumptions))
        if name != 'sched_next':
            # We are not idling.
            self.solver.add(self.kernelstate.current != self.kernelstate.idle)
        self.solver.add(z3.And(inv))
        if isinstance(kret, util.Cases):
            m = self._prove(z3.And(iret == kret.to_ite(),
                                   spec.state_equiv(self.ctx, ks)),
                            pre=z3.And(z3.BoolVal(True), self._pre),
                            return_model=self.INTERACTIVE)
        else:
            # Non-Cases spec returns are booleans: impl signals success
            # by returning 0.
            m = self._prove(z3.And((iret == 0) == kret,
                                   spec.state_equiv(self.ctx, ks)),
                            pre=z3.And(z3.BoolVal(True), self._pre),
                            return_model=self.INTERACTIVE)
        if m:
            # Interactive debugging when a counterexample model exists.
            print m
            ctx = self.ctx
            from ipdb import set_trace; set_trace()
    def _syscall_generic(self, name):
        self._test(name)
class NistarAssumptions(TestCase):
    """Prove that the assumptions libirpy collects while symbolically
    executing each syscall actually hold under the spec lemmas."""
    __metaclass__ = NistarMeta
    def setUp(self):
        self.ctx = ctx.newctx()
        self.kernelstate = state.NistarState()
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self._set_name()
        self._pre = spec.state_equiv(self.ctx, self.kernelstate)
        self.solver.add(self._pre)
        self.ctx.add_assumption(spec.impl_invariants(self.ctx))
    def tearDown(self):
        if isinstance(self.solver, solver.Solver):
            del self.solver
    def _test(self, name):
        # Assume the state-safety lemmas, execute the implementation,
        # then prove the accumulated assumptions.
        inv = ['ok', 'current_thread_valid', 'freelist_ok',
               'ufreelist_ok', 'label_unique_ok', 'tls_unique_ok']
        for i in inv:
            self.solver.add(getattr(spec.lemmas, i)(self.kernelstate))
        if name != 'sched_next':
            self.solver.add(self.kernelstate.current != self.kernelstate.idle)
        args = getattr(proto, name)()
        self.ctx.call("@{}".format(name), *flatten(args))
        conds = self.ctx.assumptions
        self._prove(z3.And(*conds))
    def _syscall_generic(self, name):
        self._test(name)
class NistarFlowTests(TestCase):
    """Algebraic properties of the information-flow lattice operations
    (can_read / can_write / flow) over labels.

    Labels are modeled as tuples of characteristic functions over tags:
    (secrecy, integrity) for plain labels, plus ownership for threads."""
    def setUp(self):
        self.s1 = state.NistarState()
        self.s2 = state.NistarState()
        self.L = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
                  util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self.solver.set(MODEL=self.MODEL_HI)
        self._set_name()
    def tearDown(self):
        if isinstance(self.solver, solver.Solver):
            del self.solver
    def test_flow_w(self):
        T = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t),
             util.FreshFunction('ownership', dt.tag_t, dt.bool_t))
        L = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        K = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        # T ⋢⁺ L /\ K ⊑ L => T ⋢⁺ K
        self._prove(z3.Implies(
            z3.And(z3.Not(l.can_write(T, L, T[2])),
                   l.flow(K, L)),
            z3.Not(l.can_write(T, K, T[2]))))
    def test_flow_r(self):
        T = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t),
             util.FreshFunction('ownership', dt.tag_t, dt.bool_t))
        L = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        K = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        # L ⋢⁺ T /\ L ⊑ K => K ⋢⁺ T
        self._prove(z3.Implies(
            z3.And(z3.Not(l.can_read(T, L, T[2])),
                   l.flow(L, K)),
            z3.Not(l.can_read(T, K, T[2]))))
    def test_flow_w2(self):
        T = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t),
             util.FreshFunction('ownership', dt.tag_t, dt.bool_t))
        L = (util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
             util.FreshFunction('integrity', dt.tag_t, dt.bool_t))
        tag = util.FreshBitVec('tag', dt.tag_t)
        # If a thread can not write to L is the same as saying
        # there exists a tag in L's integrity label but not in T's integrity label,
        # or there exists a tag in T's secrecy label but not in L's secrecy label.
        # T ⋢⁺ L <=> ∃ t. (Lintegrity(t) /\ ¬ Townership(t) /\ ¬ Tintegrity(t)) \/
        #            (¬ Lsecrecy(t) /\ ¬ Townership(t) /\ Tsecrecy(t))
        self._prove(
            z3.Not(l.can_write(T, L, T[2])) ==
            z3.Exists([tag],
                      z3.Or(
                          z3.And(
                              L[1](tag),  # Lintegrity(t)
                              z3.Not(T[2](tag)),  # ¬ Townership(t)
                              z3.Not(T[1](tag))),  # ¬ Tintegrity(t)
                          z3.And(
                              z3.Not(L[0](tag)),  # ¬ Lsecrecy(t)
                              z3.Not(T[2](tag)),  # ¬ Townership(t)
                              T[0](tag)),  # Tsecrecy(t)
                      )))
class NistarInvariantTests(TestCase):
    """Sanity checks: the invariants are satisfiable (not vacuously true)."""
    def setUp(self):
        self.ctx = ctx.newctx()
        self.state = state.NistarState()
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self.solver.set(MODEL=self.MODEL_HI)
        self._set_name()
    def test_impl_invariant(self):
        I = spec.impl_invariants(self.ctx)
        self._solve(I)
    def test_spec_invariants(self):
        self._solve(spec.spec_invariants(self.state.initial()))
    def test_equiv_inv(self):
        # Shrink the page count so the solver can find a model quickly;
        # restored afterwards.
        old = dt.NR_PAGES
        dt.NR_PAGES = 10
        self.state = self.state.initial()
        I = spec.impl_invariants(self.ctx)
        S = spec.spec_invariants(self.state)
        e = spec.state_equiv(self.ctx, self.state)
        self._solve(z3.And(I, S, e))
        dt.NR_PAGES = old
class NistarSpecLemmaMeta(NistarMeta):
    """Metaclass generating one test per (syscall, lemma) pair, so every
    lemma is checked for preservation across every syscall."""
    @classmethod
    def _add_syscall(cls, dct, syscall):
        for lemma in dct['lemmas']:
            if not 'test_{}_{}'.format(syscall, lemma) in dct:
                # Defaults bind syscall/lemma per iteration (late-binding fix).
                dct['test_{}_{}'.format(syscall, lemma)] = lambda self, syscall=syscall, lemma=lemma: self._test_lemma(syscall, lemma)
class NistarSpecLemma(TestCase):
    """Prove each spec lemma is inductive: assuming the lemmas before a
    syscall, the lemma under test still holds afterwards."""
    __metaclass__ = NistarSpecLemmaMeta
    def setUp(self):
        self.state = state.NistarState()
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self.solver.set(MODEL=self.MODEL_HI)
        self._set_name()
    def test_sat(self):
        # The conjunction of all lemmas must be satisfiable
        # (otherwise the preservation proofs would be vacuous).
        old = (dt.NR_PAGES, dt.NR_PAGES2M)
        dt.NR_PAGES, dt.NR_PAGES2M = 10, 2
        conj = []
        for i in self.lemmas:
            conj.append(getattr(spec.lemmas, i)(self.state))
        self._solve(z3.And(*conj))
        dt.NR_PAGES, dt.NR_PAGES2M = old
    def _test_lemma(self, syscall, lemma_name):
        args = spec_args(getattr(proto, syscall)())
        lemma = getattr(spec.lemmas, lemma_name)
        # Assume every lemma on the pre-state; remember the one under test.
        for i in self.lemmas:
            cond = getattr(spec.lemmas, i)(self.state)
            if i == lemma_name:
                pre = cond
            self.solver.add(cond)
        _, state = getattr(spec, syscall)(self.state, *args)
        for i in self.lemmas:
            if i == lemma_name:
                continue
            print "Assuming lemma", i
            cond = getattr(spec.lemmas, i)(self.state)
            self.solver.add(cond)
        # Prove the lemma still holds on the post-state.
        post = lemma(state)
        self._prove(post, pre=pre)
    # Lemma names checked for preservation by the generated tests.
    lemmas = [
        'ok',
        'current_thread_valid',
        'freelist_ok',
        'ufreelist_ok',
        'label_unique_ok',
        'tls_unique_ok',
        # Should be true, but its not used by anyone.
        # 'tags_unused_ok',
    ]
class NistarNI(TestCase):
    """Base class for the noninterference proofs: two states s/t, an
    observer domain L, and per-syscall domain construction."""
    __metaclass__ = NistarMeta
    def setUp(self):
        self.s = state.NistarState()
        self.t = state.NistarState()
        # Domain
        self.L = dt.Domain(
            util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
            util.FreshFunction('integrity', dt.tag_t, dt.bool_t),
            util.FreshFunction('ownership', dt.tag_t, dt.bool_t))
        self.solver = self.Solver()
        self.solver.set(AUTO_CONFIG=False)
        self.solver.set(MODEL=self.MODEL_HI)
        self._set_name()
    def _domain(self, s, name, args, widle=False):
        # Default domain: the current thread's own labels.
        secrecy = s.pages.label[s.pages[s.current].secrecy].label
        integrity = s.pages.label[s.pages[s.current].integrity].label
        ownership = s.pages.label[s.pages.ownership(s.current)].label
        if name == 'sys_alloc_tag':
            # The freshly allocated tag counts as owned by the caller.
            tag_lo = s.current
            tag_hi = s.pages[s.current].tag_counter
            tag = z3.Concat(tag_hi, tag_lo)
            ownership = lambda t, ownership=ownership: \
                z3.If(t == tag, z3.BoolVal(True), ownership(t))
        elif name == 'sched_next':
            # The scheduler's domain is the bottom of the lattice.
            secrecy = l.empty()
            integrity = l.universal()
            ownership = l.empty()
        elif name == 'do_gate_enter':
            # Gate entry additionally grants the gate's ownership tags.
            ownership = l.union(ownership, args[2].has)
        if widle and name != 'sched_next':
            # Optionally weaken the domain when running as the idle thread.
            secrecy = lambda tag, old=secrecy: util.If(s.current == s.idle, z3.BoolVal(True), old(tag))
            integrity = lambda tag, old=integrity: util.If(s.current == s.idle, z3.BoolVal(False), old(tag))
            ownership = lambda tag, old=ownership: util.If(s.current == s.idle, z3.BoolVal(False), old(tag))
        return dt.Domain(secrecy, integrity, ownership)
    def _syscall_generic(self, name):
        specfn = getattr(spec, name)
        args = self._get_args(name)
        dom = lambda s: self._domain(s, name, args)
        lemmas = spec.lemmas.spec_to_lemma(name)
        self._test(specfn=specfn, specargs=args, dom=dom, syscall_name=name, lemmas=lemmas)
    def _get_args(self, name):
        return spec_args(getattr(proto, name)())
    def _apply_lemmas(self, state, lemmas):
        for lemma in lemmas:
            print "Applying lemma {}".format(lemma.__name__)
            self.solver.add(lemma(state))
class OutputConsistency(NistarNI):
    """NI output consistency: states equivalent for dom(a, s) produce the
    same syscall return value."""
    def _test(self, specfn=None, specargs=None, syscall_name=None, dom=None, lemmas=None):
        # s and t are `safe` states.
        self._apply_lemmas(self.s, lemmas)
        self._apply_lemmas(self.t, lemmas)
        # s ~dom(a, s) t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, dom(self.s)))
        if syscall_name != 'sched_next':
            self.solver.add(self.s.current != self.s.idle)
            self.solver.add(self.t.current != self.t.idle)
        # output(s, a)
        sout, _ = specfn(self.s, *specargs)
        tout, _ = specfn(self.t, *specargs)
        if isinstance(sout, util.Cases) or isinstance(tout, util.Cases):
            sout = sout.to_ite()
            tout = tout.to_ite()
        m = self._prove(sout == tout, return_model=self.INTERACTIVE)
        if m:
            # Drop into the debugger when a counterexample model exists.
            print m
            from ipdb import set_trace; set_trace()
class LocalRespect(NistarNI):
    """NI local respect: a step whose domain cannot interfere with the
    observer L leaves the state L-equivalent to before."""
    def _test(self, specfn=None, specargs=None, syscall_name=None, dom=None, lemmas=None):
        if spec.lemmas.label_unique_ok not in lemmas:
            lemmas.append(spec.lemmas.label_unique_ok)
        # dom(a, s) ⋢ L
        self.solver.add(z3.Not(
            l.interference(dom(self.s), self.L)))
        if syscall_name != 'sched_next':
            self.solver.add(self.s.current != self.s.idle)
        # s is a `safe` state.
        self._apply_lemmas(self.s, lemmas)
        # step(s, a)
        _, s1 = specfn(self.s, *specargs)
        # Show that s ~L step(s, a)
        m = self._prove(spec.ni.obs_equivalence(self.s, s1, self.L), return_model=self.INTERACTIVE)
        if m:
            print m
            from ipdb import set_trace; set_trace()
class StepConsistency(NistarNI):
    """NI step consistency: states equivalent for both L and dom(a, s)
    remain L-equivalent after the same step."""
    def _test_current_equal(self, name):
        # Show that observational equivalence implies that
        # s.current == t.current for all operations (except 'sched_next')
        args = self._get_args(name)
        dom = lambda s: self._domain(s, name, args, widle=True)
        self._apply_lemmas(self.s, [spec.lemmas.ok, spec.lemmas.current_thread_valid])
        self._apply_lemmas(self.t, [spec.lemmas.ok, spec.lemmas.current_thread_valid])
        # s ~L t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, self.L))
        # s ~dom(a, s) t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, dom(self.s)))
        self._prove(self.s.current == self.t.current)
    def test_current_equal_normal(self):
        # Any operation that doesn't have a "special" domain.
        self._test_current_equal('sys_container_get_root')
    def test_current_equal_sys_alloc_tag(self):
        self._test_current_equal('sys_alloc_tag')
    def test_current_equal_do_gate_enter(self):
        self._test_current_equal('do_gate_enter')
    def _test(self, specfn=None, specargs=None, syscall_name=None, dom=None, lemmas=None):
        # s and t are `safe` states.
        self._apply_lemmas(self.s, lemmas)
        self._apply_lemmas(self.t, lemmas)
        # s ~L t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, self.L))
        # s ~dom(a, s) t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, dom(self.s)))
        # Only operation enabled for idle thread is sched_next
        if syscall_name != 'sched_next':
            self.solver.add(self.s.current != self.s.idle)
            self.solver.add(self.t.current != self.t.idle)
        # step(s, a)
        _, s1 = specfn(self.s, *specargs)
        _, t1 = specfn(self.t, *specargs)
        # step(s, a) and step(t, a) are safe. Speeds up these two proofs significantly.
        if syscall_name in ['sys_container_move_uquota']:
            self._apply_lemmas(s1, lemmas)
            self._apply_lemmas(t1, lemmas)
        # show that s ~L t
        m = self._prove(spec.ni.obs_equivalence(s1, t1, self.L), return_model=self.INTERACTIVE)
        if m:
            print m
            from ipdb import set_trace; set_trace()
class DomainConsistency(NistarNI):
    """NI domain consistency: dom-equivalent states assign the action the
    same domain."""
    def _test(self, specfn=None, specargs=None, syscall_name=None, dom=None, lemmas=None):
        # s and t are `safe` states.
        self._apply_lemmas(self.s, lemmas)
        self._apply_lemmas(self.t, lemmas)
        self.solver.add(self.s.current != self.s.idle)
        self.solver.add(self.t.current != self.t.idle)
        # s ~dom(a, s) t
        self.solver.add(
            spec.ni.obs_equivalence(self.s, self.t, dom(self.s)))
        # show that dom(a, s) = dom(a, t)
        m = self._prove(l.equal(dom(self.s), dom(self.t)))
        if m:
            print m
            from ipdb import set_trace; set_trace()
class EqvTest(ni.EquivalenceProp):
    """Checks that obs_equivalence is an equivalence relation for a fresh
    symbolic label domain, using the framework's EquivalenceProp driver."""
    doms = [('label', dt.Domain(
        util.FreshFunction('secrecy', dt.tag_t, dt.bool_t),
        util.FreshFunction('integrity', dt.tag_t, dt.bool_t),
        util.FreshFunction('ownership', dt.tag_t, dt.bool_t)))]
    state = state.NistarState
    lemmas = [spec.lemmas.ok, spec.lemmas.freelist_ok]
    def obs_equivalence(self, s, t, L):
        return spec.ni.obs_equivalence(s, t, L)
# Entry point: run all proof obligations via the libirpy test runner.
if __name__ == "__main__":
    libirpy.unittest.main()
|
import sys
import datetime
from multiprocessing import Pool
from icg import card
def multiSim(idx):
    """Run one pool-generation simulation and tally effect triggers.

    `idx` is the worker index supplied by `imap_unordered`; it is unused.
    Returns a dict with a 'triggers' mapping of trigger name -> count.
    """
    counts = {}
    for generated in card.generate_pool():
        for effect in generated.effects:
            counts[effect.triggerName] = counts.get(effect.triggerName, 0) + 1
    return {'triggers': counts}
def combineDictOfInts(d1, d2):
    """Return a new dict summing the integer values of d1 and d2 per key.

    Neither input is modified; keys unique to either dict are kept.
    """
    merged = dict(d1)
    for key in d2:
        merged[key] = merged.get(key, 0) + d2[key]
    return merged
# --- simulation driver ---
# NOTE(review): startTime is recorded but never reported below — confirm intent.
startTime = datetime.datetime.now()
effects = {}
sims = 10
try:
    # Number of simulations can be passed as the first CLI argument.
    sims = int(sys.argv[1])
    print(f'simulating {sims} times')
except (IndexError, ValueError):
    print('simulating 10 times')
if sims == 1:
    # NOTE(review): `verbose` is set but never read in this file — confirm intent.
    verbose = True
pool = Pool()
# Fan the simulations out over worker processes and merge the trigger counts.
for simStats in pool.imap_unordered(multiSim, range(sims)):
    effects = combineDictOfInts(effects, simStats['triggers'])
effectPct = []
# triggerWeights = {tt.name: tt.weight for tt in card.triggerTypes}
for effectName, count in effects.items():
    effectPct.append((effectName, count / sims))
# Sort triggers by average occurrences per simulation, descending.
effectPct = sorted(effectPct, key=lambda t: t[1], reverse=True)
print()
for name, pct in effectPct:
    print('{0} trigger ratio: {1:.2f}'.format(name, pct))
|
from django.db import models
# Create your models here.
class Project(models.Model):
    # An inspection project; one project groups many Anomaly records.
    client = models.CharField(max_length=200, null=True)
    logo = models.FileField(blank=True)
    location = models.CharField(max_length=200, null=True)
    vessel = models.CharField(max_length=200, null=True)
    def __str__(self):
        # Projects are displayed by client name in the admin/shell.
        return self.client
class Anomaly(models.Model):
    # One anomaly observation recorded during an inspection project.
    # Choice tuples: (stored value, human-readable label).
    TYPE = (('AW', 'AW'),('DB', 'DB'),)
    STATUS = (('Client_signed', 'Client_signed'),('Not_Signed', 'Not_Signed'),)
    HIST = (('Yes', 'Yes'), ('No', 'No'),)
    CRIT = (('Select', 'Select'), ('Low', 'Low'), ('Medium', 'Medium'), ('High', 'High'),)
    # NOTE(review): 'Hist-001' appears twice here; the second entry was
    # probably meant to be 'Hist-002' — confirm before changing.
    REF = (('N/A', 'N/A'), ('Hist-001', 'Hist-001'), ('Hist-001', 'Hist-001'),)
    client = models.ForeignKey(Project, null=True, on_delete= models.CASCADE)
    asset = models.CharField(max_length=200, null=True)
    component = models.CharField(max_length=200, null=True)
    sub_component = models.CharField(max_length=200, null=True)
    anomaly_id = models.CharField(max_length=200, null=True)
    criticality = models.CharField(max_length=200, default='Select', choices=CRIT)
    code = models.CharField(max_length=200, null=True, choices=TYPE)
    DateTime = models.DateTimeField(null=True)
    is_hist = models.CharField(max_length=200, default='No', choices=HIST)
    hist_ref = models.CharField(max_length=200, default='N/A', choices=REF)
    comments = models.TextField(null=True)
    review_status = models.CharField(max_length=200, default='Not_Signed', choices=STATUS)
    pdf_upload = models.FileField(null=True)
    inspector = models.CharField(max_length=200, null=True)
    coord = models.CharField(max_length=200, null=True)
    obcr = models.CharField(max_length=200, null=True)
    image_1 = models.FileField( null=True)
    image_1_description = models.CharField(max_length=200, default='Image_1', blank=True)
    image_2 = models.FileField( null=True)
    image_2_description = models.CharField(max_length=200, default='Image_2', blank=True)
    video = models.FileField( null=True)
    video_description = models.CharField(max_length=200, default='Anomaly Video', blank=True)
    def __str__(self):
        # Anomalies are displayed by their anomaly_id.
        return self.anomaly_id
# class Customer(models.Model):
# name = models.CharField(max_length=200, null=True)
# phone = models.CharField(max_length=200, null=True)
# email = models.CharField(max_length=200, null=True)
# date_created = models.DateTimeField(auto_now_add=True, null=True)
#
# def __str__(self):
# return self.name
#
#
# class Tag(models.Model):
# name = models.CharField(max_length=200, null=True)
#
# def __str__(self):
# return self.name
#
# class Product(models.Model):
# CATEGORY = (
# ('Indoor', 'Indoor'),
# ('Out Door', 'Out Door'),
# )
#
# name = models.CharField(max_length=200, null=True)
# price = models.FloatField(null=True)
# category = models.CharField(max_length=200, null=True, choices=CATEGORY)
# description = models.CharField(max_length=200, null=True, blank=True)
# date_created = models.DateTimeField(auto_now_add=True, null=True)
# tags = models.ManyToManyField(Tag)
#
# def __str__(self):
# return self.name
#
# class Order(models.Model):
# STATUS = (
# ('Pending', 'Pending'),
# ('Out for delivery', 'Out for delivery'),
# ('Delivered', 'Delivered'),
# )
#
# customer = models.ForeignKey(Customer, null=True, on_delete= models.SET_NULL)
# product = models.ForeignKey(Product, null=True, on_delete= models.SET_NULL)
# date_created = models.DateTimeField(auto_now_add=True, null=True)
# status = models.CharField(max_length=200, null=True, choices=STATUS)
|
from django.urls import path, include
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework_xml.parsers import XMLParser
from rest_framework_xml.renderers import XMLRenderer
from rest_framework import routers, serializers, viewsets
from quiz.models import Category, Question, Badge, Player, Statistics
from django.contrib.auth.models import User
router = routers.DefaultRouter()
# Serializers define the API representation.
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Category
        fields = ('id', 'name')
        # depth=1 serializes one level of nested relations read-only.
        depth = 1
# ViewSets define the view behavior.
class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'categories', CategoryViewSet)
class QuestionSerializer(serializers.HyperlinkedModelSerializer):
    # Write-only FK id so clients can set the category on create/update.
    # NOTE(review): with depth=1 this only works if the serializer/view
    # handles category_id explicitly — confirm create()/update() do.
    category_id = serializers.CharField(write_only=True)
    class Meta:
        model = Question
        fields = '__all__'
        depth = 1
# ViewSets define the view behavior.
class QuestionViewSet(viewsets.ModelViewSet):
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'questions', QuestionViewSet, )
# Badge REST resource: serializer, viewset, and route.
class BadgeSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Badge
        fields = '__all__'
# ViewSets define the view behavior.
class BadgeViewSet(viewsets.ModelViewSet):
    queryset = Badge.objects.all()
    serializer_class = BadgeSerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'badges', BadgeViewSet, )
class PlayerSerializer(serializers.HyperlinkedModelSerializer):
    # Write-only FK id so clients can set the badge on create/update.
    badge_id = serializers.CharField(write_only=True)
    class Meta:
        model = Player
        fields = '__all__'
        depth = 1
# ViewSets define the view behavior.
class PlayerViewSet(viewsets.ModelViewSet):
    queryset = Player.objects.all()
    serializer_class = PlayerSerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'players', PlayerViewSet, )
# Statistics REST resource: serializer, viewset, and route.
class StatisticsSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Statistics
        fields = '__all__'
# ViewSets define the view behavior.
class StatisticsViewSet(viewsets.ModelViewSet):
    queryset = Statistics.objects.all()
    serializer_class = StatisticsSerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'statistics', StatisticsViewSet, )
class UserSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = User
        # NOTE(review): 'password' is exposed in API responses here and is
        # not write_only/hashed on create — confirm this is intended.
        fields = ('url', 'username', 'password', 'email', 'is_staff')
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer
# Routers provide an easy way of automatically determining the URL conf.
router.register(r'users', UserViewSet)
# Wire the router-generated API under /rest/ and add the browsable-API
# login/logout views under /api-auth/.
urlpatterns = [
    path('rest/', include(router.urls)),
    path('api-auth/', include('rest_framework.urls')),
]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Wednesday, January 16th 2019, 2:03:59 pm
import argparse
import logging
import shutil
from pathlib import Path
from shutil import ignore_patterns as ig
import ibllib.io.extractors.base
import ibllib.io.flags as flags
import ibllib.io.raw_data_loaders as raw
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def main(local_folder: str, remote_folder: str, force: bool = False) -> None:
    """Copy iblrig session folders flagged with ``transfer_me.flag`` from
    *local_folder* to *remote_folder*.

    For every transferred session the source flag file is removed, a
    ``raw_session.flag`` is created on the destination for non-ephys task
    types, and local raw audio/video files are deleted once a remote copy
    of identical size exists.

    Args:
        local_folder: local iblrig_data/Subjects folder to scan.
        remote_folder: remote Subjects folder receiving the sessions.
        force: if True, wipe an existing destination session before copying.
    """
    local_folder = Path(local_folder)
    remote_folder = Path(remote_folder)
    src_session_paths = [x.parent for x in local_folder.rglob("transfer_me.flag")]
    if not src_session_paths:
        log.info("Nothing to transfer, exiting...")
        return
    # Build the destination path for each session: <remote>/<mouse>/<date>/<number>
    dst_session_paths = []
    for s in src_session_paths:
        mouse = s.parts[-3]
        date = s.parts[-2]
        sess = s.parts[-1]
        d = remote_folder / mouse / date / sess
        dst_session_paths.append(d)
    for src, dst in zip(src_session_paths, dst_session_paths):
        src_flag_file = src / "transfer_me.flag"
        flag = flags.read_flag_file(src_flag_file)
        if isinstance(flag, list):
            # Per-file (partial) transfers are not supported.
            raise NotImplementedError
        else:
            if force:
                shutil.rmtree(dst, ignore_errors=True)
            log.info(f"Copying {src}...")
            shutil.copytree(src, dst, ignore=ig(str(src_flag_file.name)))
        # finally if folder was created delete the src flag_file and create compress_me.flag
        if dst.exists():
            task_type = ibllib.io.extractors.base.get_session_extractor_type(Path(src))
            if task_type not in ['ephys', 'ephys_sync', 'ephys_mock']:
                flags.write_flag_file(dst.joinpath('raw_session.flag'))
                settings = raw.load_settings(dst)
                # Guard: load_settings may return None for incomplete
                # sessions -- TODO confirm against ibllib.io.raw_data_loaders.
                if settings and 'ephys' in settings['PYBPOD_BOARD']:  # Any training task on an ephys rig
                    dst.joinpath('raw_session.flag').unlink()
            log.info(f"Copied to {remote_folder}: Session {src_flag_file.parent}")
            src_flag_file.unlink()
        # Cleanup: drop the local raw media only when a remote copy of
        # identical size exists.
        src_video_file = src / 'raw_video_data' / '_iblrig_leftCamera.raw.avi'
        dst_video_file = dst / 'raw_video_data' / '_iblrig_leftCamera.raw.avi'
        src_audio_file = src / 'raw_behavior_data' / '_iblrig_micData.raw.wav'
        dst_audio_file = dst / 'raw_behavior_data' / '_iblrig_micData.raw.wav'
        # BUG FIX: the original stat()'ed the destination file without checking
        # it exists, raising FileNotFoundError whenever the copy lacked it.
        if src_audio_file.exists() and dst_audio_file.exists() and \
                src_audio_file.stat().st_size == dst_audio_file.stat().st_size:
            src_audio_file.unlink()
        if src_video_file.exists() and dst_video_file.exists() and \
                src_video_file.stat().st_size == dst_video_file.stat().st_size:
            src_video_file.unlink()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Transfer files to IBL local server')
    parser.add_argument(
        'local_folder', help='Local iblrig_data/Subjects folder')
    parser.add_argument(
        'remote_folder', help='Remote iblrig_data/Subjects folder')
    # BUG FIX: main() accepts a 'force' flag but the CLI never exposed it.
    parser.add_argument(
        '--force', action='store_true',
        help='Remove an existing remote session folder before copying')
    args = parser.parse_args()
    main(args.local_folder, args.remote_folder, force=args.force)
|
import os
# Cache/server connection settings.
# NOTE(review): port 11211 is the memcached default -- confirm the target service.
HOST = 'localhost'
PORT = 11211
# ipstack geolocation API key, read from the environment with an obvious
# placeholder as fallback.
IPSTACK_ACCESS_KEY = os.environ.get('IPSTACK_ACCESS_KEY', '<your access key>')
|
class Stack:
    """Minimal LIFO stack backed by a Python list."""
    def __init__(self):
        self.items = []
    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()
def check_palindrome(input):
    """Return True if *input* reads the same forwards and backwards.

    Pushes every character onto a stack, then pops them back (yielding the
    string reversed) while comparing against the original order.
    """
    stack = Stack()
    for char in input:
        stack.push(char)
    for char in input:
        # BUG FIX: the original overwrote the flag on every iteration, so
        # only the LAST comparison decided the result (e.g. "1231" was
        # wrongly reported as a palindrome). A single mismatch now rejects
        # immediately. The empty string is correctly treated as a palindrome.
        if char != stack.pop():
            return False
    return True
# Demo: expected output is False for "123214" and True for "hymyh".
print(check_palindrome("123214"))
print(check_palindrome("hymyh"))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 17:21:23 2020
@author: idchiang
"""
from .hrchy_plot import *
|
# -*- coding: utf-8 -*-
"""
IMPORTANT!:
Before writing an email asking questions such as
'What does this input has to be like?' or
'What return value do you expect?' PLEASE read our
exercise sheet and the information in this template
carefully.
If something is still unclear, PLEASE talk to your
colleagues before writing an email!
If you experience technical issues or if you find a
bug we are happy to answer your questions. However,
in order to provide quick help in such cases we need
to avoid unnecessary emails such as the examples
shown above.
"""
from Bio.PDB.MMCIFParser import MMCIFParser # Tip: This module might be useful for parsing...
from Bio.Data.IUPACData import protein_letters_3to1
import numpy as np
############# Exercise 2: Protein Data Bank #############
# General remark: In our exercise every structure will have EXACTLY ONE model.
# This is true for nearly all X-Ray structures. NMR structures have several models.
class PDB_Parser:
    # Wraps a parsed CIF structure; every structure handled here has exactly
    # one model (see the remark above the class).
    def __init__(self, path):
        '''
            Initialize every PDB_Parser with a path to a structure-file in CIF format.
            An example file is included in the repository (7ahl.cif).
            Tip: Store the parsed structure in an object variable instead of parsing it
            again & again ...
        '''
        cif_parser = MMCIFParser(QUIET=True)  # parser object for reading in structure in CIF format
        self.structure = cif_parser.get_structure('structure', path)
        self.model = self.structure[0]
        # Upper-case 3-letter residue name -> 1-letter code; water ('HOH')
        # maps to '' so it vanishes from sequences.
        self.residue_dict = {k.upper(): v for d in [protein_letters_3to1, {'HOH': ''}] for k, v in d.items()}
    # 3.8 Chains
    def get_number_of_chains(self):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
            Return:
                Number of chains in this structure as integer.
        '''
        n_chains = len(self.model)
        return n_chains
    # 3.9 Sequence
    def get_sequence(self, chain_id):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
                chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
            Return:
                Return the amino acid sequence (single-letter alphabet!) of a given chain (chain_id)
                in a Biopython.PDB structure as a string.
        '''
        chain = self.model[chain_id]
        # Waters contribute '' via residue_dict; a residue name missing from
        # the dict raises KeyError here.
        sequence = ''.join(self.residue_dict[residue.resname] for residue in chain)
        return sequence
    # 3.10 Water molecules
    def get_number_of_water_molecules(self, chain_id):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
                chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
            Return:
                Return the number of water molecules of a given chain (chain_id)
                in a Biopython.PDB structure as an integer.
        '''
        chain = self.model[chain_id]
        # Biopython marks waters with hetero-flag 'W' in residue.id[0].
        n_waters = sum(1 for residue in chain if residue.id[0] == 'W')
        return n_waters
    # 3.11 C-Alpha distance
    def get_ca_distance(self, chain_id_1, index_1, chain_id_2, index_2):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
                chain_id_1 : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
                index_1 : index of a residue in a given chain in a Biopython.PDB structure
                chain_id_2 : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
                index_2 : index of a residue in a given chain in a Biopython.PDB structure
                chain_id_1 and index_1 describe precisely one residue in a PDB structure,
                chain_id_2 and index_2 describe the second residue.
            Return:
                Return the C-alpha (!) distance between the two residues, described by
                chain_id_1/index_1 and chain_id_2/index_2. Round the returned value via int().
                The reason for using two different chains as an input is that also the distance
                between residues of different chains can be interesting.
                Different chains in a PDB structure can either occur between two different proteins
                (Heterodimers) or between different copies of the same protein (Homodimers).
        '''
        residue1 = self.model[chain_id_1][index_1]
        residue2 = self.model[chain_id_2][index_2]
        # Missing 'CA' atoms raise KeyError (handled by get_contact_map).
        ca_distance = np.linalg.norm(residue1['CA'].coord - residue2['CA'].coord)
        return int(ca_distance)
    # 3.12 Contact Map
    def get_contact_map(self, chain_id):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
                chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
            Return:
                Return a complete contact map (see description in exercise sheet)
                for a given chain in a Biopython.PDB structure as numpy array.
                The values in the matrix describe the c-alpha distance between all residues
                in a chain of a Biopython.PDB structure.
                Only integer values of the distance have to be given (see below).
        '''
        chain = self.model[chain_id]
        is_aa = lambda res: res.id[0] == ' ' # is amino acid?
        length = sum(1 for res in chain if is_aa(res))
        contact_map = np.zeros((length, length), dtype=np.float32)
        # Upper triangle only (j starts at i); mirrored into the lower half.
        # The inner generator filters residues by sequence number
        # (res.id[1] >= residue_i.id[1]), re-scanning the chain per row.
        for i, residue_i in zip(range(0, length),
                                (res for res in chain if is_aa(res))):
            for j, residue_j in zip(range(i, length),
                                    (res for res in chain if res.id[1] >= residue_i.id[1] and is_aa(res))):
                try:
                    # residue.id (the full id tuple) is used as the chain index here.
                    contact_map[i, j] = self.get_ca_distance(chain_id, residue_i.id, chain_id, residue_j.id)
                    contact_map[j, i] = contact_map[i, j]
                except KeyError as err:
                    # NOTE(review): NaN survives into the float32 matrix and the
                    # astype(np.int64) below turns it into an undefined integer --
                    # confirm this is acceptable for unresolved residues.
                    contact_map[i, j], contact_map[j, i] = np.nan, np.nan
                    print(err)
        return contact_map.astype(np.int64) # return rounded (integer) values
    # 3.13 B-Factors
    def get_bfactors(self, chain_id):
        '''
            Input:
                self: Use Biopython.PDB structure which has been stored in an object variable
                chain_id : String (usually in ['A','B', 'C' ...]. The number of chains
                        depends on the specific protein and the resulting structure)
            Return:
                Return the B-Factors for all residues in a chain of a Biopython.PDB structure.
                The B-Factors describe the mobility of an atom or a residue.
                In a Biopython.PDB structure B-Factors are given for each atom in a residue.
                Calculate the mean B-Factor for a residue by averaging over the B-Factor
                of all atoms in a residue.
                Sometimes B-Factors are not available for a certain residue;
                (e.g. the residue was not resolved); insert np.nan for those cases.
                Finally normalize your B-Factors using Standard scores (zero mean, unit variance).
                You have to use np.nanmean, np.nanvar etc. if you have nan values in your array.
                The returned data structure has to be a numpy array rounded again to integer.
        '''
        chain = self.model[chain_id]
        # Non-water residue count, assuming only waters are excluded.
        length = len(chain) - self.get_number_of_water_molecules(chain_id)
        b_factors = np.zeros(length, dtype=np.float32)
        for i, residue in enumerate(chain):
            # NOTE(review): 'break' assumes all waters come after the last
            # protein residue; interleaved waters would truncate the loop and
            # leave trailing zeros -- confirm the chain ordering guarantee.
            if residue.id[0] == 'W': # if water molecule
                break
            temp_list = [(atom.bfactor if hasattr(atom, 'bfactor') else np.nan) for atom in residue.get_atoms()]
            b_factors[i] = np.nanmean(temp_list)
        # Standard-score normalization (zero mean, unit variance), NaN-aware.
        b_factors = (b_factors - np.nanmean(b_factors)) / np.nanstd(b_factors)
        return b_factors.astype(np.int64) # return rounded (integer) values
def main():
    """Smoke test: parse the bundled sample structure and discard the result."""
    print('PDB parser class.')
    x = PDB_Parser('tests/6aa6.cif')  # parses the test CIF file; result unused
    return None
if __name__ == '__main__':
    main()
# Yanova Daniella IU7-23
# File search: extract computer records matching a requested price.
# Records in Computer.txt are groups of three whitespace-separated tokens;
# the third token of each record is the price.
# IMPROVEMENT: 'with' blocks guarantee the files are closed, replacing the
# original open/close/reopen-in-append-mode sequence (same resulting output).
with open('Computer.txt', 'r') as fl:
    lines = fl.read().split()
# Ask for the search criterion (the price to look for).
s = input('Компьютеры какой стоимости вы хотите найти? ')
# Write every matching record to find.txt ('w' truncates any previous results).
with open('find.txt', 'w') as fin:
    i = 2
    while i < len(lines):
        r = lines[i - 2] + ' ' + lines[i - 1] + ' ' + lines[i]
        if lines[i] == s:
            fin.write(r)
            fin.write('\n')
        i += 3
|
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from collections import OrderedDict
#==============================================================================
class ErrorPlot:
    """
    Creates error plot for specified data
    Note: should be extended in future to provide more flexibility, such as
    provision for specifying various graph parameters (e.g. xlim, ylim)
    Caution: Currently this presumes that terminal dictionary keys for
    observation are ('mean' and 'std') or ('min' and 'max'), and for prediction
    is 'value'; with identical keys at all non-terminal levels.
    """
    def __init__(self, testObj):
        self.testObj = testObj
        self.filename = "error_plot"
        self.xlabels = ["(not specified)"] # self.testObj.observation.keys()
        self.ylabel = "(not specified)"
    def traverse_dicts(self, obs, prd, output=None):
        """Flatten parallel observation/prediction dicts into tuples.

        Returns a list with elements of the form
        ("mean_sd", obs_mean, obs_std, prd_value) or
        ("min_max", obs_min, obs_max, prd_value).

        BUG FIX: the original used a mutable default (output=[]), so results
        accumulated across separate calls; the recursion silently relied on
        that shared list. The default is now None and the accumulator is
        passed explicitly to recursive calls.

        Raises:
            ValueError: if a terminal dict has neither 'mean' nor 'min' keys
                (the original executed a bare 'raise' with no active
                exception, which surfaced as an unrelated RuntimeError).
        """
        if output is None:
            output = []
        od_obs = OrderedDict(sorted(obs.items(), key=lambda t: t[0]))
        od_prd = OrderedDict(sorted(prd.items(), key=lambda t: t[0]))
        flag = True
        for key in od_obs.keys():
            if flag is True:
                if isinstance(od_obs[key], dict):
                    self.traverse_dicts(od_obs[key], od_prd[key], output)
                else:
                    if "mean" in od_obs.keys():
                        output.append(("mean_sd", od_obs["mean"], od_obs["std"], od_prd["value"]))
                    elif "min" in od_obs.keys():
                        output.append(("min_max", od_obs["min"], od_obs["max"], od_prd["value"]))
                    else:
                        raise ValueError("Error in terminal keys!")
                    # A terminal dict yields exactly one tuple; skip its
                    # remaining keys.
                    flag = False
        return output
    def create(self):
        """Render the error plot and save it as '<path_test_output>error_plot.pdf'.

        Returns:
            The path of the written PDF file.
        """
        output = self.traverse_dicts(self.testObj.observation, self.testObj.prediction)
        fig = plt.figure()
        ix = 0
        for (obs_type, obs_var1, obs_var2, prd_value) in output:
            if obs_type == "mean_sd":
                # Observation as mean with std error bars.
                ax_o = plt.errorbar(ix, obs_var1, yerr=obs_var2, ecolor='black', elinewidth=2,
                                    capsize=5, capthick=2, fmt='ob', markersize='5', mew=5)
            elif obs_type == "min_max":
                # Observation as a min-max vertical range.
                ax_o = plt.plot([ix, ix], [obs_var1, obs_var2], '_b-', markersize=8, mew=8, linewidth=2.5)
            else:
                # should never be executed; traverse_dicts only emits the two types
                print("ERROR! Unknown type of observation data.")
            ax_p = plt.plot(ix, prd_value, 'rx', markersize='8', mew=2)
            ix = ix + 1
        plt.xticks(range(len(output)), self.xlabels, rotation=20)
        plt.tick_params(labelsize=11)
        plt.figlegend((ax_o, ax_p[0]), ('Observation', 'Prediction',), 'upper right')
        plt.margins(0.1)
        plt.ylabel(self.ylabel)
        fig = plt.gcf()
        fig.set_size_inches(8, 6)
        filepath = self.testObj.path_test_output + self.filename + '.pdf'
        plt.savefig(filepath, dpi=600,)
        return filepath
|
import torch
from torchvision import datasets, transforms
import torchvision.models as models
from torch import nn
from collections import OrderedDict
import time
from torch import optim
from workspace_utils import active_session
import error_types as error
def _load_data(train_dir, test_dir='./flowers/test/'):
    """Build train/test DataLoaders with standard ImageNet normalization.

    Args:
        train_dir: directory of training images in ImageFolder layout.
        test_dir: directory of test images (defaults to './flowers/test/').

    Returns:
        (train_loader, test_loader, train_data) -- train_data is the raw
        ImageFolder dataset, kept so callers can read class_to_idx later.
    """
    #print("_load_data entered : ", train_dir, test_dir)
    # Training images are augmented (rotation/crop/flip); test images only
    # get the deterministic resize/center-crop.
    train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                           transforms.RandomResizedCrop(224),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    test_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    # TODO: Load the datasets with ImageFolder
    train_data = datasets.ImageFolder(train_dir,transform=train_transforms)
    test_data = datasets.ImageFolder(test_dir,transform=test_transforms)
    # TODO: Using the image datasets and the trainforms, define the dataloaders
    train_loader = torch.utils.data.DataLoader(train_data,batch_size=64,shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_data,batch_size=64,shuffle=True)
    return train_loader, test_loader, train_data
def _build_model(arch, hidden_units):
    """Create a pretrained torchvision model with a fresh classifier head.

    Args:
        arch (str): 'vgg16' or 'vgg13'; anything else is rejected.
        hidden_units (int): width of the hidden layer in the new classifier.

    Returns:
        The model with a 102-way LogSoftmax classifier attached, or
        error.UNSUPPORTED_ARCH_ERROR for an unsupported architecture.
    """
    # print("_build_model entered : ", arch, hidden_units)
    model = None
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
        classifier_input_size = model.classifier[0].in_features
        print('vgg16 classifier_input_size :', classifier_input_size)
    elif arch == 'vgg13':
        model = models.vgg13(pretrained=True)
        classifier_input_size = model.classifier[0].in_features
        print('vgg13 classifier_input_size :', classifier_input_size)
    else:
        print("[ERROR] _build_model - Unsupported Arch Option")
        return error.UNSUPPORTED_ARCH_ERROR
    print(model.classifier)
    # Replace the stock classifier with a small two-layer head producing
    # log-probabilities for 102 classes (paired with NLLLoss in training).
    classifier = nn.Sequential(OrderedDict([
                          ('fc1', nn.Linear(classifier_input_size,hidden_units)),
                          ('relu1', nn.ReLU()),
                          ('drop1', nn.Dropout(p=0.2)),
                          ('fc5', nn.Linear(hidden_units, 102)),
                          ('output', nn.LogSoftmax(dim=1))
                          ]))
    model.classifier = classifier
    # print('inside _build_model : ')
    # print(model)
    return model
def _train_model(model, train_loader, test_loader, gpu, epochs, learning_rate):
    """Train the model's classifier head, printing periodic progress.

    Args:
        model: torchvision model; only model.classifier parameters are optimized.
        train_loader: DataLoader for training batches.
        test_loader: DataLoader used for the periodic evaluation pass.
        gpu (bool): request CUDA; falls back to CPU when unavailable.
        epochs (int): number of passes over the training data.
        learning_rate (float): Adam learning rate.

    Returns:
        (model, optimizer) after training.
    """
    # print("_train_model entered : ", gpu, epochs, learning_rate)
    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = torch.device("cuda" if (gpu == True) and torch.cuda.is_available() else "cpu")
    criterion = nn.NLLLoss()
    # Only the (replaced) classifier head is optimized; the pretrained
    # feature extractor keeps its weights.
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    model.to(device)
    running_loss = 0
    print_every = 5
    steps = 0
    with active_session():
        for epoch in range(epochs):
            for inputs, labels in train_loader:
                steps += 1
                # Move input and label tensors to the GPU
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                logps = model.forward(inputs)
                loss = criterion(logps, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if steps % print_every == 0:
                    # Periodic evaluation over the full test set.
                    test_loss = 0
                    accuracy = 0
                    model.eval()
                    with torch.no_grad():
                        for inputs, labels in test_loader:
                            inputs, labels = inputs.to(device), labels.to(device)
                            logps = model.forward(inputs)
                            batch_loss = criterion(logps, labels)
                            test_loss += batch_loss.item()
                            # Calculate accuracy
                            ps = torch.exp(logps)
                            top_p, top_class = ps.topk(1, dim=1)
                            equals = top_class == labels.view(*top_class.shape)
                            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                    print(f"Epoch {epoch+1}/{epochs}.. "
                          f"Train loss: {running_loss/print_every:.3f}.. "
                          f"Test loss: {test_loss/len(test_loader):.3f}.. "
                          f"Test accuracy: {accuracy/len(test_loader):.3f}")
                    running_loss = 0
                    model.train()
    return model, optimizer
def train(data_directory, arch, hidden_units, epochs, gpu, learning_rate):
    """End-to-end pipeline: load data, build the model, train it.

    Returns:
        (model, train_data, optimizer) on success, or
        error.UNSUPPORTED_ARCH_ERROR when *arch* is not supported.
    """
    train_loader, test_loader, train_data = _load_data(data_directory)
    model = _build_model(arch, hidden_units)
    # NOTE(review): '==' against the error constant relies on default
    # identity semantics for nn.Module -- 'is' would state intent more
    # precisely; confirm the type of UNSUPPORTED_ARCH_ERROR.
    if model == error.UNSUPPORTED_ARCH_ERROR:
        return error.UNSUPPORTED_ARCH_ERROR
    model, optimizer = _train_model(model, train_loader, test_loader, gpu, epochs, learning_rate)
    return model, train_data, optimizer
def save_model(model, train_data, optimizer, save_dir):
    """Serialize a training checkpoint with torch.save.

    The model is moved to CPU first so the checkpoint can be loaded on
    CPU-only hosts.

    Args:
        model: trained model to checkpoint.
        train_data: ImageFolder dataset providing class_to_idx.
        optimizer: optimizer whose state is saved for resuming.
        save_dir: destination path for the checkpoint file.
    """
    # TODO: Save the checkpoint
    model.cpu()
    classifier_input_size = model.classifier[0].in_features
    # NOTE(review): 'epochs' is hard-coded to 1 regardless of the actual
    # training length -- confirm whether the real epoch count should be saved.
    checkpoint = {'state_dict': model.state_dict(),
                  'input_size': classifier_input_size,
                  'output_size': 102,
                  'epochs': 1,
                  'optimizer_state_dict': optimizer.state_dict(),
                  'class_to_idx': train_data.class_to_idx}
    #print(optimizer.state_dict())
    #print(model.state_dict())
    torch.save(checkpoint, save_dir)
|
#!/usr/bin/env python3
#JSON file containing all print statements that the program outputs to the screen, this includes
#user input as well potential error messages.
#When passed into a class pulling the error messages, this is passed into the ld_json class which converts
#this json into a dictionary. The dictionary is then passed into another class, such as user_input, handler
#or tools. From there it is filtered for the appropriate outputs by calling Ld_json().class_name.
#From there, each individual error message can be accessed like a normal dictionary.
import json
class Ld_json:
    """Load ./app/print_outp.json and expose its top-level keys as attributes.

    Assigning to __dict__ wholesale turns every top-level JSON key into an
    instance attribute, e.g. Ld_json().user_input['txid'].
    """
    def __init__(self):
        with open("./app/print_outp.json") as f:
            self.__dict__ = json.load(f)
#test1 = Ld_json().user_input
#print(test1['txid'])
|
from flask import Flask
from hatchet import Environment
def test_app_is_flask(app):
    # The `app` fixture must yield a real Flask application instance.
    assert isinstance(app, Flask)
def test_app_is_testing_config(app):
    # The fixture is expected to configure the TEST environment.
    assert app.config.get("ENV") == Environment.TEST
def test_app_has_sqlalchemy_connection_string(app):
    # Tests run against an in-memory SQLite database.
    assert app.config.get("SQLALCHEMY_DATABASE_URI") == "sqlite:///:memory:"
|
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
def home(request):
    """Log request details and return a minimal hard-coded HTML page."""
    print(request)  # e.g. shows the request method (POST, GET, DELETE, ...)
    print(dir(request))  # all attributes/methods available on the request
    print(request.get_full_path())  # the requested path
    # BUG FIX: the original markup never closed the <h1> tag.
    return HttpResponse("<!DOCTYPE html><html><head><style>h1{color: red;}</style></head><body><h1>Hello World</h1></body></html>")
# Returns the HTML document to the user via HttpResponse.
# A bare HttpResponse carries one body; the object form below (home1) can
# accumulate several write() calls into a single response.
def home1(request):
    """Demonstrate building one HttpResponse from several write() calls."""
    response = HttpResponse()
    # A custom content type could be passed instead, e.g.
    # HttpResponse(content_type='application/json') or content_type='text/html'.
    response.content = 'Hello Shweta'  # sets the body wholesale
    # Each write() appends another fragment to the same response body.
    response.write('<p>This is response 1</p>')
    response.write('<p>This is response 2</p>')
    response.write('<p>This is response 3</p>')
    response.write('<p>This is response 4</p>')
    return response
def redirect_to_new(request):
    """Redirect the client to the relative URL 'some/new'."""
    return HttpResponseRedirect('some/new')
# Sends the browser to the new URL/page via an HTTP redirect.
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group words that are anagrams of each other.

        Anagrams share the same multiset of characters, so each word's
        sorted character string serves as the hash-table key.

        :param strs: words to group
        :return: list of groups, each a list of mutually anagrammatic words
            (group order follows first appearance)
        """
        groups = {}
        for string in strs:
            key = ''.join(sorted(string))
            groups.setdefault(key, []).append(string)
        # BUG FIX: the original returned ht.values() -- a dict_values view,
        # not the annotated List[List[str]]; callers indexing or comparing
        # the result would break.
        return list(groups.values())
|
from django.conf import settings
import os
# Media type choices for file fields: (stored value, human-readable label).
FILE_TYPE_CHOICES = (
    ('im', 'Image'),
    ('vi', 'Video'),
    ('au', 'Audio'),
)
# Relative path under which the app's static mail assets live.
STATIC_FILE_PATH = os.path.join('mail')
# -*- coding: utf-8 -*-
import numpy as np
class MPCEnv(object):
    """Model-predictive-control environment combining agent dynamics,
    optional renderer(s) and an optional reward system.

    The state advances by explicit Euler integration: x <- x + f(x, u) * dt.
    """
    def __init__(self,
                 dynamics,
                 renderer,
                 reward_system,
                 dt,
                 use_visual_state=False):
        """
        Arguments:
            dynamics:
                Agent dynamics (provides x_dim, u_dim and value(x, u))
            renderer:
                Renderer or list of Renderer (Agent renderer)
            reward_system:
                RewardSystem or None
            dt:
                Timestep
            use_visual_state:
                Whether to use visual state output or not (bool)
        """
        self.dynamics = dynamics
        self.renderer = renderer
        self.reward_system = reward_system
        self.dt = dt
        self.use_visual_state = use_visual_state
        self.reset()
    def reset(self, x_init=None):
        """Reset the environment and return the initial observation.

        Arguments:
            x_init:
                Initial state (None selects the zero state)
        Returns:
            Current state after resetting
        """
        if x_init is None:
            self.x = np.zeros((self.dynamics.x_dim,), dtype=np.float32)
        else:
            self.x = np.copy(x_init)
        if self.reward_system is not None:
            self.reward_system.reset()
        # The initial observation is rendered with a zero control input.
        return self.get_state(action=np.zeros((self.dynamics.u_dim,)))
    def get_state(self, action):
        """Return the current observation: rendered image(s) when
        use_visual_state is set, otherwise the raw state vector."""
        return self.get_visual_state(action) if self.use_visual_state else self.x
    def _render_one(self, renderer, action):
        # Render the control object, then overlay rewards when configured.
        image = renderer.render(self.x, action)
        if self.reward_system is not None:
            self.reward_system.render(image)
        return image
    def get_visual_state(self, action):
        """Render the current state; multiple renderers (multi-angle view)
        yield their images stacked along a new leading axis."""
        if isinstance(self.renderer, list):
            views = [self._render_one(r, action) for r in self.renderer]
            return np.stack(views)
        return self._render_one(self.renderer, action)
    def step(self, action):
        """
        Step forward the environment.
        Arguments:
            action
                Control signal
        Returns:
            (state, reward)
        """
        # Euler step, then evaluate the reward on the *updated* state.
        self.x = self.x + self.dynamics.value(self.x, action) * self.dt
        reward = (self.reward_system.evaluate(self.x, self.dt)
                  if self.reward_system is not None else 0.0)
        return self.get_state(action), reward
    @property
    def u_dim(self):
        """Dimensionality of the control input."""
        return self.dynamics.u_dim
    @property
    def x_dim(self):
        """Dimensionality of the state vector."""
        return self.dynamics.x_dim
|
# This script extract a python list from a data frame column in a CSV file
# In other words in helps dealing from Python to R
import pandas as pd
# NOTE(review): absolute, user-specific path -- this only runs on the
# author's machine; consider passing the path as a CLI argument.
df = pd.read_csv('/Users/Ofix/Documents/Fac/internat/Recherche/projets/synchro/synchroData/Git/INCANT/Data/CSV/studyInfoData/indexList.slideddata.csv')
saved_column = df.x #you can also use df['column_name']
# Print the column as a Python list literal, to be consumed on the R side.
print(saved_column.tolist())
|
from flask import Flask
from flask_apscheduler import APScheduler
import os
import datetime
from .blueprint.home.routes import home_bp, joke
from .blueprint.facebook.routes import facebook_bp, downloadFaces
from .blueprint.admin.routes import admin_bp
def create_app():
    """Application factory: configure Flask, blueprints, DB and the scheduler.

    Returns the configured Flask app. A background APScheduler job runs
    scheduler1 every 300 seconds, with an immediate first run.
    """
    app = Flask(__name__)
    # Database URL and secret key come from the environment.
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_FACEBOOK_FACES')
    # app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@localhost/facebook_faces'
    app.secret_key = os.environ.get('SECRET_KEY_FBFACE')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.register_blueprint(home_bp)
    app.register_blueprint(facebook_bp)
    app.register_blueprint(admin_bp)
    sched = APScheduler()
    sched.init_app(app)
    sched.start()
    with app.app_context() :
        # DB binding and table creation require an application context.
        from .models import db
        db.init_app(app)
        db.create_all()
        # next_run_time=now triggers the first download immediately.
        app.apscheduler.add_job(func=scheduler1, trigger="interval", args=[app], seconds=300, id="download_job_id", next_run_time=datetime.datetime.now())
    return app
def scheduler1(a) :
    """APScheduler job: run the face download inside an app context.

    Args:
        a: the Flask application (needed to push an application context).
    """
    print("STARTING SCHEDULE TASK")
    with a.app_context() :
        res = downloadFaces()
        print(res)
import subprocess
def Run(command_str, sync_output = False):
print "Run: %s"%command_str
if sync_output:
subprocess.call(command_str)
print "----------"
else:
output = subprocess.check_output(command_str)
print "Output: %s"%output
print "----------"
return output
if __name__ == '__main__':
pass |
"""IoT application framework interface
This module provides an interface to the IoT application framework. The
framework provides a simple inter application communication framework and
event system.
The main feature of this module is the IotApp class, which provides bindings
to the IoT application framework C-library. IotApp interfaces with a wrapper
library which implements the actual communication with the IoT application
framework library.
Special types used in documentation:
U: type of user_data field of IotApp. Can be anything.
W: type of data provided to the send_event callback.
Json: anything compatible with Pythons json.loads() and json.dumps()
methods and json-c library (eg. dict, list or string)
NOTE: the data format may be restricted to Python dictionaries
in the future.
"""
from __future__ import print_function # for Python 3 compatibility
import _appfwwrapper as appfwwrapper
import pwd
import json
import inspect
import logging
import traceback
def _verify_signature(func, name, argc):
if func == None or (callable(func) and
len(inspect.getargspec(func)[0]) == argc):
return func
else:
raise TypeError("%s must be callable and accept exactly " +
"%d arguments" % (name, argc))
_logger = logging.getLogger("__appfw__")
class IotApp(object):
"""Bindings to the application framework wrapper library.
NOTE:
In case an exception is raised during the execution of any callback
function, the IotApp aborts the execution of Python and prints
the exception.
If the user wants to terminate the program after receiving an event,
the correct method is the quit the mainloop.
Attributes:
user_data (U): a reference to user defined data which is
delivered to the status callback.
"""
def __init__(self, event_handler=None, status_handler=None,
user_data=None):
"""(event_callback, status_callback, U) -> IotApp
IotApp constructor
The constructor is responsible of establishing a connection to the
application framework and initializing the wrapper library.
Args:
event_handler (event_handler_callback): callback
function which is called when an event is received.
status_handler (status_handler_callback): callback
function called when the event subscriptions are updated.
user_data (U): initial user defined data which is provided to
status callback invocations.
"""
_logger.debug("__init__ IotApp")
self.event_handler = event_handler
self.status_handler = status_handler
self.user_data = user_data
appfwwrapper.init(self, self._event_handler,
self._status_handler, self._send_handler,
self._list_handler)
# has to be called after initialization!
self.subscriptions = set()
# dictionaries and counter for 'send' and 'list' callbacks
self._callbacks = {}
self._arguments = {}
self._callback_id = 0
def __del__(self):
_logger.debug("__del__ IotApp")
appfwwrapper.clear()
def enable_signals(self):
"""(None) -> None
Request the delivery of certain signals as IoT events.
Request the delivery of SIGHUP and SIGTERM signals as IoT events.
The events are delivered to the 'event_handler'.
"""
appfwwrapper.enable_signals()
def send_event(self, event, data, callback=None,
callback_data=None, **target):
"""(str, json, send_callback, W) -> None
Send a new event to the application framework.
args:
event (str): the name of event.
data (Json): data attached to the event.
callback (func(id, status, msg, callback_data) -> None): A
callback function which is invoked after the emitting the
events has finished. Signature specification below.
callback_data (W): Data supplied to the callback function.
**target: Keywords used to specify the target application(s).
Recognized keywords are:
- label (str): SMACK label
- appid (str): application id
- binary (str): executed binary
- user (str): the name of the (linux) user of the target
- process (int): the process id.
NOTE: default values are interpreted as
broadcasting.
NOTE: at least one keyword argument must be
specified.
Send callback specification:
func(id, status, msg, user_data) -> None
id (int): Internal application framework message ID
status (int): ???
msg (str): ???
callback_data (W): Data provided to the 'send_event' function as
callback_data
"""
string_data = json.dumps(data)
_logger.debug(str(string_data))
# Remove Nones from target
target = dict(filter(lambda item: item[1] is not None,
target.items()))
if (callback != None):
_verify_signature(callback, "Send callback", 4)
self._callbacks[self._callback_id] = callback
self._arguments[self._callback_id] = callback_data
if ('user' in target and target['user']):
target['user'] = pwd.getpwnam(target['user']).pw_uid
appfwwrapper.send_event(event=event, json_string=string_data,
send_id=self._callback_id, **target)
self._callback_id += 1
def update_subscriptions(self):
"""(None) -> None
Send the current set of subscriptions to the application
framework.
NOTE:
This method must be called manually if the subscriptions are
modified in place.
"""
appfwwrapper.subscribe_events(list(self.subscriptions))
def __list(self, list_function, callback, callback_data=None):
"""(list_function, list_callback, W) -> None
Helper function for list_running and list_all. Contains the common
functionality in order to avoid code duplication.
args:
list_function (func(callback_id))
actual framework function to be called
callback (func(app_list, id, status, msg, callback_data) -> None):
Callback function. See list_all or list_running for
documentation
callback_data (W): Data supplied to the callback function.
"""
try:
_logger.debug("appfw, __list")
_verify_signature(callback, "List callback", 5)
self._callbacks[self._callback_id] = callback
self._arguments[self._callback_id] = callback_data
list_function(self._callback_id)
self._callback_id += 1
except Exception as e:
traceback.print_exc()
print("Zero status was returned from iot_app_list_* C-API")
print(e.message)
sys.exit(1)
def list_running(self, callback, callback_data=None):
"""Send a request for the list of running applications to the
application framework. Callback argument count is verified.
args:
callback (func(app_list, id, status, msg, callback_data) -> None)
A callback function which is invoked eventually. See
specification below.
callback_data (W): Data supplied to the callback function.
List callback specification:
func(app_list, id, status, msg, callback_data) -> None
app_list: List of applications. List contains dictionaries with
strings 'appid' and 'desktop' as keys and associated values
which are either string or None
id (int): Internal application framework message ID
status (int): ???
msg (str): ???
callback_data (W): Data provided to the 'list_running' function as
callback_data
"""
_logger.debug("appfw, list_running")
self.__list(appfwwrapper.list_running, callback, callback_data)
def list_all(self, callback, callback_data=None):
"""Send a request for the list of running applications to the
application framework. Callback argument count is verified.
args:
callback (func(app_list, id, status, msg, callback_data) -> None)
A callback function which is invoked eventually. See
specification below.
callback_data (W): Data supplied to the callback function.
List callback specification:
func(app_list, id, status, msg, callback_data) -> None
app_list: List of applications. List contains dictionaries with
strings 'appid' and 'desktop' as keys and associated values
which are either string or None
id (int): Internal application framework message ID
status (int): ???
msg (str): ???
callback_data (W): Data provided to the 'list_all' function as
callback_data
"""
_logger.debug("appfw, list_running")
self.__list(appfwwrapper.list_all, callback, callback_data)
def _enable_debug(self, debug=["*"]):
logging.basicConfig()
_logger.setLevel(logging.DEBUG)
_logger.debug("enable_debug")
_logger.debug(debug)
appfwwrapper.enable_debug(debug)
def _event_handler(self, event, data):
_logger.debug("Python internal event callback")
try:
json_data = json.loads(data)
if (self._external_event_handler != None):
self._external_event_handler(event, json_data)
except Exception as e:
# If exceptions are not caught here the wrapper library destroys
# them.
traceback.print_exc()
print("Event handler threw an exception after receiving a " +
"callback. Aborting:")
print(e.message)
sys.exit(1)
def _status_handler(self, seqno, status, msg, data):
_logger.debug("Python internal status callback")
try:
json_data = json.loads(data)
if (self._external_status_handler != None):
self._external_status_handler(seqno, status, msg,
json_data, self.user_data)
except Exception as e:
traceback.print_exc()
print("Status handler threw an exception after receiving a " +
"callback. Aborting:")
print(e.message)
sys.exit(1)
def _send_handler(self, callback_id, id, status, msg):
_logger.debug("Python internal send callback")
try:
if (callback_id in self._callbacks):
self._callbacks[callback_id](
id, status, msg, self._arguments[callback_id])
del self._callbacks[callback_id]
del self._arguments[callback_id]
except Exception as e:
traceback.print_exc()
print("Send handler threw an exception after receiving a " +
"callback. Aborting:")
print(e.message)
sys.exit(1)
def _list_handler(self, callback_id, id, status, msg, apps):
_logger.debug("Python internal list callback")
try:
if (callback_id in self._callbacks):
self._callbacks[callback_id](
apps, id, status, msg, self._arguments[callback_id])
del self._callbacks[callback_id]
del self._arguments[callback_id]
except Exception as e:
traceback.print_exc()
print("List handler threw an exception after receiving a " +
"callback. Aborting:")
print(e.message)
sys.exit(1)
    @property
    def event_handler(self):
        """func(event, data) -> None: a callback function which is invoked
        when an event is received. None until a handler has been assigned.

        Event callback specification:
            event (str): The name of an event.
            data (Json): The data associated with the event.
        """
        return self._external_event_handler
    @event_handler.setter
    def event_handler(self, handler):
        # _verify_signature raises unless the handler accepts exactly
        # 2 arguments (event, data).
        self._external_event_handler = _verify_signature(handler,
                                                         "Event handler",
                                                         2)
    @property
    def status_handler(self):
        """func(seqno, status, msg, data, user_data) -> None: a callback
        function which is invoked after event subscriptions. None until a
        handler has been assigned.

        Status callback specification:
            seqno (int): Application framework sequence number of associated
                request.
            status (int): Request status (0 ok, non-zero error).
            msg (str): Error message.
            data (Json): Optional request-specific status data.
            user_data (U): A reference to 'user_data' supplied to the IotApp
                instance
        """
        return self._external_status_handler
    @status_handler.setter
    def status_handler(self, handler):
        # _verify_signature raises unless the handler accepts exactly
        # 5 arguments (seqno, status, msg, data, user_data).
        self._external_status_handler = _verify_signature(handler,
                                                          "Status handler",
                                                          5)
    @property
    def subscriptions(self):
        """iterable: the set of events this IotApp instance is subscribed
        to.

        There are two ways to modify the subscriptions of an IotApp:

        1. By assigning a list of event names (or a single string) to the
           'subscriptions' field, the IotApp automatically updates the
           subscriptions on the application framework server.
        2. By modifying 'subscriptions' in place, in which case the server
           is only updated after an explicit update_subscriptions() call.

        NOTE: If a new list of events is assigned, a call to the
        status_callback will occur.

        Examples:
            >>> app = appfw.IotApp()
            >>> app.subscriptions = ["cat_event", "dog_event"]

            >>> app = appfw.IotApp()
            >>> app.subscriptions = "fox_event"

            >>> app = appfw.IotApp()
            >>> app.subscriptions.add("frog_event")
            >>> app.subscriptions.add("seal_event")
            >>> app.subscriptions.remove("frog_event")
            >>> app.update_subscriptions()
        """
        return self.__subscriptions
@subscriptions.setter
def subscriptions(self, subscriptions):
if (isinstance(subscriptions, str)):
self.__subscriptions = set({subscriptions})
else:
self.__subscriptions = set(subscriptions)
self.update_subscriptions()
|
from app import app

# Uploaded binaries are stored under /tmp; NOTE(review): world-writable
# location — confirm acceptable for this deployment.
app.config['UPLOAD_FOLDER'] = '/tmp/codehost/uploads/binaries'
# NOTE(review): debug=True together with host 0.0.0.0 exposes the Werkzeug
# debugger to the network — confirm this entry point is development-only.
app.run(host='0.0.0.0',debug=True)
# Import necessary libraries
import pandas as pd
def prep_data(df):
    '''
    Function to dummy-variable (one-hot) encode all categorical columns and
    reorder the columns into the order the downstream model expects.

    Input: Cleaned dataframe
    Output: Dataframe with dummy variables in correct order

    NOTE(review): the hard-coded column selection assumes every category
    value appears in df; a missing value would raise KeyError — confirm the
    upstream cleaning guarantees all categories are present.
    '''
    # Dummy variables for categorical data columns (drop_first avoids one
    # redundant, collinear level per categorical).
    df = pd.get_dummies(df, columns = ['Age', 'Gender', 'Education_level', 'Country', 'Ethnicity'],
                        drop_first = True)
    # Reorder dataframe columns: dummies first, then scores and drug columns.
    df = df[['Age_25-34', 'Age_35-44', 'Age_45-54', 'Age_55-64', 'Age_65+', 'Gender_Male', 'Education_level_17',
             'Education_level_18', 'Education_level_< 16', 'Education_level_Associates degree',
             'Education_level_Bachelors degree', 'Education_level_Doctorate degree', 'Education_level_Masters degree',
             'Education_level_Some college', 'Country_Canada', 'Country_Ireland', 'Country_New Zealand', 'Country_Other',
             'Country_UK', 'Country_USA', 'Ethnicity_Black', 'Ethnicity_Mixed-Black/Asian', 'Ethnicity_Mixed-White/Asian',
             'Ethnicity_Mixed-White/Black', 'Ethnicity_Other', 'Ethnicity_White', 'Neuroticism_score', 'Extraversion_score',
             'Openness_score', 'Agreeableness_score', 'Conscientiousness_score', 'Impulsiveness', 'Sensation_seeing',
             'Semer_fake_drug', 'Alcohol', 'Amphet', 'Amyl', 'Benzos', 'Caffeine', 'Cannabis', 'Chocolate', 'Cocaine',
             'Crack', 'Ecstacy', 'Heroin', 'Ketamine', 'Legal_highs', 'LSD', 'Meth', 'Mushrooms', 'Nicotine']]
    return df
pub = "CD5F8A24C7605008897A3C922C0E812E769DE0A46442C350CB78C7868539F3D38AAC80B3E6A506605910E8599806B4D1D148F2F6B81DA04796A8A5AEE18F29E83E16775A2A0A00870541F6574ED1438636AE0A0C116E07104F48F72094863A3869E1C8FC220627278962FB22873E3156F18E55DEC94E970064EC7F4E0E88454012E2FD5DFE5F8D19BF170F9CCB3F46E0FD1019BCB02D9083A0703C617F996379E6478354A73AE6E6ACBCE1F4333ECFAF24366A3E977D3CD3CBFE8D8A387BD876BFDAB8488F6F47BF1FBE33010FD2D7E22B4DB2E567783CE0B606DB86B93759714C4F6396A7FB9F74C4021043B0F3D46D2633EBD43A877863DF7D680F506587C119DD64100CA831CE2AF33D951B524C5F06B49F5BF2CB381E74181930D06A80505C06ABD5BF4870F0C9FB581BD80DBA889660639F936EDEA8FE5D0C9EAE58062ED693252583C71CC782BA613E01438E69B43F9E64ECA84F9EA04E811AD7B39EFD7876D1B6B501C4F48ACCE6F24239F6C04028788135CD88C3D15BE0F2EBB7DE9E9C19A7A93037005EE0A9A640BADA332EC0D05EE9F08A832354A0487A927D5E88066E2569E6C5D4688E422BFA0B27C6171C6D7BF029BFD9165752AF19AA71B33A1EA70B6C371FB21E47F527D80B7D04F582AD9F9935AF723682DC01CA9880621870DECB7AD15648CDF4EF153016F3E6D87933B8EC54CFA1FDF87C467020A3E753"
# BUG FIX: `long` only exists on Python 2; `int` handles arbitrary precision
# on both Python 2 and 3.
# Parse the RSA public modulus from its hex representation.
n = int(pub, 16)
# Check three candidate factors stored in files "myI0".."myI2":
# n % m == 0 means the candidate divides the modulus.
for i in range(0, 3):
    # BUG FIX: use a context manager so each file handle is closed.
    with open("myI" + str(i), "r") as f:
        m = int(f.read())
    q = n // m  # integer cofactor (kept for parity with the original)
    print(n % m)
import turtle

# Draw a filled regular polygon from user input (side count, side length,
# border and fill colours) and wait for a click before closing the window.
tartaruga = turtle.Turtle()
tartaruga.hideturtle()
campo = turtle.Screen()
tartaruga.pensize(3)
lados = int(input('Introduza o número de lados do polígono: '))
comprimento = int(input('Introduza o comprimento dos lados do polígono: '))
cor_borda = input('Introduza a cor das bordas do polígono: ')
cor_interior = input('Introduza a cor do interior do polígono: ')
# BUG FIX: removed a second, redundant turtle.Screen() call here — Screen()
# is a singleton, so the duplicate only re-fetched the same object.
# Interior angle of a regular polygon; the turtle turns by its supplement.
angulo_inicial = float((180*(lados-2)/lados))
angulo_final = float(180-angulo_inicial)
tartaruga.color(cor_interior)
tartaruga.pencolor(cor_borda)
tartaruga.begin_fill()
for i in range(lados):
    tartaruga.forward(comprimento)
    tartaruga.left(angulo_final)
tartaruga.end_fill()
campo.exitonclick()
import collections
import os
import sys
import unittest
from importlib import import_module
from airflow.models import DagBag, DAG
class TestDagIntegrity(unittest.TestCase):
    """Static sanity checks over every DAG definition in DAG_FOLDER."""

    LOAD_SECOND_THRESHOLD = 2  # NOTE(review): unused in this class — confirm before removing
    DAG_FOLDER = "src/dags"

    def setUp(self):
        # Parse all DAG files once per test; Airflow's bundled examples excluded.
        self.dagbag = DagBag(dag_folder=TestDagIntegrity.DAG_FOLDER, include_examples=False)

    def test_import_dags(self):
        """Every DAG file must import without errors."""
        self.assertFalse(
            len(self.dagbag.import_errors),
            'DAG import failures. Errors: {}'.format(
                self.dagbag.import_errors
            )
        )

    def test_name_uniqness(self):
        """DAG ids must be unique across files (misspelled name kept so CI
        filters referencing it keep working)."""
        sys.path.insert(0, TestDagIntegrity.DAG_FOLDER)
        dags = []
        dag_paths = {}
        for f in os.listdir(TestDagIntegrity.DAG_FOLDER):
            if f.startswith((".", "_")):
                continue
            # assumes every remaining entry is a .py module — TODO confirm
            m = import_module(f[:-3])
            for dag in list(m.__dict__.values()):
                if isinstance(dag, DAG):
                    # BUG FIX: the original used m.dag / m.dag.dag_id, which
                    # raises AttributeError whenever the module's DAG variable
                    # is not literally named "dag"; use the object just found.
                    dags.append(dag)
                    dag_id = dag.dag_id
                    if dag_id not in dag_paths:
                        dag_paths[dag_id] = []
                    dag_paths[dag_id].append(f)
        dag_ids = [dag.dag_id for dag in dags]
        self.assertEqual(len(dag_ids), len(set(dag_ids)), "There is DAGs with the same ID. Duplicates are {0}.".format(
            [(item, dag_paths[item]) for item, count in collections.Counter(dag_ids).items() if count > 1]))

    def test_empty_dag(self):
        """Each project DAG must contain at least one task."""
        for dag_id, dag in self.dagbag.dags.items():
            if dag.folder == TestDagIntegrity.DAG_FOLDER:
                self.assertTrue(len(dag.tasks) >= 1,
                                "In dag '{0}' there is no tasks".format(dag_id, dag))

    def test_add_dependencies(self):
        """No task may be isolated (no upstream and no downstream relatives)."""
        for dag_id, dag in self.dagbag.dags.items():
            lone_wolfs = []
            if len(dag.tasks) <= 1 or dag.folder != TestDagIntegrity.DAG_FOLDER:
                continue
            for task in dag.tasks:
                relatives = task.get_direct_relative_ids(upstream=False).union(
                    task.get_direct_relative_ids(upstream=True))
                if not relatives:
                    lone_wolfs.append(task)
            self.assertEqual(len(lone_wolfs), 0,
                             "In dag '{0}' ({1}) there is tasks ({2}) without dependencies.".format(dag_id,
                                                                                                    dag.filepath,
                                                                                                    lone_wolfs))
|
import torch
import numpy as np
import os
import numpy as np
import matplotlib.pyplot as plt
try:
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
from rdkit import RDLogger
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
ZINC250_BOND_DECODER = {1: Chem.rdchem.BondType.SINGLE, 2: Chem.rdchem.BondType.DOUBLE, 3: Chem.rdchem.BondType.TRIPLE}
RDKIT_IMPORTED = True
except:
print("[!] WARNING: rdkit could not be imported. No evaluation and visualizations of molecules will be possible.")
RDKIT_IMPORTED = False
try:
import cairosvg
SVG2PDF_IMPORTED = True
except:
print("[!] WARNING: cairosvg could not be imported. Visualizations of molecules cannot be converted to pdf.")
SVG2PDF_IMPORTED = False
def plot_dataset_statistics(dataset_class, data_root="../data/", show_plots=True):
    """Print (and optionally plot) node-count, node-type and edge-type
    statistics of a graph dataset.

    Args:
        dataset_class: Dataset exposing DATASET_NODES / DATASET_ADJENCIES
            arrays plus load_dataset(), get_length_prior() and
            num_edge_types().
        data_root (str): Root directory to load the dataset from.
        show_plots (bool): When True, also show matplotlib histograms.
    """
    dataset_class.load_dataset(data_root=data_root)
    log_length_prior = dataset_class.get_length_prior(data_root=data_root)
    # Padding entries in DATASET_NODES are negative, so counting >= 0 per row
    # yields the number of real nodes per graph.
    length_distribution = (dataset_class.DATASET_NODES >= 0).sum(axis=1).astype(np.int32)
    node_distribution = dataset_class.DATASET_NODES[np.where(dataset_class.DATASET_NODES >= 0)]
    # Each undirected edge appears twice in the symmetric adjacency, hence /2.
    edge_distribution = [((dataset_class.DATASET_ADJENCIES == i).sum(axis=2).sum(axis=1)/2).astype(np.int32) for i in range(1, dataset_class.num_edge_types()+1)]
    ##################################
    ## Number of nodes distribution ##
    ##################################
    if show_plots:
        ax = visualize_histogram(data=length_distribution,
                                 bins=log_length_prior.shape[0],
                                 xlabel="Number of nodes",
                                 ylabel="Number of graphs",
                                 title_text="Node count distribution")
        ax.set_xlim(5, 38)
        plt.tight_layout()
        plt.show()
    length_count = np.bincount(length_distribution)
    print("="*40)
    print("Number of molecules per graph size")
    print("-"*40)
    for i in range(log_length_prior.shape[0]):
        print("Graph size %i: %i" % (i, length_count[i]))
    print("="*40)
    ############################
    ## Node type distribution ##
    ############################
    if show_plots:
        ax = visualize_histogram(data=node_distribution,
                                 bins=np.max(node_distribution)+1,
                                 xlabel="Node type",
                                 ylabel="Number of nodes",
                                 title_text="Node type distribution",
                                 add_stats=False)
        plt.tight_layout()
        plt.show()
    node_count = np.bincount(node_distribution)
    node_log_prob = np.log(node_count) - np.log(node_count.sum())
    print("="*40)
    print("Distribution of node types")
    print("-"*40)
    for i in range(node_log_prob.shape[0]):
        # bpd = bits per dimension (log2 of the probability, negated).
        print("Node %i: %6.3f%% (%i) -> %4.2fbpd" % (i, np.exp(node_log_prob[i])*100.0, node_count[i], -(np.log2(np.exp(1))*node_log_prob[i])))
    print("="*40)
    ############################
    ## Edge type distribution ##
    ############################
    if show_plots:
        ax = visualize_histogram(data=edge_distribution,
                                 bins=max([np.max(d) for d in edge_distribution])+1,
                                 xlabel="Number of edges per type",
                                 ylabel="Number of graphs",
                                 title_text="Edge type distribution")
        plt.tight_layout()
        plt.show()
    # Total number of node pairs — edge type 0 ("no edge") is the remainder.
    edge_overall_count = (length_distribution * (length_distribution-1) / 2).sum()
    edge_count = np.array([d.sum() for d in edge_distribution])
    edge_count_sum = edge_count.sum()
    edge_count = np.concatenate([np.array([edge_overall_count-edge_count_sum]), edge_count], axis=0)
    edge_log_prob = np.log(edge_count) - np.log(edge_overall_count)
    print("="*40)
    print("Distribution of edge types")
    print("-"*40)
    for i in range(edge_count.shape[0]):
        print("Edge %i: %4.2f%% (%i) -> %4.2fbpd" % (i, np.exp(edge_log_prob[i])*100.0, edge_count[i], -(np.log2(np.exp(1))*edge_log_prob[i])))
    print("="*40)
def visualize_histogram(data, bins, xlabel, ylabel, title_text, val_range=None, add_stats=True):
    """Render a histogram (or grouped histograms for a list of arrays) and
    return the matplotlib axis.

    Args:
        data: 1-D array, or list of arrays plotted as grouped bars.
        bins (int): Number of bins; the default range is [0, bins-1].
        xlabel / ylabel / title_text (str): Axis and title labels.
        val_range: Optional (min, max) range override for the histogram.
        add_stats (bool): When True, draw mean and median lines
            (only meaningful for a single array).
    """
    title_font = {'fontsize': 20, 'fontweight': 'bold'}
    axis_font = {'fontsize': 16, 'fontweight': 'medium'}
    ticks_font = {'fontsize': 12, 'fontweight': 'medium'}
    fig, ax = plt.subplots(1, 1, figsize=(10,6))
    if val_range is None:
        val_range = (0, bins-1)
    if isinstance(data, list):
        ax.hist(data, bins=bins, range=val_range, alpha=0.8, label=["data_%i"%i for i in range(len(data))])
    else:
        ax.hist(data, bins=bins, range=val_range, alpha=0.6)
        if add_stats:
            # ymax < 1 keeps the marker lines below the title area.
            ax.axvline(data.mean(), color='r', linewidth=2, label="Mean", ymax=0.9)
            ax.axvline(np.median(data), color='b', linewidth=2, label="Median", ymax=0.9)
    ax.set_xlabel(xlabel, fontdict=axis_font)
    ax.set_ylabel(ylabel, fontdict=axis_font)
    ax.tick_params(axis='both', labelsize=ticks_font["fontsize"])
    ax.set_title(title_text, fontdict=title_font)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    if add_stats or isinstance(data, list):
        plt.legend()
    return ax
def analyse_generations(dataset_class, nodes, adjacency, length):
    """Evaluate generated graphs and print the evaluation metrics together
    with the empirical node- and edge-type distributions of the generations.

    Args:
        dataset_class: Dataset providing evaluate_generations().
        nodes: [num_graphs, max_nodes] generated node types.
        adjacency: [num_graphs, max_nodes, max_nodes] generated edge types.
        length: [num_graphs] number of valid nodes per graph.

    Returns:
        Per-graph 0/1 validity list from the dataset's evaluation.
    """
    eval_dict, valid_list = dataset_class.evaluate_generations(nodes, adjacency, length=length, full_valid_list=True)
    print("="*50)
    print("Eval dict")
    print("-"*50)
    for key in eval_dict:
        print("%s: %s" % (str(key), str(eval_dict[key])))
    print("Valid list: %s" % str(valid_list[:10]))
    print("-"*50 + "\n")
    # Flatten all valid (non-padding) node entries across graphs.
    node_list = np.concatenate([nodes[i,:length[i]] for i in range(length.shape[0])], axis=0)
    node_distribution = np.bincount(node_list)
    print("="*50)
    print("Predicted node distribution")
    print("-"*50)
    for i in range(node_distribution.shape[0]):
        print("Node %i: %4.2f%% (%i)" % (i, node_distribution[i]*100.0/node_list.shape[0], node_distribution[i]))
    print("-"*50 + "\n")
    # Flatten the valid adjacency sub-matrices (both edge directions counted).
    edge_list = np.concatenate([adjacency[i,:length[i],:length[i]].reshape(-1) for i in range(length.shape[0])], axis=0)
    edge_distribution = np.bincount(edge_list)
    print("="*50)
    print("Predicted edge distribution")
    print("-"*50)
    for i in range(edge_distribution.shape[0]):
        print("Edge %i: %4.2f%% (%i)" % (i, edge_distribution[i]*100.0/edge_list.shape[0], edge_distribution[i]))
    print("-"*50 + "\n")
    return valid_list
def find_largest_submolecule(nodes, adjacency):
    """Return nodes/adjacency restricted to the largest connected component.

    `nodes` uses negative entries as padding; non-zero entries of the square
    `adjacency` matrix mark bonds.
    """
    # Binarize the adjacency and add self-loops so every node reaches itself.
    reach = ((adjacency + np.eye(adjacency.shape[0])) > 0).astype(np.float32)
    num_valid = (nodes >= 0).sum()
    nodes = nodes[:num_valid]
    reach = reach[:num_valid, :num_valid]

    def _component_mask(seed):
        # Fixed-point propagation: grow the reachable set until it stabilizes.
        mask = np.zeros(nodes.shape, dtype=np.float32)
        prev = mask.copy()
        mask[seed] = 1
        while np.abs(prev - mask).sum() > 0.0:
            prev = mask.copy()
            mask = ((reach * mask[None, :]).sum(axis=-1) > 0).astype(np.float32)
        return mask

    covered = _component_mask(seed=0)
    best = np.where(covered)[0]
    # Keep searching only while the uncovered remainder could still hold a
    # strictly larger component.
    while best.shape[0] < covered.shape[0] - covered.sum():
        seed = int(np.flatnonzero(covered == 0.0)[0])
        mask = _component_mask(seed=seed)
        covered = mask + covered
        candidate = np.where(mask)[0]
        if candidate.shape[0] > best.shape[0]:
            best = candidate
    nodes = nodes[best]
    adjacency = adjacency[best][:, best]
    return nodes, adjacency
def calculate_node_distribution(dataset_class):
    """Log-probabilities of node types over the dataset.

    Padding entries (negative values) in DATASET_NODES are excluded before
    counting.
    """
    valid_nodes = dataset_class.DATASET_NODES[np.where(dataset_class.DATASET_NODES >= 0)]
    counts = np.bincount(valid_nodes)
    return np.log(counts) - np.log(counts.sum())
def calculate_edge_distribution(dataset_class):
    """Log-probabilities of the present edge types over the dataset.

    For each edge type i in 1..num_edge_types, counts undirected edges
    (each edge appears twice in the symmetric adjacency, hence /2) and
    normalizes the counts to log-probabilities.
    """
    # NOTE: removed an unused length-distribution computation that the
    # original carried over from plot_dataset_statistics.
    edge_distribution = [((dataset_class.DATASET_ADJENCIES == i).sum(axis=2).sum(axis=1)/2).astype(np.int32) for i in range(1, dataset_class.num_edge_types()+1)]
    edge_count = np.array([d.sum() for d in edge_distribution])
    edge_log_prob = np.log(edge_count) - np.log(edge_count.sum())
    return edge_log_prob
def evaluate_generations(dataset_class, nodes, adjacency, length=None, full_valid_list=False, **kwargs):
    """Compute validity / uniqueness / novelty ratios for generated graphs.

    Two passes are made: one requiring fully connected molecules, and one
    allowing the largest sub-molecule (those metrics are prefixed "submol_").

    Args:
        dataset_class: Dataset providing graph_to_mol, _check_validity and
            _check_novelty.
        nodes / adjacency: Generated node and adjacency tensors
            (torch.Tensor or numpy array).
        length: Optional per-graph node counts used to crop padding.
        full_valid_list: When True, also return the per-graph 0/1 validity
            list (from the last, submolecule pass).

    Returns:
        dict of metrics, or (dict, list) when full_valid_list is True.
        Empty dict when rdkit could not be imported.
    """
    global RDKIT_IMPORTED
    if isinstance(nodes, torch.Tensor):
        nodes = nodes.detach().cpu().numpy()
    if isinstance(adjacency, torch.Tensor):
        adjacency = adjacency.detach().cpu().numpy()
    if length is not None and isinstance(length, torch.Tensor):
        length = length.detach().cpu().numpy()
    if not RDKIT_IMPORTED:
        print("Skipped evaluation of generated molecules due to import error...")
        return {}
    eval_dict = {}
    for allow_submolecule in [False, True]:
        if length is not None:
            all_mols = [dataset_class.graph_to_mol(nodes[i,:length[i]], adjacency[i,:length[i],:length[i]], allow_submolecule) for i in range(nodes.shape[0])]
        else:
            all_mols = [dataset_class.graph_to_mol(nodes[i], adjacency[i], allow_submolecule) for i in range(nodes.shape[0])]
        # `valid` is first the validity-check results, then re-bound to the
        # subset of molecules that passed the check.
        valid = [dataset_class._check_validity(mol) for mol in all_mols]
        binary_valid = [1 if mol is not None else 0 for mol in valid]
        valid = [mol for mol, v in zip(all_mols, binary_valid) if v==1]
        valid_ratio = len(valid)*1.0/len(all_mols)
        valid_smiles = [Chem.MolToSmiles(mol) for mol in valid]
        unique_smiles = list(set(valid_smiles))
        # max(..., 1e-5) guards against division by zero when nothing is valid.
        unique_ratio = len(unique_smiles)*1.0/(max(len(valid_smiles), 1e-5))
        novel = [(1 if dataset_class._check_novelty(mol=None, smiles=sm) else 0) for sm in valid_smiles]
        novelty_ratio = sum(novel)*1.0/(max(len(novel), 1e-5))
        inner_eval_dict = {
            "valid_ratio": valid_ratio,
            "unique_ratio": unique_ratio,
            "novelty_ratio": novelty_ratio
        }
        if allow_submolecule:
            inner_eval_dict = {"submol_" + key: inner_eval_dict[key] for key in inner_eval_dict}
        eval_dict.update(inner_eval_dict)
    if full_valid_list:
        return eval_dict, binary_valid
    else:
        return eval_dict
def visualize_molecule(mol, filename="test_img"):
    """Write a 2-D rendering of an rdkit molecule to <filename>.svg, and to
    <filename>.pdf as well when cairosvg is available.

    Args:
        mol: rdkit molecule object.
        filename (str): Output path without extension.
    """
    global RDKIT_IMPORTED, SVG2PDF_IMPORTED
    if not RDKIT_IMPORTED:
        print("[!] WARNING: Skipped visualization of molecules as rdkit is not imported.")
        return
    # Compute2DCoords mutates mol in place; its return value is unused.
    tmp=AllChem.Compute2DCoords(mol)
    Draw.MolToFile(mol, filename+".svg", size=(400,400))
    if SVG2PDF_IMPORTED:
        cairosvg.svg2pdf(url=filename+".svg", write_to=filename+".pdf")
def calculate_length_prior(nodes):
    """Smoothed log-prior over graph sizes from the padded node array.

    Padding entries are negative, so counting the non-negative entries per
    row gives each graph's size.
    """
    sizes = (nodes >= 0).sum(axis=1).astype(np.int32)
    counts = np.bincount(sizes).astype(np.float32) + 1e-5  # avoid log(0)
    return np.log(counts) - np.log(counts.sum())
from the_import import ProvincialClass as pc, imported_func
class Abra():
    """Fixture class: constructing it immediately invokes cadabra()."""
    def __init__(self):
        self.cadabra()
    def cadabra(self):
        # Side effect only: prints a marker line.
        print("cadabra")
def b():
    # Instantiate Abra purely for its side effect (prints "cadabra").
    Abra()
b()
pc()
# NOTE(review): HiddenClass is neither imported nor defined in this file, so
# this line raises NameError at runtime — presumably a deliberate fixture for
# dead-code / defensive-call analysis; confirm before "fixing".
HiddenClass() # this is probably too defensive
imported_func()
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 12:30:09 2018
@author: Laura
"""
from measures.algorithms.fair_ranker.runRankFAIR import initPAndAlpha, calculateP
from measures.algorithms.fair_ranker.test import FairnessInRankingsTester
def fairnessTestAtK(dataSetName, ranking, protected, unProtected, k):
    """
    Calculates at which prefix the ranking starts to be unfair with respect to
    the proportion of the protected group in the ranking. We use the statistical
    test used for FA*IR to receive that prefix. We then normalize the prefix
    with respect to the size of the given ranking (k). We will refer to that
    measure as FairnessAtK.

    @param dataSetName: Name of the data set, used to notify the user for which
        data set a bigger p is needed if the proportions for that are too small.
    @param ranking: list with candidates in the whole ranking
    @param protected: list of candidate objects with membership of protected
        group from the original data set
    @param unProtected: list of candidate objects with membership of
        non-protected group from the original data set
    @param k: truncation point/length of the ranking

    return the value for FairnessAtK
    """
    ranking = ranking[:k]
    # Initialize the candidate (p, alpha) pairs for prefix length k.
    pairsOfPAndAlpha = initPAndAlpha(k)
    # Percentage of protected items in the data set.
    p = calculateP(protected,unProtected,dataSetName,k)
    pair = [item for item in pairsOfPAndAlpha if item[0] == p][0]
    gft = FairnessInRankingsTester(pair[0], pair[1], k, correctedAlpha=True)
    # t: first position at which the prefix fails the ranked group fairness
    # test; whole_fair is True when the entire ranking passes.
    # IDIOM FIX: call the method on the instance rather than unbound through
    # the class; also replaced the `m == False` comparison with `not m`.
    t, whole_fair = gft.ranked_group_fairness_condition(ranking)
    if not whole_fair:
        # Calculate and normalize Fairness@k by the ranking length.
        return t/len(ranking)
    else:
        # Return 1.0 if everything is fair.
        return 1.0
|
import os
import json
import pandas as pd
from dotenv import load_dotenv
from pathlib import Path
from model import SiameseBiLSTM
from preprocess import build_vocab_and_transform, build_embeddings, build_train_data, build_test_data, build_padded_data
from evaluation import precision_m, recall_m, f1_m, confusion_matrix_m
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# Data locations and model hyper-parameters, all sourced from the .env file.
# NOTE(review): int()/float() raise TypeError when a key is missing from the
# environment — confirm .env always defines every key below.
TRAIN_PATH = os.getenv('TRAIN_PATH')
TEST_PATH = os.getenv('TEST_PATH')
WORD2VEC_PATH = os.getenv('WORD2VEC_PATH')
EMBEDDING_DIM = int(os.getenv('EMBEDDING_DIM'))
MAX_SEQ_LEN = int(os.getenv('MAX_SEQ_LEN'))
VAL_SIZE = float(os.getenv('VAL_SIZE'))
NUM_LSTM = int(os.getenv('NUM_LSTM'))
NUM_HIDDEN = int(os.getenv('NUM_HIDDEN'))
LSTM_DROPOUT = float(os.getenv('LSTM_DROPOUT'))
HIDDEN_DROPOUT = float(os.getenv('HIDDEN_DROPOUT'))
LEARNING_RATE = float(os.getenv('LEARNING_RATE'))
PATIENCE = int(os.getenv('PATIENCE'))
EPOCHS = int(os.getenv('EPOCHS'))
BATCH_SIZE = int(os.getenv('BATCH_SIZE'))
if __name__ == "__main__":
    # End-to-end pipeline: load data, build vocab/embeddings, train the
    # Siamese BiLSTM, evaluate on the test split, persist results as JSON.
    print('LOAD DATA ... ')
    train_df = pd.read_csv(TRAIN_PATH)
    test_df = pd.read_csv(TEST_PATH)
    print('DATA LOADED...')
    # The two text columns compared by the Siamese network.
    columns = ['generated_text', 'compared_text']
    print('BUILD VOCABULARY AND TRANSFORM TRAIN DATA ... ')
    train_df, vocab, inv_vocab, w2v = build_vocab_and_transform(train_df, columns, WORD2VEC_PATH, True)
    print('BUILD EMBEDDINGS ... ')
    embeddings = build_embeddings(w2v, EMBEDDING_DIM, vocab)
    print('BUILD TRAIN DATA ... ')
    X_train, X_val, Y_train, Y_val = build_train_data(train_df, columns, VAL_SIZE)
    print('PAD DATA ... ')
    X_train, X_val = build_padded_data(X_train, X_val, MAX_SEQ_LEN)
    print('FINISH DATA PREPARATION \n')
    print('INITIALIZE MODEL ... ')
    model = SiameseBiLSTM(embeddings, EMBEDDING_DIM, MAX_SEQ_LEN, NUM_LSTM, NUM_HIDDEN, EPOCHS, BATCH_SIZE,
                          LSTM_DROPOUT, HIDDEN_DROPOUT, LEARNING_RATE, PATIENCE)
    model_path, checkpoint_dir = model.train(X_train, X_val, Y_train, Y_val)
    print('BUILD VOCABULARY AND TRANSFORM TEST DATA ... ')
    test_df, vocab, inv_vocab, w2v = build_vocab_and_transform(test_df, columns, WORD2VEC_PATH, True)
    print('BUILD TEST DATA ... ')
    X_test, Y_test = build_test_data(test_df, MAX_SEQ_LEN)
    results = model.test(model_path, X_test, Y_test)
    # BUG FIX: write the results via a context manager so the file handle is
    # flushed and closed (the original leaked an open file object).
    with open(checkpoint_dir + '.json', 'w') as file_obj:
        json.dump(results, file_obj)
    print('TRAIN FINISHED, RESULTS AVAILABLE AT : ')
    print(checkpoint_dir + '.json')
|
import numpy as np
class StandardScaler():
    """Per-feature standardization: x -> (x - mean) / std, fitted on data."""

    def __init__(self):
        self.mean_ = None   # per-column means, set by fit()
        self.scale_ = None  # per-column standard deviations, set by fit()

    def fit(self, X):
        """Learn the per-column mean and standard deviation of the 2-D X."""
        assert X.ndim == 2, "The dimension of X must be 2"
        # BUG FIX: the original passed generator expressions to np.array,
        # producing 0-d object arrays; list comprehensions yield 1-D arrays.
        self.mean_ = np.array([np.mean(X[:, i]) for i in range(X.shape[1])])
        self.scale_ = np.array([np.std(X[:, i]) for i in range(X.shape[1])])
        return self

    def transform(self, X):
        """Return a standardized copy of X using the fitted statistics."""
        assert X.ndim == 2, "The dimension of X must be 2"
        # BUG FIX: compare with `is not None` — elementwise `!=` on arrays is
        # ambiguous in a boolean context.
        assert self.mean_ is not None and self.scale_ is not None, \
            "must fit before transform"
        assert X.shape[1] == self.mean_.shape[0], \
            "the feature of X must be equal to mean_ and scale_"
        # BUG FIX: np.array(size=...) is not a valid constructor and the
        # original line had an unbalanced parenthesis; use np.empty.
        resX = np.empty(X.shape, dtype=float)
        for col in range(X.shape[1]):
            resX[:, col] = (X[:, col] - self.mean_[col]) / self.scale_[col]
        return resX
from django.shortcuts import render
from .models import StudentData
def studentreg_view(request):
    """Render the student registration form; on POST, first persist the
    submitted student record."""
    if request.method == "POST":
        # Pull every form field with an empty-string fallback.
        form = {name: request.POST.get(name, '') for name in (
            'roll', 'sname', 'mobile', 'fee', 'email', 'address',
            'dod', 'gender', 'course', 'institute')}
        record = StudentData(
            roll_number=form['roll'],
            student_name=form['sname'],
            mobile_number=form['mobile'],
            fee=form['fee'],
            email=form['email'],
            address=form['address'],
            dateofbirth=form['dod'],
            gender=form['gender'],
            courses=form['course'],
            institute_name=form['institute'],
        )
        record.save()
    # GET and POST both render the same template.
    return render(request, 'studentregform.html')
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django_declension.models import Word, DeclensionFail
# Expose the declension models in the Django admin with default ModelAdmin options.
admin.site.register(Word)
admin.site.register(DeclensionFail)
|
from rest_framework import generics, permissions
from .models import Jobs
from .serializers import JobsSerializer
# code was moved from API.py file as views will only be used for API releated actions so no reason to not just have everything in views
# creates an api list of all user Jobs as well as allowing for creation of a new Job under the users name
class JobsList(generics.ListCreateAPIView):
    """List the authenticated user's Jobs and allow creating new ones."""
    serializer_class = JobsSerializer
    permission_classes = [
        permissions.IsAuthenticated,
    ]

    # this is what sets the queryset to only Jobs created by the logged in user
    def get_queryset(self):
        # assumes a `jobs` related_name on the user model — TODO confirm
        return self.request.user.jobs.all()
# allows for POST, PUT and DELETE requests to the api of the user is Authenticated
class JobsDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Job for authenticated users.

    NOTE(review): the queryset is unfiltered, so any authenticated user can
    act on any Job — confirm this is intended.
    """
    queryset = Jobs.objects.all()
    serializer_class = JobsSerializer
    permission_classes = [
        permissions.IsAuthenticated,
    ]
# displays a list of all Jobs no matter which user made it, may be a better a way to display this infomration?
class JobsViewSetAll(generics.ListAPIView):
    """Read-only list of every Job regardless of owner (authenticated only)."""
    queryset = Jobs.objects.all()
    serializer_class = JobsSerializer
    permission_classes = [
        permissions.IsAuthenticated,
    ]
import os
import json
from os.path import dirname, realpath
from pathlib import Path
import logging
LOGGER = logging.getLogger("Configs")
class Singleton(type):
    """Metaclass caching one instance per class.

    The first instantiation constructs the object; every later call returns
    the cached instance (constructor arguments are then ignored).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            # First construction: build, cache, then return the instance.
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class Configs(metaclass=Singleton):
    """Singleton access to settings.json and the mapping JSON files that
    live next to this module."""

    def __init__(self):
        self.current_dir = dirname(realpath(__file__))
        config_path = Path(self.current_dir, "settings.json")
        with open(config_path) as f:
            self.settings = json.load(f)

    def _check_mappings(self):
        """Ensure configs/mappings exists, seeding it from mappings.sample.

        NOTE(review): never invoked from __init__ — confirm callers run it
        before get_map()/general_map, otherwise those raise FileNotFoundError.
        """
        mapping_path = Path(self.current_dir, "mappings")
        sample_mapping_path = Path(self.current_dir, "mappings.sample")
        if not os.path.exists(mapping_path):
            try:
                os.rename(sample_mapping_path, mapping_path)
                LOGGER.info("configs/mappings not found, create with mappings.sample")
            except (OSError, IsADirectoryError, NotADirectoryError):
                LOGGER.error(
                    "Failed to create mappings, check the configs/mappings directory"
                )
                raise RuntimeError

    @property
    def default_accounts(self) -> dict:
        """Default account mapping from settings.json."""
        return self.settings["default_accounts"]

    @property
    def default_output(self) -> str:
        """Default output path/name from settings.json."""
        return self.settings["default_output"]

    @property
    def custom_bean_keyword(self) -> str:
        """Keyword marking custom bean entries, from settings.json."""
        return self.settings["custom_bean_keyword"]

    @property
    def ignored_bean_keyword(self) -> str:
        """Keyword marking ignored bean entries, from settings.json."""
        return self.settings["ignored_bean_keyword"]

    @property
    def general_map(self) -> dict:
        """Load and return mappings/general.json (read on every access)."""
        map_path = Path(self.current_dir, "mappings/general.json")
        with open(map_path) as f:
            mappings = json.load(f)
        return mappings

    def get_map(self, name: str) -> dict:
        """Load and return mappings/<name>.json (read on every access)."""
        path = Path(self.current_dir, f"mappings/{name}.json")
        with open(path) as f:
            mappings = json.load(f)
        return mappings
|
import tensorflow as tf

# Each row of `groups` holds indices into the first axis of `arr`, so
# tf.gather yields a tensor of shape [2, 4, 2]: one row of `arr` per index.
groups = tf.constant([[0,1,2,3],[4,5,6,7]])
arr = tf.constant([[10,0],[20,0],[30,0],[40,0],[50,0],[60,0],[70,0],[80,0]])
output = tf.gather(arr, groups)
print(output)
'''
Pytorch implementation of SeqSleepNet taking as input single channel signal
x: [bs, seq_len, Fs*30]
y: [bs, seq_len, num_classes]
Original SeqSleepNet implmentation includes following steps:
input x [bs, seq_len, 30*100]
1: send x to time-frequency representation obtaining x: [bs, seq_len, 29, 129] # 29 = 1 + (Fs*30-Fs*frame_size)/(Fs*frame_stride), 129 = 1 + NFFT/2
2: send x to filterbank obtaining x: [bs, seq_len, 29, 32] #
3: reshape x: [bs*seq_len, 29, 32] # (29,129)*(129,32) = (29,32)
4: send x to biRNN obtaining x: [bs*seq_len, 29, 64*2]
5: send x to an attention layer obtaining x: [bs*seq_len, 64*2]
6: reshape x: [bs, seq_len, 64*2]
7: send x to biRNN obtaining x: [bs, seq_len, 64*2]
8: send x to seq_len of fc layers obtaining x: [bs, seq_len, class_num]
9: compute loss
'''
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from utils import *
class BiGRU(nn.Module):
    """Bidirectional GRU returning per-step outputs [bs, T, 2*hidden_size]."""
    def __init__(self, input_size, hidden_size, num_layers):
        super(BiGRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)

    def forward(self, x):
        # NOTE(review): the initial hidden state is re-sampled randomly on
        # every forward pass, making outputs non-deterministic — confirm this
        # is intentional (zeros are the conventional choice).
        h0 = torch.randn(self.num_layers*2, x.size(0), self.hidden_size)
        # NOTE(review): hard-coded .cuda() breaks CPU-only execution.
        h0 = h0.cuda()
        out, _ = self.gru(x, h0)
        return out
class Parabit(nn.Module):
    """seq_len parallel linear heads: [bs, seq_len, dim] -> [bs, seq_len,
    class_num], applying a distinct nn.Linear to each sequence position."""
    def __init__(self, seq_len, dim, class_num):
        super(Parabit, self).__init__()
        self.bits = []
        self.seq_len = seq_len
        for i in range(seq_len):
            bit = nn.Linear(dim, class_num)
            # setattr registers each Linear as a named submodule ('seq_at_i')
            # so its parameters are tracked; self.bits keeps positional access.
            bit_name = 'seq_at_%d' % i
            setattr(self, bit_name, bit)
            self.bits.append(getattr(self, bit_name))

    def forward(self, x):
        # Apply the i-th head to the i-th time step, then re-stack along dim 1.
        bit_fcs = []
        for i in range(self.seq_len):
            xx = x[:,i,:]
            fc = self.bits[i]
            yy = fc(xx)
            yy = yy.unsqueeze(1)
            bit_fcs.append(yy)
        torch_bits = torch.cat(bit_fcs, 1) # bs, seq_len, class_num
        return torch_bits
class Bnet(nn.Module):
    """SeqSleepNet backbone: learned filterbank -> epoch-level biGRU with
    attention -> sequence-level biGRU -> per-position classifiers."""
    def __init__(self, filterbanks, ch_num, seq_len, class_num):
        super(Bnet, self).__init__()
        self.seq_len = seq_len
        self.ch_num = ch_num
        self.class_num = class_num
        self.filterbanks = filterbanks          # fixed triangular filterbank [129, 32]
        self.filterweight = Parameter(torch.randn(ch_num, 129, 32))  # learned per-channel weighting
        self.epoch_rnn = BiGRU(32, 64, 1)
        self.attweight_w = Parameter(torch.randn(128, 64))
        self.attweight_b = Parameter(torch.randn(64))
        self.attweight_u = Parameter(torch.randn(64))
        self.seq_rnn = BiGRU(64*2, 64, 1)
        self.cls = Parabit(self.seq_len, 64*2, self.class_num)

    def forward(self, x):
        # x : [bs, seq_len, ch, 29, 129]
        # return: [bs, seq_len, class_num]
        # torch.mul -> element-wise dot; torch.matmul -> matrix multiplication
        x = x.reshape(-1, self.ch_num, self.seq_len*29, 129) # [bs, ch, seq_len*29, 129]
        # filterweight [ch, 129, 32] self.filterbanks [129, 32]
        filterbank = torch.mul(self.filterweight, self.filterbanks) # [ch, 129, 32]
        x = torch.matmul(x, filterbank) # [bs, seq_len*29, 32]
        # Average over the channel axis to fuse channels.
        x = x.mean(1)
        x = x.reshape(-1, 29, 32) # [bs*seq_len, 29, 32]
        x = self.epoch_rnn(x) # [bs*seq_len, 29, 64*2]
        # above is epoch-wise learning, below is seq-wise learning
        v = torch.tanh(torch.matmul(torch.reshape(x, [-1, 128]), self.attweight_w) + torch.reshape(self.attweight_b, [1, -1])) # [bs*seq_len, 64]
        vu = torch.matmul(v, torch.reshape(self.attweight_u, [-1, 1])) # [bs*seq_len*29, 64] * [64, 1] -> [bs*seq_len*29, 1]
        exps = torch.reshape(torch.exp(vu), [-1, 29]) # [bs*seq_len*29, 1] -> [bs*seq_len, 29]
        alphas = exps / torch.reshape(torch.sum(exps, 1), [-1, 1]) # [bs*seq_len, 1]
        # NOTE(review): `alphas` (the normalized attention weights) is
        # computed but the weighted sum below uses the unnormalized `exps` —
        # looks like a bug; confirm against the original TF SeqSleepNet.
        x = torch.sum(torch.mul(x, torch.reshape(exps, [-1, 29, 1])), 1) # [bs*seq_len, 29, 64*2]*[bs*seq_len, 29, 1] -> [bs*seq_len, 29, 64*2] -> [bs*seq_len, 64*2]
        x = torch.reshape(x, [-1, self.seq_len, 64*2]) # [bs, seq_len, 64*2]
        x = self.seq_rnn(x) # [bs, seq_len, 64*2]
        x = self.cls(x)
        return x
class Snet(nn.Module):
    """Per-position binary classification head (128 positions, 128-dim in)."""
    def __init__(self):
        super(Snet, self).__init__()
        self.cls = Parabit(128, 128, 2)
    def forward(self, x):
        out = self.cls(x)
        return out
class Pnet(nn.Module):
    """Five-way classification head: a Parabit classifier over 128-dim
    sequence features (input/output seq length 128)."""

    def __init__(self):
        super(Pnet, self).__init__()
        self.cls = Parabit(128, 128, 5)

    def forward(self, x):
        return self.cls(x)
if __name__ == '__main__':
    # Smoke test: push one random batch through Bnet and print shapes.
    batch_size = 32
    seq_len = 20
    class_num = 5
    ch_num = 3
    raw = torch.rand(batch_size, seq_len, ch_num, 3000)  # [bs, seq_len, ch, 30 s * 100 Hz]
    feats = preprocessing(raw)  # presumably [bs, seq_len, ch, 29, 129] — matches Bnet.forward's input
    feats = feats.cuda()
    print(feats.shape)
    # Fixed triangular filterbank [129, 32], shared by all channels.
    fbank = torch.from_numpy(lin_tri_filter_shape(32, 256, 100, 0, 50)).to(torch.float)
    fbank = fbank.cuda()
    net = Bnet(filterbanks=fbank, seq_len=seq_len, ch_num=ch_num, class_num=class_num)
    net = net.cuda()
    bout = net(feats)
    print(bout.shape)
    '''
    snet = Snet()
    sout = snet(bout)
    print(sout.shape)
    pnet = Pnet()
    bseg = torch.max(sout, dim=2)[1]
    pin = seg_pool(bout, bseg)
    pout = pnet(pin)
    print('bout: {}\nsout: {}\npout: {}'.format(bout.shape, sout.shape, pout.shape))
    print('params of bnet: {}'.format(sum(torch.numel(p) for p in bnet.parameters())))
    print('params of snet: {}'.format(sum(torch.numel(p) for p in snet.parameters())))
    print('params of pnet: {}'.format(sum(torch.numel(p) for p in pnet.parameters())))
    '''
|
#!/usr/bin/python3
"""
Object to serialization.
"""
def class_to_json(obj):
    """Return a dictionary description of ``obj`` for JSON serialization.

    Arg:
        obj: the object to describe (its attributes must already be
            simple data structures).
    Return: the object's attribute dictionary.
    """
    return vars(obj)
|
# Definition for an interval.
class Interval:
    """A closed interval [start, end] on the number line."""

    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e

    def __repr__(self):
        return f"[{self.start},{self.end}]"
class Solution(object):
    def open_ratio(self, open_times, query_time):
        """Fraction of `query_time` covered by the (non-overlapping)
        intervals in `open_times`.

        :type open_times: List[Interval]
        :type query_time: Interval
        :rtype: float in [0, 1]; 0.0 when either argument is empty/None
        Time: O(n)
        Space: O(1)
        """
        covered = 0.0
        if not open_times or not query_time:
            return covered
        for span in open_times:
            # Intersection length of [span] with [query_time]; negative
            # means no overlap, so clamp at zero.
            overlap = min(span.end, query_time.end) - max(span.start, query_time.start)
            if overlap > 0:
                covered += overlap
        return covered / (query_time.end - query_time.start)
class Solution2(object):
    def sort_ratings(self, ratings):
        """Sort "<id> <rating>" lines by rating, highest first.

        :type ratings: str — newline-separated "<id> <rating>" records
        :rtype: List[List[str]] — split records, descending by rating

        Bug fix: compare ratings numerically. The previous version compared
        the rating strings lexicographically, which ordered "9" above "10".
        Ties keep their input order (sorted() is stable).
        """
        rows = [line.split(' ') for line in ratings.split('\n')]
        return sorted(rows, key=lambda row: int(row[1]), reverse=True)
if __name__ == '__main__':
    # Manual checks: three open_ratio scenarios, then a rating sort.
    solver = Solution()
    print(solver.open_ratio([Interval(0, 24)], Interval(4, 10)))
    print(solver.open_ratio([Interval(4, 10), Interval(13, 16)], Interval(0, 24)))
    print(solver.open_ratio([Interval(7, 10), Interval(11, 15)], Interval(9, 12)))
    print(Solution2().sort_ratings("1005 2\n1001 5\n1002 5\n1004 1"))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from itertools import product
from unittest import mock
import torch
from botorch.acquisition.multi_objective.objective import (
MCMultiOutputObjective,
UnstandardizeMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import (
compute_sample_box_decomposition,
get_default_partitioning_alpha,
prune_inferior_points_multi_objective,
random_search_optimizer,
sample_optimal_points,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.model_list_gp_regression import ModelListGP
from botorch.models.transforms.outcome import Standardize
from botorch.utils.gp_sampling import get_gp_samples
from botorch.utils.multi_objective import is_non_dominated
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
from torch import Tensor
class TestUtils(BotorchTestCase):
    """Tests for `get_default_partitioning_alpha`."""

    def test_get_default_partitioning_alpha(self):
        # Below 5 objectives no approximation is used (alpha == 0); from 5
        # objectives on, alpha grows as 10 ** (num_objectives - 8).
        for num_obj in range(2, 7):
            if num_obj < 5:
                expected = 0.0
            else:
                expected = 10 ** (-8 + num_obj)
            self.assertEqual(
                expected, get_default_partitioning_alpha(num_objectives=num_obj)
            )
        # `BotorchTestCase.setUp` filters warnings, so reset the filters to
        # ensure the expected warning for > 6 objectives is actually issued.
        warnings.resetwarnings()
        with warnings.catch_warnings(record=True) as ws:
            self.assertEqual(0.1, get_default_partitioning_alpha(num_objectives=7))
        self.assertEqual(len(ws), 1)
class DummyMCMultiOutputObjective(MCMultiOutputObjective):
    # Identity objective: passes MC samples through unchanged. Used by the
    # tests below to exercise code paths that accept a custom objective.
    def forward(self, samples: Tensor) -> Tensor:
        """Return `samples` unchanged."""
        return samples
class TestMultiObjectiveUtils(BotorchTestCase):
    """Tests for `prune_inferior_points_multi_objective` and
    `compute_sample_box_decomposition`."""

    def setUp(self):
        # Shared fixtures; the model is mocked because only the posterior
        # samples matter to the utilities under test.
        super().setUp()
        self.model = mock.MagicMock()
        self.objective = DummyMCMultiOutputObjective()
        self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
        self.mc_samples = 250
        self.qmc = True
        self.ref_point = [0.0, 0.0]
        self.Y = torch.tensor([[1.0, 2.0]])
        self.seed = 1

    def test_prune_inferior_points_multi_objective(self):
        """Exercise pruning: input validation, basic Pareto pruning,
        objectives, constraints, max_frac limiting, and marginalize_dim."""
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            X = torch.rand(3, 2, **tkwargs)
            ref_point = torch.tensor([0.25, 0.25], **tkwargs)
            # the event shape is `q x m` = 3 x 2
            samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # test that a batched X raises errors
            with self.assertRaises(UnsupportedError):
                prune_inferior_points_multi_objective(
                    model=mm, X=X.expand(2, 3, 2), ref_point=ref_point
                )
            # test that a batched model raises errors (event shape is `q x m` = 3 x m)
            mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 2)))
            with self.assertRaises(UnsupportedError):
                prune_inferior_points_multi_objective(
                    model=mm2, X=X, ref_point=ref_point
                )
            # test that invalid max_frac is checked properly
            with self.assertRaises(ValueError):
                prune_inferior_points_multi_objective(
                    model=mm, X=X, max_frac=1.1, ref_point=ref_point
                )
            # test basic behaviour
            X_pruned = prune_inferior_points_multi_objective(
                model=mm, X=X, ref_point=ref_point
            )
            self.assertTrue(torch.equal(X_pruned, X[[-1]]))
            # test unstd objective
            unstd_obj = UnstandardizeMCMultiOutputObjective(
                Y_mean=samples.mean(dim=0), Y_std=samples.std(dim=0), outcomes=[0, 1]
            )
            X_pruned = prune_inferior_points_multi_objective(
                model=mm, X=X, ref_point=ref_point, objective=unstd_obj
            )
            self.assertTrue(torch.equal(X_pruned, X[[-1]]))
            # test constraints
            samples_constrained = torch.tensor(
                [[1.0, 2.0, -1.0], [2.0, 1.0, -1.0], [3.0, 4.0, 1.0]], **tkwargs
            )
            mm_constrained = MockModel(MockPosterior(samples=samples_constrained))
            X_pruned = prune_inferior_points_multi_objective(
                model=mm_constrained,
                X=X,
                ref_point=ref_point,
                objective=unstd_obj,
                constraints=[lambda Y: Y[..., -1]],
            )
            self.assertTrue(torch.equal(X_pruned, X[:2]))
            # test non-repeated samples (requires mocking out MockPosterior's rsample)
            samples = torch.tensor(
                [[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]], [[0.0], [0.0], [1.0]]],
                device=self.device,
                dtype=dtype,
            )
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point
                )
            self.assertTrue(torch.equal(X_pruned, X))
            # test max_frac limiting
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point, max_frac=2 / 3
                )
            if self.device.type == "cuda":
                # sorting has different order on cuda
                self.assertTrue(
                    torch.equal(X_pruned, X[[2, 1]]) or torch.equal(X_pruned, X[[1, 2]])
                )
            else:
                self.assertTrue(torch.equal(X_pruned, X[:2]))
            # test that zero-probability is in fact pruned
            samples[2, 0, 0] = 10
            with mock.patch.object(MockPosterior, "rsample", return_value=samples):
                mm = MockModel(MockPosterior(samples=samples))
                X_pruned = prune_inferior_points_multi_objective(
                    model=mm, X=X, ref_point=ref_point
                )
            self.assertTrue(torch.equal(X_pruned, X[:2]))
            # test marginalize_dim and constraints
            samples = torch.tensor([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]], **tkwargs)
            samples = samples.unsqueeze(-3).expand(
                *samples.shape[:-2],
                2,
                *samples.shape[-2:],
            )
            mm = MockModel(MockPosterior(samples=samples))
            X_pruned = prune_inferior_points_multi_objective(
                model=mm,
                X=X,
                ref_point=ref_point,
                objective=unstd_obj,
                constraints=[lambda Y: Y[..., -1] - 3.0],
                marginalize_dim=-3,
            )
            self.assertTrue(torch.equal(X_pruned, X[:2]))

    def test_compute_sample_box_decomposition(self):
        """Check shapes, bound ordering, constraint padding, and optima of
        the sampled box decompositions."""
        tkwargs = {"device": self.device}
        for dtype, maximize in product((torch.float, torch.double), (True, False)):
            tkwargs["dtype"] = dtype
            # test error when inputting incorrect Pareto front
            X = torch.rand(4, 3, 2, 1, **tkwargs)
            with self.assertRaises(UnsupportedError):
                compute_sample_box_decomposition(pareto_fronts=X, maximize=maximize)
            # test single and multi-objective setting
            for num_objectives in (1, 5):
                X = torch.rand(4, 3, num_objectives, **tkwargs)
                bd1 = compute_sample_box_decomposition(
                    pareto_fronts=X, maximize=maximize
                )
                # assess shape
                self.assertTrue(bd1.ndim == 4)
                self.assertTrue(bd1.shape[-1] == num_objectives)
                self.assertTrue(bd1.shape[-3] == 2)
                if num_objectives == 1:
                    self.assertTrue(bd1.shape[-2] == 1)
                # assess whether upper bound is greater than lower bound
                self.assertTrue(torch.all(bd1[:, 1, ...] - bd1[:, 0, ...] >= 0))
                # test constrained setting
                num_constraints = 7
                bd2 = compute_sample_box_decomposition(
                    pareto_fronts=X,
                    maximize=maximize,
                    num_constraints=num_constraints,
                )
                # assess shape
                self.assertTrue(bd2.ndim == 4)
                self.assertTrue(bd2.shape[-1] == num_objectives + num_constraints)
                self.assertTrue(bd2.shape[-2] == bd1.shape[-2] + 1)
                self.assertTrue(bd2.shape[-3] == 2)
                # assess whether upper bound is greater than lower bound
                self.assertTrue(torch.all(bd2[:, 1, ...] - bd2[:, 0, ...] >= 0))
                # the constraint padding should not change the box-decomposition
                # if the box-decomposition procedure is not random
                self.assertTrue(torch.equal(bd1, bd2[..., 0:-1, 0:num_objectives]))
                # test with a specified optimum
                opt_X = 2.0 if maximize else -3.0
                X[:, 0, :] = opt_X
                bd3 = compute_sample_box_decomposition(
                    pareto_fronts=X, maximize=maximize
                )
                # check optimum
                if maximize:
                    self.assertTrue(torch.all(bd3[:, 1, ...] == opt_X))
                else:
                    self.assertTrue(torch.all(bd3[:, 0, ...] == opt_X))
def get_model(
    dtype,
    device,
    num_points,
    input_dim,
    num_objectives,
    use_model_list,
    standardize_model,
):
    """Build a deterministic GP test fixture.

    Returns `(model, train_X, train_Y)` where `model` is either a
    `ModelListGP` of single-output GPs (when `use_model_list` and more than
    one objective) or a single `SingleTaskGP`, optionally with a
    `Standardize` outcome transform, put into eval mode.
    """
    torch.manual_seed(123)  # fixed seed -> reproducible training data
    tkwargs = {"dtype": dtype, "device": device}
    train_X = torch.rand(num_points, input_dim, **tkwargs)
    train_Y = torch.rand(num_points, num_objectives, **tkwargs)
    # Pick the outcome transform: one output per list-model entry, or all
    # outputs at once for the multi-output single model.
    if not standardize_model:
        outcome_transform = None
    elif use_model_list:
        outcome_transform = Standardize(m=1)
    else:
        outcome_transform = Standardize(m=num_objectives)
    if use_model_list and num_objectives > 1:
        submodels = [
            SingleTaskGP(
                train_X=train_X,
                train_Y=train_Y[:, i : i + 1],
                outcome_transform=outcome_transform,
            )
            for i in range(num_objectives)
        ]
        model = ModelListGP(*submodels)
    else:
        model = SingleTaskGP(
            train_X=train_X,
            train_Y=train_Y,
            outcome_transform=outcome_transform,
        )
    return model.eval(), train_X, train_Y
class TestThompsonSampling(BotorchTestCase):
    """Tests for `random_search_optimizer` and `sample_optimal_points`."""

    def test_random_search_optimizer(self):
        """Check output shapes and Pareto-optimality of random search over a
        GP sample, across dtypes, objectives, and model variants."""
        torch.manual_seed(1)
        input_dim = 3
        num_initial = 5
        tkwargs = {"device": self.device}
        optimizer_kwargs = {
            "pop_size": 1000,
            "max_tries": 5,
        }
        for (
            dtype,
            maximize,
            num_objectives,
            use_model_list,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (True, False),
            (1, 2),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            num_points = num_objectives
            model, X, Y = get_model(
                num_points=num_initial,
                input_dim=input_dim,
                num_objectives=num_objectives,
                use_model_list=use_model_list,
                standardize_model=standardize_model,
                **tkwargs,
            )
            # Draw one deterministic function sample from the GP to optimize.
            model_sample = get_gp_samples(
                model=model,
                num_outputs=num_objectives,
                n_samples=1,
            )
            input_dim = X.shape[-1]
            # fake bounds
            bounds = torch.zeros((2, input_dim), **tkwargs)
            bounds[1] = 1.0
            pareto_set, pareto_front = random_search_optimizer(
                model=model_sample,
                bounds=bounds,
                num_points=num_points,
                maximize=maximize,
                **optimizer_kwargs,
            )
            # check shape
            self.assertTrue(pareto_set.ndim == 2)
            self.assertTrue(pareto_front.ndim == 2)
            self.assertTrue(pareto_set.shape[-1] == X.shape[-1])
            self.assertTrue(pareto_front.shape[-1] == Y.shape[-1])
            self.assertTrue(pareto_front.shape[-2] == pareto_set.shape[-2])
            num_optimal_points = pareto_front.shape[-2]
            # check if samples are non-dominated
            weight = 1.0 if maximize else -1.0
            count = torch.sum(is_non_dominated(Y=weight * pareto_front))
            self.assertTrue(count == num_optimal_points)
        # Ask for more optimal points than query evaluations
        with self.assertRaises(RuntimeError):
            random_search_optimizer(
                model=model_sample,
                bounds=bounds,
                num_points=20,
                maximize=maximize,
                max_tries=1,
                pop_size=10,
            )

    def test_sample_optimal_points(self):
        """Check error handling and output shapes of `sample_optimal_points`
        across dtypes, objectives, optimizer kwargs, and model variants."""
        torch.manual_seed(1)
        input_dim = 3
        num_initial = 5
        tkwargs = {"device": self.device}
        optimizer_kwargs = {
            "pop_size": 100,
            "max_tries": 1,
        }
        num_samples = 2
        num_points = 1
        for (
            dtype,
            maximize,
            num_objectives,
            opt_kwargs,
            use_model_list,
            standardize_model,
        ) in product(
            (torch.float, torch.double),
            (True, False),
            (1, 2),
            (optimizer_kwargs, None),
            (False, True),
            (False, True),
        ):
            tkwargs["dtype"] = dtype
            model, X, Y = get_model(
                num_points=num_initial,
                input_dim=input_dim,
                num_objectives=num_objectives,
                use_model_list=use_model_list,
                standardize_model=standardize_model,
                **tkwargs,
            )
            input_dim = X.shape[-1]
            bounds = torch.zeros((2, input_dim), **tkwargs)
            bounds[1] = 1.0
            # check the error when asking for too many optimal points
            if num_objectives == 1:
                with self.assertRaises(UnsupportedError):
                    sample_optimal_points(
                        model=model,
                        bounds=bounds,
                        num_samples=num_samples,
                        num_points=2,
                        maximize=maximize,
                        optimizer=random_search_optimizer,
                        optimizer_kwargs=opt_kwargs,
                    )
            pareto_sets, pareto_fronts = sample_optimal_points(
                model=model,
                bounds=bounds,
                num_samples=num_samples,
                num_points=num_points,
                maximize=maximize,
                optimizer=random_search_optimizer,
                optimizer_kwargs=opt_kwargs,
            )
            # check shape
            ps_desired_shape = torch.Size([num_samples, num_points, input_dim])
            pf_desired_shape = torch.Size([num_samples, num_points, num_objectives])
            self.assertTrue(pareto_sets.shape == ps_desired_shape)
            self.assertTrue(pareto_fronts.shape == pf_desired_shape)
|
#!/usr/bin/env python
import sys
import os
def indent(n):
    """Return an indentation string of n levels (4 spaces per level).

    Replaces the original '+=' loop (quadratic string building) with
    string repetition; returns '' for n <= 0, as before.
    """
    return '    ' * n
def dBraces(text):
    """Double every curly brace in *text* (escapes it for .format-style
    templates / C-preprocessor emission)."""
    opened = text.replace('{', '{{')
    return opened.replace('}', '}}')
def printMulti(text, prefix, suffix):
    # Print a multi-line string: each non-empty line is prepended with
    # `prefix`; `suffix` is printed between consecutive lines (so it
    # terminates every line except the last, which ends with a bare
    # newline from the final `print`).
    # NOTE: Python 2 syntax (print statement).
    first = True
    for line in text.split('\n'):
        if first:
            first = False
        else:
            print suffix
        if line == '':
            sys.stdout.write(line)
        else:
            sys.stdout.write(prefix + line)
    print
def generateMacrosDefUndef(macros, macrosDef = [], macrosUnd = []):
    # Emit a C-preprocessor guard block: when the first macro in `macros`
    # is not yet defined, #define all of `macros`; otherwise #undef them
    # (plus `macrosUnd`). Entries in `macrosDef` are then unconditionally
    # defined after the guard. Each macro is a [name, value] pair; values
    # may be multi-line (continued with trailing backslashes).
    # NOTE(review): mutable default arguments are shared across calls; they
    # are only iterated here, never mutated, so this is benign — but None
    # defaults would be safer. Python 2 syntax (print statement).
    if len(macros) == 0:
        return
    print '#ifndef ' + macros[0][0] + '\\\n'
    for name,value in macros:
        print '    #define ' + name + ' \\'
        printMulti(value, indent(2), ' \\')
        print
    print '#else\n'
    for name,value in macros:
        # '(' split drops any macro parameter list: #undef takes the bare name.
        print '    #undef ' + name.split('(')[0]
    for name in macrosUnd:
        print '    #undef ' + name.split('(')[0]
    print '\n#endif\n'
    for name,value in macrosDef:
        print '#define ' + name + ' \\'
        printMulti(value, indent(1), ' \\')
        print
def nspMacros(nsp):
    """Return the [name, value] pairs for the namespace open/close macros
    of the comma-separated namespace list *nsp*."""
    start = ['MY_NSP_START', 'CHILA_LIB_MISC__DEF_NAMESPACE_VAR(' + nsp + ')']
    end = ['MY_NSP_END', 'CHILA_LIB_MISC__CLOSE_DEF_NAMESPACE_VAR(' + nsp + ')']
    return [start, end]
def getNamespace(file):
    # Derive a dotted namespace from a file path: keep the path components
    # from the first 'chila' / 'py.com.personal' / 'py_com_personal'
    # component onward (mapping 'py_com_personal' -> 'py.com.personal'),
    # drop the last component (the file name itself), and join the rest
    # with dots. Also echoes the path as a C++ '//' comment.
    # NOTE(review): the parameter `file` shadows the Python 2 builtin; if
    # no marker component is found, names stays empty and names.pop()
    # raises IndexError. Python 2 syntax (print statement).
    print '//' + file
    names = []
    started = False
    for name in file.split('/'):
        if name == 'chila' or name == 'py.com.personal' or name == "py_com_personal":
            started = True
        if started:
            if name == "py_com_personal":
                names.append('py.com.personal')
            else:
                names.append(name)
    # Drop the trailing file-name component.
    names.pop()
    ret = ''
    for name in names:
        if len(ret):
            ret += '.'
        ret += name
    return ret
def nspMacrosFF(file):
    """Namespace open/close macros derived from a file path
    (dots in the namespace become commas for the macro argument list)."""
    nsp = getNamespace(file).replace('.', ',')
    return nspMacros(nsp)
def addNspToMacros(nsp, macros):
    """Prefix every macro name with the upper-cased namespace.

    Arg:
        nsp: dotted namespace string, e.g. 'a.b'.
        macros: iterable of [name, value] pairs.
    Return: new list of [PREFIX__name, value] pairs, where PREFIX is *nsp*
    with dots replaced by underscores and upper-cased.

    Cleanup: list comprehension instead of the append loop; dropped the
    stray trailing semicolon.
    """
    upNsp = nsp.replace('.', '_').upper()
    return [[upNsp + "__" + name, value] for name, value in macros]
def addNspToMacrosFF(file, macros):
    """Namespace-qualify *macros* using the namespace derived from *file*."""
    nsp = getNamespace(file)
    return addNspToMacros(nsp, macros)
def unstrMacroArg(arg):
    """Splice macro argument *arg* out of a C string literal: produces
    '" + arg + "' so the surrounding quotes close and reopen around it."""
    return '" + {0} + "'.format(arg)
|
# Given a list of numbers, you should find the sum of these numbers.
# Your solution should not contain any of the banned words, even as a part of another word.
#
# The list of banned words are as follows:
#
# sum
# import
# for
# while
# reduce
# Input:
# A list of numbers.
#
# Output:
# The sum of numbers.
#
# Example:
# checkio([1, 2, 3]) == 6
# checkio([2, 2, 2, 2, 2, 2]) == 12
#
# How it is used:
# This task challenges your creativity to come up with a solution to fit this mission's specs!
#
# Precondition:
# The small amount of data. Let's creativity win!
def checkio(data):
    """Add up the numbers recursively (head + rest), avoiding the banned
    keywords of the puzzle; an empty list yields 0."""
    return data[0] + checkio(data[1:]) if data else 0
def checkio_1(data):
    """Add up the numbers by eval-ing the string "a+b+c...".

    Fix: an empty input now yields 0 — previously eval('') raised a
    SyntaxError. NOTE: eval is acceptable here only because the string is
    built from numbers we stringify ourselves; never apply this pattern to
    untrusted text.
    """
    if not data:
        return 0
    return eval('+'.join(map(str, data)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.