text stringlengths 38 1.54M |
|---|
'''
Created on Oct 11, 2014
@author: Jake
Adapted from Eli Benersky's blog on generating sentences from a CFG
http://eli.thegreenplace.net/2010/01/28/generating-random-sentences-from-a-context-free-grammar/
'''
# -*- coding: utf-8 -*-
from collections import defaultdict
import random, copy
class CFG:
    '''
    A context-free grammar whose productions may carry a domain and a
    feature structure.

    Attributes:
        prod -- Every production in the grammar, as a dictionary.
                The form is prod[lhs] : [(rhs, fs, head), ...]
                lhs is a (name, domain) tuple naming the left hand side.
                rhs is a tuple of (name, domain) child symbols.
                fs is a feature-structure dict, or None.
                head is the index of the head child, or None (terminals).
    '''
    def __init__(self):
        # Productions default to an empty list per left-hand side.
        self.prod = defaultdict(list)

    def addProd(self, lhs, rhs, fs, head):
        '''
        Add a production line to the grammar.

        Keyword Arguments:
            lhs  -- Left hand side of the production, e.g. ('S', domain)
            rhs  -- Right hand side: tuple of (name, domain) children
            fs   -- Feature structure of this production; None if absent
            head -- Index of the head child (None for terminal rules).
                    Determines which path features are passed down.
        '''
        self.prod[lhs].append((rhs, fs, head))

    def constructGrammar(self, cfg='test'):
        '''
        Populate self.prod from a grammar file.

        Lines starting with '#' and blank lines are skipped. Each rule
        looks like:
            LHS[Domain='d', Feat='v', ...] --> C1[Domain='d'], C2-Head, ...
        where a child suffixed with '-Head' becomes the head of the rule.
        '''
        # `with` guarantees the file handle is closed even on parse errors.
        with open(cfg) as f:
            lines = f.readlines()
        for line in lines:
            if not line.startswith('#') and len(line) > 1:
                l = line[:line.find(' -->')]
                # Left-hand side as a (name, domain) pair.
                lhs = (l[:l.find('[')],
                       l[l.find('Domain=') + 8:-2 if l.find(',') == -1 else l.find(',') - 1])
                if l.find(',') != -1:
                    fs = self.makeFeatureStructure(l[l.find(',') + 1:-1])
                else:
                    fs = None
                r = line[line.find('-->') + 4:-1]
                rhs = []
                head = None
                i = 0
                for item in r.split(', '):
                    if item.find('-Head') != -1:
                        # Strip the marker and remember this child's index.
                        head = i
                        item = item[:item.find('-Head')] + item[item.find('-Head') + 5:]
                    if item.find('[') != -1:
                        rhs.append((item[:item.find('[')],
                                    item[item.find('Domain=') + 8:-2 if item.find(',') == -1 else item.find(',') - 1]))
                    else:
                        rhs.append((item, None))
                    i += 1
                self.addProd(lhs, tuple(rhs), fs, head)

    def makeFeatureStructure(self, fsString):
        '''
        Build a feature structure dict {feat1: val1, feat2: val2, ...}
        from a string of the form "Feat1='f1v',Feat2='f2v',...".
        '''
        fs = {}
        for i in fsString.split(','):
            fs[i[:i.find('=')]] = i[i.find('\'') + 1:-1]
        return fs

    def genRandomSentence(self, symbol, domain=None):
        '''
        Generate a random sentence (list of terminal names) starting from
        the given symbol, ignoring feature structures.

        Keyword Arguments:
            symbol -- Start symbol (or the current node in recursion)
            domain -- Domain of the symbol; a random valid one is chosen
                      when None.
        '''
        sentence = []
        if domain is None:
            domain = self.chooseRandomDomain(symbol)
        randProd = random.choice(self.prod[(symbol, domain)])
        for sym in randProd[0]:
            # Non-terminals (keys of self.prod) are expanded recursively.
            if sym in self.prod:
                sentence += self.genRandomSentence(sym[0], sym[1])
            else:
                sentence.append(sym[0])
        return sentence

    def genRandomSentenceFS(self, symbol, domain=None, fs=None):
        '''
        Generate a random sentence starting from the given symbol,
        unifying feature structures along the way.

        Returns [sentence, feature_structure], or None when no
        derivation unifies.

        Keyword Arguments:
            symbol -- Start symbol (or the current node in recursion)
            domain -- Domain of the symbol; random valid one when None
            fs     -- Feature structure the symbol must unify with.
                      Starts as None; only passed down to the head child.
        '''
        sentence = []
        if domain is None:
            domain = self.chooseRandomDomain(symbol)
        # Draw productions (without replacement) until one unifies with
        # the inherited feature structure; fail if none does.
        prods = copy.copy(self.prod[(symbol, domain)])
        r = random.randint(0, len(prods) - 1)
        randProd = prods[r]
        del prods[r]
        newfs = self.unify(fs, randProd[1])
        while newfs is None and randProd[1] is not None and len(prods) > 0:
            r = random.randint(0, len(prods) - 1)
            randProd = prods[r]
            del prods[r]
            newfs = self.unify(fs, randProd[1])
        if newfs is None and randProd[1] is not None:
            return None
        # Recurse through the children; abandon this route on any failure.
        i = 0
        for sym in randProd[0]:
            if sym in self.prod:
                # Only the head child inherits the feature structure.
                topassfs = None
                if randProd[2] == i:
                    topassfs = newfs
                s = self.genRandomSentenceFS(sym[0], sym[1], topassfs)
                if s is None:
                    return None
                sentence += s[0]
                old = newfs
                newfs = self.unify(newfs, s[1])
                if old is not None and newfs is None:
                    return None
            else:
                sentence.append(sym[0])
            i += 1
        return [sentence, newfs]

    def chooseRandomDomain(self, symbol):
        '''
        Helper for generation: choose a random domain among those the
        grammar defines for the given symbol.
        '''
        s = set()
        for i in self.prod:
            if i[0] == symbol and len(i) > 1:
                s.add(i[1])
        return random.choice(list(s))

    def unify(self, fs1, fs2):
        '''
        Unify two feature structures into a new dict, or return None on
        conflict (and also when both inputs are empty/None).

        A value containing '?' is an unfilled variable that unifies with
        any concrete value.
        '''
        newfs = {}
        if fs1 is not None:
            for key in fs1:
                newfs[key] = fs1[key]
        if fs2 is not None:
            for key in fs2:
                if key in newfs:
                    if newfs[key] != fs2[key]:
                        if newfs[key].find('?') != -1:
                            # fs1's value was a variable: take fs2's value.
                            newfs[key] = fs2[key]
                        elif fs2[key].find('?') != -1:
                            # fs2's value is a variable: keep fs1's value.
                            # BUG FIX: the original used `break` here, which
                            # silently dropped every remaining fs2 feature.
                            continue
                        else:
                            # Two distinct concrete values: unification fails.
                            return None
                else:
                    newfs[key] = fs2[key]
        if not newfs:
            return None
        return newfs

    def genRandomSentenceConvergent(self, symbol, cfactor=0.25, pcount=None, depth=30):
        '''
        Generate a random sentence starting at the given symbol, using a
        convergent algorithm: productions already used on this branch of
        the derivation get a geometrically smaller chance of selection.

        Returns None when the depth limit is exceeded.

        Keyword Arguments:
            symbol  -- Start symbol (or the current node in recursion)
            cfactor -- Controls how tight the convergence is,
                       0.0 < cfactor < 1.0
            pcount  -- Internal: per-branch production-use counts passed
                       through recursive calls.
            depth   -- Maximum depth to recurse; fails when exhausted.
        '''
        # BUG FIX: pcount used to be a mutable default argument, so counts
        # leaked between independent top-level calls.
        if pcount is None:
            pcount = defaultdict(int)
        if depth > 0:
            sentence = []
            # Weight each production by how often it already appeared in
            # the branch that led to this symbol in the derivation.
            weights = []
            for prod in self.prod[symbol]:
                if prod in pcount:
                    weights.append(cfactor ** (pcount[prod]))
                else:
                    weights.append(1.0)
            randProd = self.prod[symbol][self.weightedChoice(weights)]
            # pcount is shared down the recursion; bump before recursing
            # and roll back afterwards so the parent's counts are intact.
            # NOTE(review): this assumes productions are hashable, i.e.
            # their feature structures are None -- confirm for FS grammars.
            pcount[randProd] += 1
            for sym in randProd:
                # For non-terminals, recurse.
                if sym in self.prod:
                    s = self.genRandomSentenceConvergent(sym, cfactor=cfactor, pcount=pcount, depth=depth - 1)
                    if s is not None:
                        sentence += s
                    else:
                        return s
                else:
                    sentence.append(sym)
            # Backtracking: clear the modification to pcount.
            pcount[randProd] -= 1
            return sentence

    def genRandomSentenceConvergentFS(self, symbol, domain=None, fs=None, cfactor=0.25, pcount=None, depth=30):
        '''
        Convergent sentence generation with feature-structure unification.

        Returns [sentence, feature_structure], or None on failure (no
        unifying production, or depth exhausted).

        Keyword Arguments:
            symbol  -- Start symbol (or the current node in recursion)
            domain  -- Domain of the symbol; random valid one when None
            fs      -- Feature structure to unify with; starts as None and
                       is only passed down to the head child.
            cfactor -- Controls how tight the convergence is,
                       0.0 < cfactor < 1.0
            pcount  -- Internal: per-branch production-use counts.
            depth   -- Maximum depth to recurse; fails when exhausted.
        '''
        # BUG FIX: fresh count dict per top-level call (was a mutable
        # default argument). Leftover Python-2 debug print statements
        # were removed as well.
        if pcount is None:
            pcount = defaultdict(int)
        if depth > 0:
            sentence = []
            if domain is None:
                domain = self.chooseRandomDomain(symbol)
            # Draw productions without replacement until one unifies with
            # the passed-down feature structure; fail if none does.
            prods = copy.copy(self.prod[(symbol, domain)])
            r = random.randint(0, len(prods) - 1)
            randProd = prods[r]
            del prods[r]
            newfs = self.unify(fs, randProd[1])
            while newfs is None and randProd[1] is not None and len(prods) > 0:
                r = random.randint(0, len(prods) - 1)
                randProd = prods[r]
                del prods[r]
                newfs = self.unify(fs, randProd[1])
            if newfs is None and randProd[1] is not None:
                return None
            # Count this production (keyed by its rhs, which is hashable)
            # so convergence weighting can penalise reuse on this branch.
            pcount[randProd[0]] += 1
            # Recurse through the children; abandon this route on failure.
            i = 0
            for sym in randProd[0]:
                if sym in self.prod:
                    # Only the head child inherits the feature structure.
                    topassfs = None
                    if randProd[2] == i:
                        topassfs = newfs
                    s = self.genRandomSentenceConvergentFS(sym[0], sym[1], topassfs, cfactor=cfactor, pcount=pcount, depth=depth - 1)
                    if s is None:
                        return None
                    sentence += s[0]
                    old = newfs
                    newfs = self.unify(newfs, s[1])
                    if old is not None and newfs is None:
                        return None
                else:
                    sentence.append(sym[0])
                i += 1
            return [sentence, newfs]

    def weightedChoice(self, weights):
        '''
        Helper: return a random index into `weights`, chosen with
        probability proportional to each weight.
        '''
        rnd = random.random() * sum(weights)
        for i, w in enumerate(weights):
            rnd -= w
            if rnd < 0:
                return i

    def toString(self):
        '''
        Render the grammar back into the rule format read by
        constructGrammar (domains only; feature structures and head
        markers are omitted).
        '''
        s = ''
        for key in self.prod:
            for p in self.prod[key]:
                s += key[0] + '[Domain=\'' + key[1] + '\']' + ' --> '
                # BUG FIX: productions are stored as (rhs, fs, head), so the
                # children live in p[0]; the original iterated p itself.
                for i in p[0]:
                    if i[1] is not None:
                        s += i[0] + '[Domain=\'' + i[1] + '\']' + ', '
                    else:
                        s += i[0] + ', '
                s = s[:-2] + '\n'
        return s
# grammar = CFG()
# grammar.constructGrammar('../Semantics/grammar.fcfg')
# for i in range(10):
# # s = grammar.genRandomSentence('S')
# # s = grammar.genRandomSentenceConvergent('S',cfactor=0.05,depth=20)
# s = grammar.genRandomSentenceConvergentFS('S')
# while s == None:
# s = grammar.genRandomSentenceConvergentFS('S')
# # s = grammar.genRandomSentenceConvergent('S',cfactor=0.05,depth=20)
# print s[0]
# f = open('test2','w')
# f.write(grammar.toString())
# f.close() |
import gin
import logging
import absl
import tensorflow as tf
from tensorflow.python.util.deprecation import _PRINT_DEPRECATION_WARNINGS
import pathlib
import shutil
from tune import hyperparameter_tuning
from train import Trainer
from input_pipeline.dataset_loader import DatasetLoader
from utils import utils_params, utils_misc
from models.architectures import load_model, TransferLearningModel, inception_like, vgg_like
from evaluation.evaluation import Evaluator
from evaluation.ensemble import StackingEnsemble
from evaluation.visualization import GradCAM
# Command-line flags controlling which pipeline stages run.
FLAGS = absl.flags.FLAGS
# Use --train or --train=true to set this flag (true is the value when given bare).
# Use --notrain or --train=false to set this flag to false.
absl.flags.DEFINE_boolean(name='train', default=False, help='Specify whether to train a model.')
absl.flags.DEFINE_boolean(name='eval', default=False, help='Specify whether to evaluate a model.')
absl.flags.DEFINE_boolean(name='ensem', default=False, help='Specify whether to use ensemble learning.')
# Configure the number of threads used by tensorflow
# tf.config.threading.set_intra_op_parallelism_threads(3)
# tf.config.threading.set_inter_op_parallelism_threads(3)
def _deep_visualization(model_architecture, test_dataset, dataset_info, run_paths) -> None:
    '''Run Grad-CAM visualization of the given model on the test dataset.

    Parameters
    ----------
    model_architecture : the (trained) model to visualize
    test_dataset : dataset whose samples are visualized
    dataset_info : dataset metadata (unused here; kept for a uniform call signature)
    run_paths : run directory structure used by GradCAM for output
    '''
    # The original contained a large block of commented-out experimentation
    # code (alternative architectures, shape probes); it was dead and removed.
    grad_cam_obj = GradCAM(model_architecture, run_paths)
    grad_cam_obj.visualize(test_dataset)
def setup_checkpoint_loading(model_name, resume_checkpoint, resume_model_path, run_paths) -> str:
    '''Set up the continuation of a model's training.

    If only a checkpoint is given, that checkpoint path is used directly to
    load the model. If a path to a model's directory is given, its summaries
    are copied into the current model's directory and a checkpoint path is
    derived from it: either the checkpoints directory itself (so the latest
    checkpoint is used) when `resume_checkpoint` is empty, or the checkpoint
    built from the directory plus the given prefix.

    Returns
    -------
    str
        Path to a checkpoint to load.
    '''
    if not resume_model_path:
        # No model directory given: resume_checkpoint already is a full path.
        return resume_checkpoint
    previous_run = pathlib.Path(resume_model_path)
    # Carry the previous attempt's summaries over into the new run directory.
    shutil.copytree(src=str(previous_run / 'summaries'),
                    dst=run_paths['model_directories'][model_name]['summaries'],
                    dirs_exist_ok=True)
    checkpoint_dir = previous_run / 'checkpoints'
    if not resume_checkpoint:
        # No prefix: hand back the directory so the latest checkpoint is used.
        return str(checkpoint_dir)
    return str(checkpoint_dir / resume_checkpoint)
def verify_configs(models) -> None:
    '''Simple tests that the given model configs are of correct type/structure.

    Raises
    ------
    ValueError
        If `models` is neither a dict nor a list of dicts, if any config is
        missing the 'model' key, or if ensemble learning is requested with
        fewer than two models.
    '''
    # Verify the models type.
    # BUG FIX: the original raised "should be a dictionary or list" even for a
    # VALID dict (one containing 'model'), because that case matched neither
    # the `if` nor the `elif` and fell into the final `else`. It also silently
    # accepted non-dict items inside a list.
    if isinstance(models, dict):
        if 'model' not in models:
            raise ValueError("The model dictionary should contain the key 'model' with a name.")
    elif isinstance(models, list):
        for model in models:
            if not isinstance(model, dict) or ('model' not in model):
                raise ValueError("The model dictionary should contain the key 'model' with a name.")
    else:
        raise ValueError("The model should be a dictionary or list of dictionaries.")
    # Verify configs: ensembling needs at least two models.
    if FLAGS.ensem and (not isinstance(models, list) or len(models) < 2):
        raise ValueError("For Ensemble Learning, train more than one model.")
@gin.configurable
def main(argv,
         models, use_hyperparameter_tuning, deep_visualization, # <- configs
         resume_checkpoint, resume_model_path, evaluation_checkpoint): # <- configs
    '''Entry point: depending on flags/configs, run hyper-parameter tuning,
    per-model training/evaluation, and/or stacking-ensemble learning.

    All parameters after `argv` are injected by gin from the config file.
    '''
    verify_configs(models)
    # Generate folder structures
    run_paths = utils_params.generate_run_directory()
    # Set loggers
    utils_misc.set_loggers(paths=run_paths, logging_level=logging.INFO)
    # Save gin config
    utils_params.save_config(run_paths['path_gin'], gin.config_str() )
    # Create dataset(s), returns the names of the available datasets.
    # NOTE(review): dataset_handles appears unused below -- confirm before removing.
    dataset_handles = DatasetLoader().create_datasets()
    if use_hyperparameter_tuning:
        logging.info("Hyper-parameter tuning set to True. Starting Hyper-parameter tuning...")
        # Delete the previous run directories inside the main 'hyperparameter_tuning' directory
        # utils_params.delete_previous_hyperparameter_runs(run_paths)
        # Now start the runs for all the hyperparameters.
        # All the run specific checkpoints and summaries will be saved under 'run_<datetime>'
        # under 'experiment' folder.
        hyperparameter_tuning(models, run_paths)
    elif FLAGS.train or FLAGS.eval or FLAGS.ensem:
        for index, model_configuration in enumerate(models):
            # Load datasets
            train_dataset, validation_dataset, test_dataset, dataset_info = \
                DatasetLoader().load_dataset(dataset_handle=model_configuration['dataset_handle'] )
            # Load model
            model_architecture = load_model(model_configuration, dataset_info, run_paths)
            # Log/display model configuration at start.
            model_config_string = '\n'.join( [f"'{key}': {model_configuration[key] }"
                if type(model_configuration[key] ) is not str else f"'{key}': '{model_configuration[key] }'" for key in model_configuration.keys() ] )
            logging.info( ('Current model:\n'
                           f"Model name: '{model_architecture.name}'\n"
                           + model_config_string) )
            utils_params.generate_model_directories(model_architecture.name, run_paths)
            resume_checkpoint_path = ''
            # Load checkpoint if this is the first model of the run.
            if (index == 0) and (resume_checkpoint or resume_model_path):
                resume_checkpoint_path = setup_checkpoint_loading(model_architecture.name, resume_checkpoint, resume_model_path, run_paths)
            last_checkpoint = ''
            if FLAGS.train:
                trainer = Trainer(model_architecture, train_dataset, validation_dataset, dataset_info, run_paths,
                                  resume_checkpoint=resume_checkpoint_path, class_weights_scale=model_configuration['class_weights_scale'] )
                last_checkpoint = trainer.train()
            if FLAGS.eval and not FLAGS.ensem:
                # No need to evaluate individual models here for FLAGS.ensem=True,
                # because if FLAGS.ensem=True they are evaluated in the ensemble
                # branch below, individually as well as for the ensemble model.
                if not FLAGS.train: # Evaluate a saved model
                    last_checkpoint = evaluation_checkpoint
                _, _, _ = Evaluator(model_architecture, last_checkpoint, test_dataset, dataset_info, run_paths).evaluate()
            if deep_visualization:
                _deep_visualization(model_architecture, test_dataset, dataset_info, run_paths)
        if FLAGS.ensem:
            # Load datasets
            train_dataset, validation_dataset, test_dataset, dataset_info = DatasetLoader().load_dataset(
                dataset_handle=models[0]['dataset_handle'])
            models = load_model(models, dataset_info)
            ensemble = StackingEnsemble(models, None, dataset_info['label']['num_classes'],
                                        dataset_info, run_paths)
            level_0_loaded_models = ensemble.get_level_0_models() # list of tuple (model_name, loaded_model)
            # For all the loaded models do an evaluation to see how each performs individually.
            for model in level_0_loaded_models:
                model_name, model_architecture = model
                # restore_from_checkpoint is False here because these models are already
                # loaded from the trained_models folder, so no need to restore them again.
                _, _, _ = Evaluator(model_architecture, last_checkpoint, test_dataset, dataset_info,
                                    run_paths, restore_from_checkpoint=False).evaluate()
            ensemble_model = ensemble.get_stacking_ensemble_model()
            utils_params.generate_model_directories(ensemble_model.name, run_paths)
            resume_checkpoint_path = setup_checkpoint_loading(ensemble_model.name,
                                                              resume_checkpoint, resume_model_path, run_paths)
            ensemble_trainer = Trainer(ensemble_model, train_dataset, validation_dataset, dataset_info, run_paths,
                                       resume_checkpoint=resume_checkpoint_path, is_ensemble=True)
            last_checkpoint = ensemble_trainer.train()
            _, _, _ = Evaluator(ensemble_model, last_checkpoint, test_dataset, dataset_info, run_paths,
                                is_ensemble=True).evaluate()
if __name__ == '__main__':
    # Resolve the gin config relative to this file so the script works from any cwd.
    gin_config_path = pathlib.Path(__file__).parent / 'configs' / 'config.gin'
    gin.parse_config_files_and_bindings([gin_config_path], [])
    absl.app.run(main)
|
'''
Singleton pattern:
1. Ensure a class has only one instance.
2. Provide a global access point to that instance.
'''
class MySingleton:
    # The single shared instance, created lazily on first construction.
    __obj = None
    # True until __init__ has run once; later constructions skip re-initialisation.
    __init_flag = True

    def __new__(cls, *args, **kwargs):
        # Create the instance only once; every later call returns the same object.
        if cls.__obj is None:
            cls.__obj = super().__new__(cls)
        return cls.__obj

    def __init__(self, name):
        # Only the first construction initialises state, so the instance keeps
        # the name passed on the very first call.
        if MySingleton.__init_flag:
            print("Init........")
            self.name = name
            MySingleton.__init_flag = False
# All three constructions return the same shared instance; only the first
# call runs __init__, so every object keeps the name "SG01".
sg01 = MySingleton("SG01")
sg02 = MySingleton("SG02")
print(sg01)
print(sg02)
sg03 = MySingleton("SG03")
print(sg03)
# Change hosts_temp to hosts_path on the two open(...) lines below (around the
# blocking/unblocking sections) to edit the real hosts file and start blocking.
import time
from datetime import datetime as dt
hosts_temp = "hosts" # For testing
hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
# Loopback address the blocked sites are redirected to.
redirect = "127.0.0.1"
website_list = ["www.facebook.com", "facebook.com", "www.youtube.com", "youtube.com"]
# Let the user extend the block list until they press Enter.
while True:
    print("Websites that will be blocked are: " + str(website_list))
    user_input = input("\n\nEnter 'add' to add Websites(separate with spaces only) else press 'Enter' to start blocking: ")
    if user_input == 'add':
        new_website = input("\nEnter the website: ")
        website_list.extend(new_website.split(' '))
    else:
        print("\nWebsites that will be blocked are: " + str(website_list))
        break
print("Set timer to block('Hour must be greater than or equal to present hour')\n\nPresent Hour is: " + str(dt.now().hour))
start_time = int(input("\nEnter start time: "))
end_time = int(input("Enter end time: "))
total_working_hours = end_time - start_time
while True:
    # Inside today's blocking window: append a redirect entry for each site
    # that is not already present in the hosts file.
    if dt(dt.now().year, dt.now().month, dt.now().day, start_time) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, end_time):
        print("Working Hours...from: {0} and ends at {1}".format(start_time, end_time))
        with open(hosts_temp, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect + " " + website + "\n")
    else:
        # Outside the window: rewrite the hosts file without the blocked entries.
        with open(hosts_temp, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
        # Once past the start hour, shift the window forward by one slot.
        if dt.now().hour > start_time:
            start_time = end_time + 1
            end_time = end_time + total_working_hours + 1
        print("Free Time..Next Working Hour starts from {0}".format(start_time))
    # Poll every 5 seconds.
    time.sleep(5)
|
from flask import Flask
from flask_restful import Api, Resource, reqparse
from globalterrorism import GTData
#import parser
#from csvtojson import convertCSVtoJSON
#convertCSVtoJSON()
# REST API exposing Global Terrorism data; one resource keyed by event id.
app = Flask(__name__)
api = Api(app)
api.add_resource(GTData, "/gt/<string:eventid>")

if __name__ == "__main__":
    # Guard the dev server so importing this module (e.g. under a WSGI
    # server) does not start Flask's built-in server.
    app.run(debug=True)
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 10:08:11 2020
@author: pravesh
"""
import cv2
import numpy as np
import os
from os import listdir
from os.path import isfile,join
import random
cap = cv2.VideoCapture(0)
# Grab two consecutive frames; motion is detected from their difference.
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 1000:
            # Ignore tiny contours (camera noise / small movements).
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "Status:{}".format('Movement'), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
        # Make one folder which only contains music and give its full path here.
        music_dir = "H:/song/warning"
        songs = listdir(music_dir)
        # Play the first song as a warning when movement is detected.
        # (Pick a random index instead for a random song; keep it < len(songs).)
        os.startfile(os.path.join(music_dir, songs[0]))
        print(songs[0])
        # One alarm per frame pair is enough; stop scanning contours.
        break
    cv2.imshow("feed", frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(40) == 27:  # Esc quits
        break
# BUG FIX: the original had `cap.release` without parentheses, which only
# referenced the method and never released the camera.
cap.release()
cv2.destroyAllWindows()
# -*- coding: utf-8 -*-
import datetime
import os
import sys
import dash
from dash import dcc
from dash import dash_table as dt
from dash import html
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from awscostparser import AWSCostParser
# Cost Explorer credentials come from the AWS profile; fail fast if unset.
if not os.environ.get("AWS_PROFILE"):
    print(
        "Error: Please set `AWS_PROFILE` in your environment variables (i.e. export AWS_PROFILE='prod')"
    )
    sys.exit(2)
# Daily per-resource costs for the last 60 days.
dailyresources = AWSCostParser(days=60, granularity="DAILY")
dailyr_df = dailyresources.df
# Pickle then immediately reload: keeps an on-disk snapshot for reruns.
dailyr_df.to_pickle("dailyr.df")
dailyr_df = pd.read_pickle("dailyr.df")
# Monthly per-account costs for the last year.
annualresources = AWSCostParser(days=365, granularity="MONTHLY")
annualresources = annualresources.df
annualresources.to_pickle("byaccount.df")
account_df = pd.read_pickle("byaccount.df")
# Daily costs grouped by the "source" cost-allocation tag.
key = "source"
acpk = AWSCostParser(key=key, days=60, granularity="DAILY")
dfk = acpk.df
dfk.to_pickle("bysourcetag.df")
tag_df = pd.read_pickle("bysourcetag.df")
# Shared styling for all figures.
colors = dict(graphtext="rgb(242, 158, 57)")
preffont = dict(size=10, color=colors["graphtext"])
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
def generate_table(dataframe, max_rows=10):
    """Render up to `max_rows` rows of `dataframe` as a plain Dash HTML table."""
    header = html.Tr([html.Th(column) for column in dataframe.columns])
    body = [
        html.Tr([html.Td(dataframe.iloc[row][column]) for column in dataframe.columns])
        for row in range(min(len(dataframe), max_rows))
    ]
    return html.Table([header] + body)
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Reference dates for all the masks below.
# BUG FIX: the original used `datetime.datetime.utcnow().now()`; calling the
# classmethod now() on the utcnow() instance discards it and returns local
# time anyway, so the utcnow() call was dead -- use now() directly.
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
yestermonth = today - datetime.timedelta(days=31)
# Current MTD versus last month - Total cost per account
last60days = today - datetime.timedelta(days=60)
data_mask = (
    dailyr_df["start"] >= f"{last60days.year}-{last60days.month}-{last60days.day}"
)
# Bucket the last 60 days into two 30-day periods per account, then pivot so
# each account row has one column per period.
month_compare = (
    dailyr_df[data_mask]
    .groupby(["account", pd.Grouper(key="start", freq="30d")])["amount"]
    .sum()
    .reset_index()
    .pivot(index="account", values="amount", columns="start")
)
month_compare["diff"] = month_compare.iloc[:, 1] - month_compare.iloc[:, 0]
month_compare.columns = ["Previous Month", "Current Month", "Difference"]
# Append a grand-total row across all accounts.
month_compare = pd.concat([month_compare,
                           month_compare.sum(numeric_only=True).rename("Total")
                           ])
# NOTE(review): month_data appears unused below -- confirm before removing.
month_data = month_compare.to_dict(orient="index")
month_compare.reset_index(inplace=True)
# Cost for AWS by account
adf = account_df.groupby(["start", "account"], as_index=False)["amount"].sum()
account_cost = go.Figure()
for account in adf["account"].unique():
    sel = adf[adf["account"] == account]
    # Only the two Data accounts start visible; others toggle via the legend.
    visible = account in ["Data Prod", "Data Dev"]
    params = {"name": account, "x": sel["start"], "y": sel["amount"]}
    if not visible:
        params["visible"] = "legendonly"
    account_cost.add_trace(go.Bar(**params))
account_cost.update_layout(
    xaxis=dict(
        title="Month",
    ),
    yaxis=dict(
        title="Amount (USD)",
    ),
    # Transparent backgrounds so the page styling shows through.
    paper_bgcolor="rgba(0,0,0,0)",
    plot_bgcolor="rgba(0,0,0,0)",
    font=preffont,
)
# Cost for AWS by resource [30 days, daily]
drdf = dailyr_df.groupby(["start", "resource"], as_index=False)["amount"].sum()
daily_resource_cost = go.Figure()
for resource in drdf["resource"].unique():
    rsel = drdf[drdf["resource"] == resource]
    # Only the heaviest services start visible; the rest are legend-toggleable.
    visible = resource in [
        "AWS Glue",
        "AWS Lambda",
        "Amazon Simple Storage Service",
        "Amazon Redshift",
    ]
    params = {"name": resource, "x": rsel["start"], "y": rsel["amount"]}
    if not visible:
        params["visible"] = "legendonly"
    daily_resource_cost.add_trace(go.Scatter(**params))
## Add total line (sum over all resources per day)
total_df = dailyr_df.groupby(["start"], as_index=False)["amount"].sum()
params = {
    "name": "total",
    "x": total_df["start"],
    "y": total_df["amount"],
}
daily_resource_cost.add_trace(go.Scatter(**params))
daily_resource_cost.update_layout(
    paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)", font=preffont
)
# Cost for AWS by resource [365 days, monthly]
rdf = account_df.groupby(["start", "resource"], as_index=False)["amount"].sum()
resource_cost = go.Figure()
# The index from the original enumerate() was never used, so it was dropped.
for resource in rdf["resource"].unique():
    rsel = rdf[rdf["resource"] == resource]
    # Only the heaviest services start visible; the rest are legend-toggleable.
    visible = resource in [
        "AWS Glue",
        "AWS Lambda",
        "Amazon Simple Storage Service",
        "Amazon Redshift",
    ]
    params = {"name": resource, "x": rsel["start"], "y": rsel["amount"]}
    if not visible:
        params["visible"] = "legendonly"
    resource_cost.add_trace(go.Scatter(**params))
resource_cost.update_layout(
    paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)", font=preffont
)
# MTD cost per resource for the active data accounts (amounts above $1 only).
active_accounts = ["Data Dev", "Data Prod"]
data_mask = (
    (account_df["account"].isin(active_accounts))
    & (account_df["start"] >= f"{today.year}-{today.month}-01")
    & (account_df["amount"] > 1)
)
data = account_df[data_mask]
data_acc_fig = px.bar(data, x="resource", y="amount", color="account", barmode="group")
data_acc_fig.update_layout(
    xaxis=dict(
        title="Resource",
    ),
    yaxis=dict(
        title="Amount (USD)",
    ),
    paper_bgcolor="rgba(0,0,0,0)",
    plot_bgcolor="rgba(0,0,0,0)",
    font=preffont,
)
# Yesterday's untagged resources
# Rows from yesterday onward whose "source" tag is empty.
mask = (tag_df["start"] >= yesterday.strftime("%Y-%m-%d")) & (tag_df["source"] == "")
ydf = (
    tag_df[mask]
    .groupby(["start", key, "resource"], as_index=False)["amount"]
    .sum()
    .sort_values("amount", ascending=False)
)
fig_ydf = go.Figure(
    data=[
        go.Bar(
            x=ydf["resource"],
            y=ydf["amount"],
            text=ydf["amount"],
            textposition="auto",
        )
    ]
)
fig_ydf.update_layout(
    paper_bgcolor="rgba(0,0,0,0)", plot_bgcolor="rgba(0,0,0,0)", font=preffont
)
# Top 10 most expensive resources last 30 days vs. previous 30 days
# NOTE(review): last60days is recomputed here with the same value as above.
last60days = today - datetime.timedelta(days=60)
# Tax is excluded since it is not an actionable resource cost.
ten_most_expensive = (
    dailyr_df[dailyr_df["resource"] != "Tax"]
    .groupby("resource")["amount"]
    .sum()
    .sort_values()
    .tail(10)
    .index
)
data_mask = (
    dailyr_df["start"] >= f"{last60days.year}-{last60days.month}-{last60days.day}"
) & (dailyr_df["resource"].isin(ten_most_expensive))
rmonth_compare = (
    dailyr_df[data_mask]
    .groupby(["resource", pd.Grouper(key="start", freq="30d")])["amount"]
    .sum()
    .reset_index()
    .sort_values("amount", ascending=False)
)
# Label each 30-day bucket; the most recent bucket is "Last 30 days".
rmonth_compare["period"] = np.where(
    rmonth_compare.start == rmonth_compare.start.max(),
    "Last 30 days",
    "Previous 30 days",
)
fig_merdf = px.bar(
    rmonth_compare,
    x="amount",
    y="resource",
    color="period",
    barmode="group",
    orientation="h",
)
fig_merdf.update_layout(
    xaxis=dict(
        title="Amount (USD)",
    ),
    yaxis=dict(
        title="Resource",
        autorange="reversed",
    ),
    paper_bgcolor="rgba(0,0,0,0)",
    plot_bgcolor="rgba(0,0,0,0)",
    font=preffont,
)
# Cost by source (MTD, stacked by resource per tag value)
# TODO: Ensure source is in the tags
data_mask = (
    tag_df["start"] >= f"{today.year}-{today.month}-01"
)  # & (tag_df['source'] != "")
source_df = (
    tag_df[data_mask]
    .groupby(["source", "resource"], as_index=False)["amount"]
    .sum()
    .sort_values("source", ascending=False)
)
source_cost_fig = px.bar(
    source_df, x="amount", y="source", color="resource", orientation="h"
)
source_cost_fig.update_layout(
    xaxis=dict(
        title="Amount (USD)",
    ),
    yaxis=dict(
        title="Source",
        autorange="reversed",
    ),
    paper_bgcolor="rgba(0,0,0,0)",
    plot_bgcolor="rgba(0,0,0,0)",
    font=preffont,
    title={
        "text": "Cost per data source - MTD",
        "font": {"size": 20},
        "y": 1,
        "x": 0.5,
        "xanchor": "center",
        "yanchor": "top",
    },
)
# Page layout: header, month-over-month table, then one section per figure
# built above.
app.layout = html.Div(
    children=[
        html.Div(
            [
                html.H1(children="AWS cost overview"),
                html.H2(
                    children="Dashboard showing the data for the different accounts."
                ),
            ],
            className="header",
        ),
        html.Div(
            [
                html.H3("Cost overview last 30 days vs. previous 30 days"),
                dt.DataTable(
                    columns=[
                        {"name": i, "id": i, "deletable": False}
                        for i in month_compare.columns
                    ],
                    data=month_compare.to_dict("records"),
                    css=[
                        {
                            "selector": ".dash-cell div.dash-cell-value",
                            "rule": "display: inline; white-space: inherit; overflow: inherit; text-overflow: inherit;",
                        }
                    ],
                    # Colour the Difference column: red for increases,
                    # green for decreases.
                    style_data_conditional=[
                        {
                            "if": {
                                "column_id": "Difference",
                                "filter_query": "{Difference} > 0",
                            },
                            "backgroundColor": "red",
                            "color": "white",
                        },
                        {
                            "if": {
                                "column_id": "Difference",
                                "filter_query": "{Difference} < 0",
                            },
                            "backgroundColor": "green",
                            "color": "white",
                        },
                    ],
                    style_data={"border": "0px", "backgroundColor": "#444"},
                    style_header={
                        "border": "0px",
                        "backgroundColor": "#444",
                        "fontWeight": "bold",
                    },
                ),
                html.Div(
                    [
                        html.H3(
                            "Top 10 - Most expensive resources - All accounts total - Last 30 days vs. previous 30 days"
                        ),
                        dcc.Graph(id="fig_merdf", figure=fig_merdf),
                    ]
                ),
                html.Div(
                    [
                        html.H3("Costs for data accounts by resource - MTD"),
                        dcc.Graph(id="fig_sub", figure=data_acc_fig),
                    ]
                ),
                html.Div(
                    [
                        html.H3("Costs of AWS grouped by account"),
                        dcc.Graph(id="account_cost", figure=account_cost),
                    ]
                ),
                html.Div([html.H3("Cost per resource")]),
                # Yearly and daily per-resource charts side by side.
                html.Div(
                    [
                        html.Div(
                            [
                                html.H4("Last year - Monthly"),
                                dcc.Graph(id="resource_cost", figure=resource_cost),
                            ],
                            className="six columns",
                        ),
                        html.Div(
                            [
                                html.H4("Last 30 days - Daily"),
                                dcc.Graph(
                                    id="daily_resource_cost", figure=daily_resource_cost
                                ),
                            ],
                            className="six columns",
                        ),
                    ],
                    className="row",
                ),
                html.Div(
                    [
                        html.H3("Yesterdays untagged resources"),
                        dcc.Graph(id="fig_ydf", figure=fig_ydf),
                    ]
                ),
                html.Div([dcc.Graph(id="source_cost_fig", figure=source_cost_fig)]),
            ],
            className="container-wide",
        ),
    ]
)
if __name__ == "__main__":
    # Dash development server; not for production use.
    app.run_server(debug=True)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 11:57:09 2020

@author: Dogancan Torun

Subject: set operations, set methods and frozenset.
"""

# Set definitions.  Sets are unordered, so they cannot be indexed
# (s[0] raises TypeError); membership is tested with `in`.
s = {10, 20, 30, 'ali', (1, 2, 3)}
s2 = set([20, 60, 30, 'mehmet', (9, 0, 8)])  # alternative constructor form

print(20 in s)  # membership test

# Cast the set to a list to get an indexable snapshot of its members.
cast_list = list(s)
# Bug fix: previously printed `s` instead of the list the message advertises.
print("Casting list={}".format(cast_list))

# Set operations (non-mutating).
print("Subset control= {} ".format(s.issubset(s2)))
print("Superset control= {} ".format(s.issuperset(s2)))
print("Intersection operation = {} ".format(s.intersection(s2)))
print("Union operation = {} ".format(s.union(s2)))
print("Difference control = {} ".format(s.difference(s2)))

# Mutating set methods.
s.add(99)
print("Add a member set {} ".format(s))
s.update(range(100, 105, 2))  # add every item of an iterable at once
print("Iterable add operations {} ".format(s))
s.remove((1, 2, 3))
print("After the removing={} ".format(s))
s.clear()  # leaves s empty for the rest of the script
print("All members delete operation={} ".format(s))

# Frozenset: an immutable set (no add/update/remove/clear).
fs = frozenset([5, 55, 555, 5555, 'ali', 20, 60, (9, 0, 8)])
print(type(fs))
print(fs)

# Non-mutating operations are still available on frozensets.
print("FS Subset control = {} ".format(fs.issubset(s2)))
print("FS Superset control= {} ".format(fs.issuperset(s2)))
print("FS Intersection operation = {} ".format(fs.intersection(s2)))
# Bug fix: previously used the (now empty) mutable set `s` instead of `fs`.
print("FS Union operation = {} ".format(fs.union(s2)))
print("FS Difference control = {} ".format(fs.difference(s2)))
|
"""
DeepLabCut2.0 Toolbox (deeplabcut.org)
© A. & M. Mathis Labs
https://github.com/AlexEMG/DeepLabCut
Please see AUTHORS for contributors.
https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0
"""
from __future__ import print_function
import wx
import cv2
import os
import matplotlib
import numpy as np
from pathlib import Path
import argparse
from deeplabcut.utils import auxiliaryfunctions
from skimage import io
from skimage.util import img_as_ubyte
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.colors as mcolors
import matplotlib.patches as patches
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.widgets import RectangleSelector
# ###########################################################################
# Class for GUI MainFrame
# ###########################################################################
class ImagePanel(wx.Panel):
    """wx panel hosting a single-axes matplotlib figure for showing video frames."""

    def __init__(self, parent, config, gui_size, **kwargs):
        # NOTE(review): h is derived from gui_size[0] (width) and w from
        # gui_size[1] (height) — the names look swapped; confirm intent.
        h = gui_size[0] / 2
        w = gui_size[1] / 3
        wx.Panel.__init__(self, parent, -1, style=wx.SUNKEN_BORDER, size=(h, w))
        self.figure = matplotlib.figure.Figure()
        self.axes = self.figure.add_subplot(1, 1, 1)
        self.canvas = FigureCanvas(self, -1, self.figure)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
        self.SetSizer(self.sizer)
        self.Fit()

    def getfigure(self):
        """
        Returns the figure, axes and canvas
        """
        return (self.figure, self.axes, self.canvas)

    def getColorIndices(self, img, bodyparts):
        """
        Return a matplotlib Normalize over the image intensity range
        [0, max] and one colorbar tick per bodypart; tick order is reversed.
        """
        im = io.imread(img)
        norm = mcolors.Normalize(vmin=0, vmax=np.max(im))
        ticks = np.linspace(0, np.max(im), len(bodyparts))[::-1]
        return norm, ticks
class WidgetPanel(wx.Panel):
    """Plain wx panel that holds the button/slider widgets below the image."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1, style=wx.SUNKEN_BORDER)
class MainFrame(wx.Frame):
    """Contains the main GUI and button boxes"""

    def __init__(self, parent, config, slider_width=25):
        # Setting the GUI size and panels design
        displays = (wx.Display(i) for i in range(wx.Display.GetCount()))  # Gets the number of displays
        screenSizes = [display.GetGeometry().GetSize() for display in displays]  # Gets the size of each display
        index = 0  # For display 1.
        screenWidth = screenSizes[index][0]
        screenHeight = screenSizes[index][1]
        self.gui_size = (screenWidth * 0.7, screenHeight * 0.85)
        wx.Frame.__init__(self, parent, id=wx.ID_ANY, title='DeepLabCut2.0 - Manual Frame Extraction',
                          size=wx.Size(self.gui_size), pos=wx.DefaultPosition,
                          style=wx.RESIZE_BORDER | wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetStatusText("")
        self.SetSizeHints(wx.Size(self.gui_size))  # This sets the minimum size of the GUI. It can scale now!
        # Splitting the frame into top and bottom panels.  The bottom panel
        # contains the widgets; the top panel shows images and plots.
        topSplitter = wx.SplitterWindow(self)
        self.image_panel = ImagePanel(topSplitter, config, self.gui_size)
        self.widget_panel = WidgetPanel(topSplitter)
        topSplitter.SplitHorizontally(self.image_panel, self.widget_panel, sashPosition=self.gui_size[1] * 0.83)  # 0.9
        topSplitter.SetSashGravity(1)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(topSplitter, 1, wx.EXPAND)
        self.SetSizer(sizer)
        # Add buttons to the WidgetPanel and bind them to their respective functions.
        widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
        self.load = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Load Video")
        widgetsizer.Add(self.load, 1, wx.ALL, 15)
        self.load.Bind(wx.EVT_BUTTON, self.browseDir)
        self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
        widgetsizer.Add(self.help, 1, wx.ALL, 15)
        self.help.Bind(wx.EVT_BUTTON, self.helpButton)
        self.grab = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Grab Frames")
        widgetsizer.Add(self.grab, 1, wx.ALL, 15)
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.grab.Enable(False)
        widgetsizer.AddStretchSpacer(5)
        # Frame slider, hidden until a video is loaded.
        size_x = round(self.gui_size[0] * (slider_width / 100), 0)
        self.slider = wx.Slider(self.widget_panel, id=wx.ID_ANY, value=0, minValue=0, maxValue=1, size=(size_x, -1),
                                style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)
        widgetsizer.Add(self.slider, 1, wx.ALL, 5)
        self.slider.Hide()
        widgetsizer.AddStretchSpacer(5)
        # Widgets for extracting a range of frames (hidden until a video is loaded).
        self.start_frames_sizer = wx.BoxSizer(wx.VERTICAL)
        self.end_frames_sizer = wx.BoxSizer(wx.VERTICAL)
        self.start_frames_sizer.AddSpacer(15)
        self.startFrame = wx.SpinCtrl(self.widget_panel, value='0', size=(100, -1), min=0, max=120)
        self.startFrame.Bind(wx.EVT_SPINCTRL, self.updateSlider)
        self.startFrame.Enable(False)
        self.start_frames_sizer.Add(self.startFrame, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        start_text = wx.StaticText(self.widget_panel, label='Start Frame Index')
        self.start_frames_sizer.Add(start_text, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        self.checkBox = wx.CheckBox(self.widget_panel, id=wx.ID_ANY, label='Range of frames')
        self.checkBox.Bind(wx.EVT_CHECKBOX, self.activate_frame_range)
        self.start_frames_sizer.Add(self.checkBox, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        #
        self.end_frames_sizer.AddSpacer(15)
        self.endFrame = wx.SpinCtrl(self.widget_panel, value='1', size=(160, -1), min=1, max=120)
        self.endFrame.Enable(False)
        self.end_frames_sizer.Add(self.endFrame, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        end_text = wx.StaticText(self.widget_panel, label='Number of Frames')
        self.end_frames_sizer.Add(end_text, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        self.updateFrame = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Update")
        self.end_frames_sizer.Add(self.updateFrame, 1, wx.EXPAND | wx.ALIGN_LEFT, 15)
        self.updateFrame.Bind(wx.EVT_BUTTON, self.updateSlider)
        self.updateFrame.Enable(False)
        widgetsizer.Add(self.start_frames_sizer, 1, wx.ALL, 0)
        widgetsizer.AddStretchSpacer(5)
        widgetsizer.Add(self.end_frames_sizer, 1, wx.ALL, 0)
        widgetsizer.AddStretchSpacer(15)
        self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
        widgetsizer.Add(self.quit, 1, wx.ALL, 15)
        self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
        self.quit.Enable(True)
        # Hiding these widgets and show them once the video is loaded
        self.start_frames_sizer.ShowItems(show=False)
        self.end_frames_sizer.ShowItems(show=False)
        self.widget_panel.SetSizer(widgetsizer)
        self.widget_panel.SetSizerAndFit(widgetsizer)
        self.widget_panel.Layout()
        # Variables initialization
        self.numberFrames = 0
        self.currFrame = 0
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.drs = []
        # Project settings read from the DeepLabCut config.yaml file.
        self.cfg = auxiliaryfunctions.read_config(config)
        self.Task = self.cfg['Task']
        self.start = self.cfg['start']
        self.stop = self.cfg['stop']
        self.date = self.cfg['date']
        self.trainFraction = self.cfg['TrainingFraction']
        self.trainFraction = self.trainFraction[0]
        self.videos = self.cfg['video_sets'].keys()
        self.bodyparts = self.cfg['bodyparts']
        self.colormap = plt.get_cmap(self.cfg['colormap'])
        self.colormap = self.colormap.reversed()
        self.markerSize = self.cfg['dotsize']
        self.alpha = self.cfg['alphavalue']
        self.video_names = [Path(i).stem for i in self.videos]
        self.config_path = Path(config)
        self.extract_range_frame = False
        self.extract_from_analyse_video = False

    def quitButton(self, event):
        """
        Quits the GUI
        """
        self.statusbar.SetStatusText("")
        dlg = wx.MessageDialog(None, "Are you sure?", "Quit!", wx.YES_NO | wx.ICON_WARNING)
        result = dlg.ShowModal()
        if result == wx.ID_YES:
            print("Quitting for now!")
            self.Destroy()

    def updateSlider(self, event):
        """Sync the slider with the start-frame spin control and redraw."""
        self.slider.SetValue(self.startFrame.GetValue())
        self.currFrame = (self.slider.GetValue())
        if self.extract_from_analyse_video == True:
            # Drop the old colorbar axes before re-plotting the labels.
            self.figure.delaxes(self.figure.axes[1])
            self.plot_labels()
        self.update()

    def activate_frame_range(self, event):
        """
        Activates the frame range boxes
        """
        self.checkSlider = event.GetEventObject()
        if self.checkSlider.GetValue() == True:
            # Range mode: enable start/end controls, disable single-frame grab.
            self.extract_range_frame = True
            self.startFrame.Enable(True)
            self.startFrame.SetValue(self.slider.GetValue())
            self.endFrame.Enable(True)
            self.updateFrame.Enable(True)
            self.grab.Enable(False)
        else:
            self.extract_range_frame = False
            self.startFrame.Enable(False)
            self.endFrame.Enable(False)
            self.updateFrame.Enable(False)
            self.grab.Enable(True)

    def line_select_callback(self, eclick, erelease):
        'eclick and erelease are the press and release events'
        # Store the corners of the rectangle drawn for cropping.
        self.new_x1, self.new_y1 = eclick.xdata, eclick.ydata
        self.new_x2, self.new_y2 = erelease.xdata, erelease.ydata

    def CheckCropping(self):
        ''' Display frame at time "time" for video to check if cropping is fine.
        Select ROI of interest by adjusting values in myconfig.py

        USAGE for cropping:
        clip.crop(x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)

        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 indicates the top left corner and
        x2,y2 is the lower right corner of the cropped region.
        All coordinates are in pixels. Float numbers are accepted.
        '''
        videosource = self.video_source
        # Crop values are stored in config as a comma-separated "x1, x2, y1, y2" string.
        self.x1 = int(self.cfg['video_sets'][videosource]['crop'].split(',')[0])
        self.x2 = int(self.cfg['video_sets'][videosource]['crop'].split(',')[1])
        self.y1 = int(self.cfg['video_sets'][videosource]['crop'].split(',')[2])
        self.y2 = int(self.cfg['video_sets'][videosource]['crop'].split(',')[3])
        if self.cropping == True:
            # Select ROI of interest by drawing a rectangle
            self.cid = RectangleSelector(self.axes, self.line_select_callback, drawtype='box', useblit=False,
                                         button=[1], minspanx=5, minspany=5, spancoords='pixels', interactive=True)
            self.canvas.mpl_connect('key_press_event', self.cid)

    def OnSliderScroll(self, event):
        """
        Slider to scroll through the video
        """
        self.axes.clear()
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.currFrame = (self.slider.GetValue())
        self.startFrame.SetValue(self.currFrame)
        self.update()

    def is_crop_ok(self, event):
        """
        Checks if the cropping is ok
        """
        # Restore the grab button to its frame-grabbing role and reveal the
        # navigation widgets now that the crop rectangle is fixed.
        self.grab.SetLabel("Grab Frames")
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.slider.Show()
        self.start_frames_sizer.ShowItems(show=True)
        self.end_frames_sizer.ShowItems(show=True)
        self.widget_panel.Layout()
        self.slider.SetMax(self.numberFrames)
        self.startFrame.SetMax(self.numberFrames - 1)
        self.endFrame.SetMax(self.numberFrames)
        self.x1 = int(self.new_x1)
        self.x2 = int(self.new_x2)
        self.y1 = int(self.new_y1)
        self.y2 = int(self.new_y2)
        self.canvas.mpl_disconnect(self.cid)
        self.axes.clear()
        self.currFrame = (self.slider.GetValue())
        self.update()
        # Update the config.yaml file with the newly chosen crop rectangle.
        self.cfg['video_sets'][self.video_source] = {'crop': ', '.join(map(str, [self.x1, self.x2, self.y1, self.y2]))}
        auxiliaryfunctions.write_config(self.config_path, self.cfg)

    def browseDir(self, event):
        """
        Show the File Dialog and ask the user to select the video file
        """
        self.statusbar.SetStatusText("Looking for a video to start extraction..")
        dlg = wx.FileDialog(self, "SELECT A VIDEO", os.getcwd(), "", "*.*", wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            self.video_source_original = dlg.GetPath()
            self.video_source = str(Path(self.video_source_original).resolve())
            self.load.Enable(False)
        else:
            # User cancelled: close the window.
            # NOTE(review): dlg.Destroy() is also called unconditionally
            # below, so on this path it runs twice — confirm intent.
            pass
            dlg.Destroy()
            self.Close(True)
        dlg.Destroy()
        selectedvideo = Path(self.video_source)
        self.statusbar.SetStatusText('Working on video: {}'.format(os.path.split(str(selectedvideo))[-1]))
        if str(selectedvideo.stem) in self.video_names:
            self.grab.Enable(True)
            self.vid = cv2.VideoCapture(self.video_source)
            self.videoPath = os.path.dirname(self.video_source)
            self.filename = Path(self.video_source).name
            self.numberFrames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
            # Checks if the video is corrupt.
            if not self.vid.isOpened():
                msg = wx.MessageBox('Invalid Video file!Do you want to retry?', 'Error!', wx.YES_NO | wx.ICON_WARNING)
                if msg == 2:  # NOTE(review): 2 is wx.YES — compare with wx.YES for clarity
                    self.load.Enable(True)
                    MainFrame.browseDir(self, event)
                else:
                    self.Destroy()
            self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
            self.update()
            cropMsg = wx.MessageBox("Do you want to crop the frames?", 'Want to crop?', wx.YES_NO | wx.ICON_INFORMATION)
            if cropMsg == 2:  # NOTE(review): 2 is wx.YES
                self.cropping = True
                self.grab.SetLabel("Set cropping parameters")
                self.grab.Bind(wx.EVT_BUTTON, self.is_crop_ok)
                self.widget_panel.Layout()
                self.basefolder = 'data-' + self.Task + '/'
                MainFrame.CheckCropping(self)
            else:
                self.cropping = False
                self.slider.Show()
                self.start_frames_sizer.ShowItems(show=True)
                self.end_frames_sizer.ShowItems(show=True)
                self.widget_panel.Layout()
                self.slider.SetMax(self.numberFrames - 1)
                self.startFrame.SetMax(self.numberFrames - 1)
                self.endFrame.SetMax(self.numberFrames - 1)
        else:
            wx.MessageBox('Video file is not in config file. Use add function to add this video in the config file and retry!', 'Error!', wx.OK | wx.ICON_WARNING)
            self.Close(True)

    def update(self):
        """
        Updates the image with the current slider index
        """
        self.grab.Enable(True)
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.figure, self.axes, self.canvas = self.image_panel.getfigure()
        self.vid.set(1, self.currFrame)  # 1 == cv2.CAP_PROP_POS_FRAMES
        ret, frame = self.vid.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.ax = self.axes.imshow(frame)
            self.axes.set_title(str(str(self.currFrame) + "/" + str(self.numberFrames - 1) + " " + self.filename))
            self.figure.canvas.draw()

    def chooseFrame(self):
        """Save the frame at self.currFrame as a png under labeled-data/<video stem>."""
        ret, frame = self.vid.read()
        fname = Path(self.filename)
        output_path = self.config_path.parents[0] / 'labeled-data' / fname.stem
        if output_path.exists():
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = img_as_ubyte(frame)
            # Zero-pad the frame index so saved file names sort correctly.
            img_name = str(output_path) + '/img' + str(self.currFrame).zfill(int(np.ceil(np.log10(self.numberFrames)))) + '.png'
            if self.cropping:
                crop_img = frame[self.y1:self.y2, self.x1:self.x2]
                cv2.imwrite(img_name, cv2.cvtColor(crop_img, cv2.COLOR_RGB2BGR))
            else:
                cv2.imwrite(img_name, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        else:
            print("%s path not found. Please make sure that the video was added to the config file using the function 'deeplabcut.add_new_videos'." % output_path)

    def grabFrame(self, event):
        """
        Extracts the frame and saves in the current directory
        """
        num_frames_extract = self.endFrame.GetValue()
        for i in range(self.currFrame, self.currFrame + num_frames_extract):
            self.currFrame = i
            self.vid.set(1, self.currFrame)
            self.chooseFrame()
        # NOTE(review): the last frame is written once more here after the
        # loop — confirm whether this duplicate save is intended.
        self.vid.set(1, self.currFrame)
        self.chooseFrame()

    def plot_labels(self):
        """
        Plots the labels of the analyzed video
        """
        self.vid.set(1, self.currFrame)
        ret, frame = self.vid.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.norm = mcolors.Normalize(vmin=np.min(frame), vmax=np.max(frame))
            self.colorIndex = np.linspace(np.min(frame), np.max(frame), len(self.bodyparts))
            # Colorbar with one tick per bodypart.
            divider = make_axes_locatable(self.axes)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            cbar = self.figure.colorbar(self.ax, cax=cax, spacing='proportional', ticks=self.colorIndex)
            cbar.set_ticklabels(self.bodyparts)
            for bpindex, bp in enumerate(self.bodyparts):
                color = self.colormap(self.norm(self.colorIndex[bpindex]))
                # NOTE(review): self.Dataframe and self.scorer are never set in
                # this class — presumably assigned externally when extracting
                # from an analyzed video; verify before relying on this path.
                self.points = [self.Dataframe[self.scorer][bp]['x'].values[self.currFrame], self.Dataframe[self.scorer][bp]['y'].values[self.currFrame], 1.0]
                circle = [patches.Circle((self.points[0], self.points[1]), radius=self.markerSize, fc=color, alpha=self.alpha)]
                self.axes.add_patch(circle[0])
            self.figure.canvas.draw()

    def helpButton(self, event):
        """
        Opens Instructions
        """
        wx.MessageBox('1. Use the Load Video button to load a video. Use the slider to select a frame in the entire video. The number mentioned on the top of the slider represents the frame index. \n\n2. Click Grab Frames button to save the specific frame.\n\n3. In events where you need to extract a range of frames, then use the checkbox Range of frames to select the start frame index and number of frames to extract. Click the update button to see the start frame index. Click Grab Frames to select the range of frames. \n\n Click OK to continue', 'Instructions to use!', wx.OK | wx.ICON_INFORMATION)
class MatplotPanel(wx.Panel):
    """Small wx panel holding an empty matplotlib figure.

    NOTE(review): not referenced anywhere in this file — possibly dead code.
    """

    def __init__(self, parent, config):
        wx.Panel.__init__(self, parent, -1, size=(100, 100))
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
def show(config, slider_width=25):
    """Launch the manual frame-extraction GUI.

    Parameters
    ----------
    config : str
        Path to the project's config.yaml file.
    slider_width : int, optional
        Width of the frame slider as a percentage of the GUI width.
    """
    import imageio
    # NOTE(review): imageio.plugins.ffmpeg.download() is deprecated/removed in
    # recent imageio releases — confirm the pinned imageio version supports it.
    imageio.plugins.ffmpeg.download()
    app = wx.App()
    frame = MainFrame(None, config, slider_width).Show()
    app.MainLoop()
if __name__ == '__main__':
    # CLI entry point: `python <script> path/to/config.yaml` opens the GUI.
    parser = argparse.ArgumentParser()
    parser.add_argument('config')
    cli_args = parser.parse_args()
    # Bug fix: the parsed arguments were never used, so running the module
    # from the command line did nothing. Launch the GUI with the given config.
    show(cli_args.config)
|
from docx import Document
import tempfile
class SummaryWriter:
    """Serialize a nested summaries mapping to .docx or plain-text files.

    Expected structure of ``summaries``::

        {'filenames': [fname, ...],
         fname: {'titles': [title, ...],
                 title: {'summary': str}}}
    """

    def __init__(self, summaries):
        # Mapping described in the class docstring; stored as-is.
        self.summaries = summaries

    def write_docx(self, destination_path):
        """
        Write the summaries to ``destination_path`` in docx format: one
        level-1 heading per source file, a level-2 heading per section
        title, followed by that section's summary paragraph.
        """
        document = Document()
        for filename in self.summaries['filenames']:
            document.add_heading(filename, level=1)
            sections = self.summaries[filename]
            for title in sections['titles']:
                document.add_heading(title, level=2)
                document.add_paragraph(sections[title]['summary'])
        document.save(destination_path)

    def write_txt(self, destination_path):
        """
        Write the summaries to ``destination_path`` as UTF-8 plain text.

        Bug fix: the docstring previously claimed "docx format" (copied
        from write_docx); this method writes plain text.
        """
        # Collect fragments and join once instead of quadratic `+=` concat.
        parts = []
        for filename in self.summaries['filenames']:
            parts.append(filename + '\n\n\n')
            sections = self.summaries[filename]
            for title in sections['titles']:
                parts.append(title + '\n\n')
                parts.append(sections[title]['summary'] + '\n')
        # Binary mode + explicit encode keeps the byte output platform-independent.
        with open(destination_path, 'wb') as f:
            f.write(''.join(parts).encode('utf-8'))
|
import glob
import itertools
import os
import sys
import cv2
from socialresearchcv.processing.CONFIG import CONFIG
import json
import pyrealsense2 as rs
from socialresearchcv.processing.CSVWriter import CSVWriter
from socialresearchcv.processing.ImageSet import ImageSet
from socialresearchcv.processing.Keypoint import Keypoint
from socialresearchcv.processing.PoseViewer import PoseViewer
# from socialresearchcv.processing.AgeGenderDetector import AgeGenderDetector
class InteractionAn():
    """Pipeline driver: reads RealSense depth frames plus OpenPose keypoint
    JSON per frame, writes per-frame measurements to CSV, and optionally
    renders annotated output videos."""

    @staticmethod
    def run_all(save_world_points=False, start_index=0, end_index=0, progress_bar=None):
        """Run interaction pipeline and save data as required in the CONFIG file

        Args:
            save_world_points (bool, optional): true if 3D points needs to be exported. Defaults to False.
            start_index (int, optional): start frame. Defaults to 0.
            end_index (int, optional): end frame. Defaults to 0.
            progress_bar (_type_, optional): progress bar object. Defaults to None.

        Raises:
            Exception: raised deliberately when end_index is reached; the
                caller (run) catches it as a stop signal.
        """
        pipeline = rs.pipeline()
        config = rs.config()
        if CONFIG.LOAD_FROM_FILE:
            config.enable_device_from_file(CONFIG.DIR_PATH)
        else:
            config.enable_stream(rs.stream.depth)
        profile = pipeline.start(config)
        # Get stream profile and camera intrinsics
        profile = pipeline.get_active_profile()
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        depth_intrinsics = depth_profile.get_intrinsics()
        w, h = depth_intrinsics.width, depth_intrinsics.height
        pose_viewer = PoseViewer(intrinsics=depth_intrinsics)
        csv_writer = CSVWriter(path=f'{CONFIG.CSV_PATH}_{start_index}_{end_index}.csv')
        index = start_index
        point_cloud_over_time = []
        # if CONFIG.EXTRACT_AGE_GENDER:
        #     ag_detector = AgeGenderDetector()
        for _ in range(end_index - start_index):
            if progress_bar is not None:
                progress_bar.set_description(f'Parsing Frame #{index}')
                progress_bar.update(1)
            image_set = ImageSet(index)
            with open(image_set.keypoints_path) as json_file:
                data = json.load(json_file)
            people = data['people']
            if len(people) <= 0:  # No people found: record an empty row
                csv_writer.add_empty_row(frame_index=index)
            else:
                point_cloud_over_time.append([])
                frame_keypoints = list()
                ################# CREATE FRAME KEYPOINTS ###############
                for p_index, p in enumerate(people):
                    keypoints_list = p['pose_keypoints_2d']
                    current_keypoint = Keypoint(index, p_index, image_set, keypoints_list, pose_viewer)
                    point_for_cloud = []
                    # Keypoints arrive flattened as (x, y, confidence) triples.
                    for i in range(0, len(keypoints_list) // 3):
                        p_cloud = [int(keypoints_list[3 * i]), int(keypoints_list[3 * i + 1]),
                                   keypoints_list[3 * i + 2]]
                        point_for_cloud.append(p_cloud)
                    csv_writer.add_poses(
                        [pose_viewer.get_world_point(p_2d, image_set.depth_image) for p_2d in point_for_cloud],
                        p_index, index, "Keypoints(m)")
                    csv_writer.add_keypoints2D(
                        [p_2d for p_2d in point_for_cloud],
                        p_index, index, "Keypoints(image)")
                    point_cloud_over_time[-1].append(point_for_cloud)
                    frame_keypoints.append(current_keypoint)
                ################# ANALYZE AND PRESENT DATA #############
                # Age and Gender
                # if CONFIG.EXTRACT_AGE_GENDER:
                #     out_image,a_g_data = ag_detector.detect(image_set.age_gender)
                #     [csv_writer.add_age_gender_data(data, i, index, f'Age and Gender') for i,data in enumerate(a_g_data)]
                # distance points
                for kp in frame_keypoints:
                    pose_viewer.ShowPoint(kp.keypoint_joints_2d[0], image_set.depth_image,
                                          image_set.depth_colormap,
                                          image_set.analyzed)
                # Keypoint IDs
                for kp in frame_keypoints:
                    for j in range(0, 20):
                        pose_viewer.ShowPersonLabel(kp.keypoint_joints_2d[j], None,
                                                    None,
                                                    image_set.keypoints_ids, CONFIG.JOINT_NAMES_DICT[j])
                # Person IDs
                for kp in frame_keypoints:
                    for j in range(0, 20):
                        pose_viewer.ShowPersonLabel(kp.keypoint_joints_2d[j], None,
                                                    None,
                                                    image_set.person_ids, str(kp.keypoint_body_id))
                # Add points for distance
                points_distance_dict = dict()
                for j in range(0, 20):
                    points_distance_dict[j] = [kp.keypoint_joints_2d[j] for kp in frame_keypoints]
                distances = pose_viewer.showDistances(points_distance_dict, image_set.depth_image,
                                                      image_set.depth_colormap, image_set.analyzed)
                csv_writer.add_pose([(0, 0, 0)] * len(CONFIG.JOINT_NAMES_DICT.keys()), None, index, "Distance(m)",
                                    keypoint_id=-1, distances=distances)
                # Point cloud visualization
                pose_viewer.showPointCloud(index,
                                           [k.keypoint_body_id for k in
                                            frame_keypoints],
                                           point_cloud_over_time,
                                           image_set.depth_image, image_set.point_cloud,
                                           image_set.point_cloud_hull, csv_writer,
                                           pose_viewer)
                # Add points for angles
                points_angles_dict_relative = dict()
                for j in [2, 5, 15, 16]:
                    points_angles_dict_relative[j] = [kp.keypoint_joints_2d[j] for kp in frame_keypoints]
                points_angles_dict_personal = dict()
                bodies = [k.keypoint_body_id for k in frame_keypoints]
                for pair in itertools.product([(2, 5), (2, 3), (5, 6)], bodies):
                    points_angles_dict_personal[pair[0]] = [
                        (kp.keypoint_joints_2d[pair[0][0]], kp.keypoint_joints_2d[pair[0][1]]) for kp in
                        frame_keypoints]
                pose_viewer.show_angles(points_angles_dict_personal,
                                        points_angles_dict_relative,
                                        image_set.depth_image,
                                        image_set.depth_colormap,
                                        image_set.angles_image_personal,
                                        image_set.angles_image_relative)
            if save_world_points:
                # Bug fix: `index is 0` compared identity instead of equality
                # (SyntaxWarning on Python 3.8+); use == for value comparison.
                if os.listdir(CONFIG.KEYPOINTS_PROJECTED_PATH) and index == 0:
                    sys.exit(f'Path {CONFIG.PNG_PATH_COLOR} for projected keypoints is not empty')
                with open(image_set.out_3d_keypoints_path, 'w') as outfile:
                    json.dump(data, outfile)
            index += 1
            if index == end_index:
                if CONFIG.VERBOSE:
                    print("End index reached, raising exception")
                # Deliberate stop signal, caught and swallowed by run().
                raise Exception
            if CONFIG.SAVE_OUTPUT_VIDEOS:
                for p_index, p in enumerate(people):
                    keypoints_list = p['pose_keypoints_2d']
                    current_keypoint = Keypoint(index, p_index, image_set, keypoints_list, pose_viewer)
                    point_for_cloud = []
                    for i in range(0, len(keypoints_list) // 3):
                        p_cloud = [int(keypoints_list[3 * i]), int(keypoints_list[3 * i + 1]),
                                   keypoints_list[3 * i + 2]]
                        point_for_cloud.append(p_cloud)
                    if CONFIG.BLUR_FACES and CONFIG.SAVE_OUTPUT_VIDEOS:
                        image_set.blur_faces(point_for_cloud)
                image_set.draw_images(index)

    @staticmethod
    def export_to_video(path, file_name, start_index, end_index):
        """Export sequence into video

        Args:
            path (string): images path
            file_name (string): output file name
            start_index (int): start frame
            end_index (int): end frame
        """
        img_array = []
        size = None
        for i in range(start_index, end_index):
            img_path = path + "output_" + str(i) + '.jpg'
            img = cv2.imread(img_path)
            if img is not None:
                height, width, layers = img.shape
                size = (width, height)
                img_array.append(img)
        # Bug fix: guard against an empty sequence — `size` was previously
        # referenced while unbound when no image could be read.
        if not img_array:
            return
        out_path = f'Outputs/{CONFIG.IMAGES_DIR}/{file_name}'
        out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'DIVX'), 30, size)
        for frame in img_array:
            out.write(frame)
        out.release()

    @staticmethod
    def run(start_index=0, end_index=0, progress=None):
        """Run the interaction analysis

        Args:
            start_index (int, optional): start frame. Defaults to 0.
            end_index (int, optional): end frame. Defaults to 0.
            progress (_type_, optional): progress bar object. Defaults to None.
        """
        try:
            # (removed a stray `pass` that preceded this call)
            InteractionAn.run_all(save_world_points=False, start_index=start_index, end_index=end_index, progress_bar=progress)
        except FileNotFoundError as e:
            print('File not found!', file=sys.stderr)
            print(e)
        except Exception as e:
            # run_all raises a bare Exception as its normal stop signal, so a
            # broad except is required here; details are logged only in
            # verbose mode.
            if CONFIG.VERBOSE:
                print('Something went wrong, carrying on', file=sys.stderr)
                import traceback
                traceback.print_exc()
                print(e, file=sys.stderr)
        if CONFIG.SAVE_OUTPUT_VIDEOS:
            InteractionAn.export_to_video(CONFIG.OUTPUT_IMAGE_PATH_1, f'outVideo1_{start_index}_{end_index}.avi', start_index, end_index)
            InteractionAn.export_to_video(CONFIG.OUTPUT_IMAGE_PATH_2, f'outVideo2_{start_index}_{end_index}.avi', start_index, end_index)
            InteractionAn.export_to_video(CONFIG.OUTPUT_IMAGE_PATH_3, f'outVideo3_{start_index}_{end_index}.avi', start_index, end_index)
|
import pandas as pd
import plotly.figure_factory as ff
import csv
import random
import plotly.graph_objects as go
import statistics
# Load the full population of maths scores from disk.
df = pd.read_csv('studentMarks.csv')
data = df['Math_score'].tolist()
# Population-level statistics, used as the reference for the CLT demo below.
mean_population = statistics.mean(data)
std_deviation_population = statistics.stdev(data)
# (Optional plot of the raw population distribution, kept for reference.)
# fig = ff.create_distplot([data],['Maths Score'],show_hist=False)
# fig.show()
print('Population mean: ', mean_population)
print('Population Standard deviation: ', std_deviation_population)
def random_set_of_mean(count, values=None):
    """Return the mean of `count` values sampled with replacement.

    Parameters
    ----------
    count : int
        Number of values to draw.
    values : sequence of numbers, optional
        Population to sample from.  Defaults to the module-level `data`
        list (kept for backward compatibility with existing callers).
    """
    if values is None:
        values = data
    data_set = []
    for _ in range(0, count):
        # randint bounds are inclusive, hence len - 1.
        random_index = random.randint(0, len(values) - 1)
        data_set.append(values[random_index])
    return statistics.mean(data_set)
# Sampling distribution of the mean: draw 1000 random samples of size 100
# from the population and record each sample's mean (Central Limit Theorem).
mean_list = []
for i in range(0, 1000):
    setOfMeans = random_set_of_mean(100)
    mean_list.append(setOfMeans)
std_deviation = statistics.stdev(mean_list)
mean = statistics.mean(mean_list)
print('Mean: ', mean)
print('Standard deviation: ', std_deviation)
# Band edges at 1, 2 and 3 standard deviations around the sampling mean.
firstStdStart, firstStdEnd = mean - std_deviation, mean + std_deviation
secondStdStart, secondStdEnd = mean - (2 * std_deviation), mean + (2 * std_deviation)
thirdStdStart, thirdStdEnd = mean - (3 * std_deviation), mean + (3 * std_deviation)
# (Optional plot of the sampling distribution with band edges, kept for reference.)
# fig = ff.create_distplot([mean_list],['Maths Score'],show_hist=False)
# fig.add_trace(go.Scatter(x = [mean,mean], y = [0,0.2],mode = 'lines',name = 'MEAN'))
# fig.add_trace(go.Scatter(x = [firstStdStart,firstStdStart], y = [0,0.2],mode = 'lines',name = 'First standard deviation start'))
# fig.add_trace(go.Scatter(x = [firstStdEnd,firstStdEnd], y = [0,0.2],mode = 'lines',name = 'First standard deviation end'))
# fig.add_trace(go.Scatter(x = [secondStdStart,secondStdStart], y = [0,0.2],mode = 'lines',name = 'Second standard deviation start'))
# fig.add_trace(go.Scatter(x = [secondStdEnd,secondStdEnd], y = [0,0.2],mode = 'lines',name = 'Second standard deviation end'))
# fig.add_trace(go.Scatter(x = [thirdStdStart,thirdStdStart], y = [0,0.2],mode = 'lines',name = 'Third standard deviation start'))
# fig.add_trace(go.Scatter(x = [thirdStdEnd,thirdStdEnd], y = [0,0.2],mode = 'lines',name = 'Third standard deviation end'))
# fig.show()
# Sample 1: compare its mean against the sampling distribution.
df1 = pd.read_csv('data1.csv')
data1 = df1['Math_score'].tolist()
meanOfSample1 = statistics.mean(data1)
# fig = ff.create_distplot([mean_list],['Maths Score'],show_hist=False)
# fig.add_trace(go.Scatter(x = [mean,mean], y = [0,0.2],mode = 'lines',name = 'MEAN'))
# fig.add_trace(go.Scatter(x = [meanOfSample1,meanOfSample1], y = [0,0.2], mode = 'lines',name = 'Mean of sample1'))
# fig.add_trace(go.Scatter(x = [firstStdEnd,firstStdEnd], y = [0,0.2],mode = 'lines',name = 'First standard deviation end'))
# fig.show()
# Sample 2.
df2 = pd.read_csv('data2.csv')
data2 = df2['Math_score'].tolist()
meanOfSample2 = statistics.mean(data2)
# fig = ff.create_distplot([mean_list],['Maths Score'],show_hist=False)
# fig.add_trace(go.Scatter(x = [mean,mean], y = [0,0.2],mode = 'lines',name = 'MEAN'))
# fig.add_trace(go.Scatter(x = [meanOfSample2,meanOfSample2], y = [0,0.2], mode = 'lines',name = 'Mean of sample2'))
# fig.add_trace(go.Scatter(x = [firstStdEnd,firstStdEnd], y = [0,0.2],mode = 'lines',name = 'First standard deviation end'))
# fig.add_trace(go.Scatter(x = [secondStdEnd,secondStdEnd], y = [0,0.2],mode = 'lines',name = 'Second standard deviation end'))
# fig.show()
# Sample 3, plotted against the sampling distribution.
df3 = pd.read_csv('data3.csv')
data3 = df3['Math_score'].tolist()
meanOfSample3 = statistics.mean(data3)
fig = ff.create_distplot([mean_list], ['Maths Score'], show_hist=False)
fig.add_trace(go.Scatter(x=[mean, mean], y=[0, 0.2], mode='lines', name='MEAN'))
fig.add_trace(go.Scatter(x=[meanOfSample3, meanOfSample3], y=[0, 0.2], mode='lines', name='Mean of sample3'))
fig.add_trace(go.Scatter(x=[firstStdEnd, firstStdEnd], y=[0, 0.2], mode='lines', name='First standard deviation end'))
fig.add_trace(go.Scatter(x=[secondStdEnd, secondStdEnd], y=[0, 0.2], mode='lines', name='Second standard deviation end'))
fig.add_trace(go.Scatter(x=[thirdStdEnd, thirdStdEnd], y=[0, 0.2], mode='lines', name='Third standard deviation end'))
fig.show()
# Z-scores: distance of each sample mean from the sampling mean, in units
# of the sampling distribution's standard deviation.
zScore1 = (meanOfSample1 - mean) / std_deviation
print('Z score of sample1: ', zScore1)
zScore2 = (meanOfSample2 - mean) / std_deviation
print('Z score of sample2: ', zScore2)
zScore3 = (meanOfSample3 - mean) / std_deviation
print('Z score of sample3: ', zScore3)
|
import FWCore.ParameterSet.Config as cms
from FastSimulation.Event.ParticleFilter_cfi import ParticleFilterBlock
from FastSimulation.SimplifiedGeometryPropagator.TrackerMaterial_cfi import TrackerMaterialBlock
# FastSim producer: runs the CMS fast simulation on the particles produced
# by the smeared generator vertex, using the simplified tracker geometry.
fastSimProducer = cms.EDProducer(
    "FastSimProducer",
    src = cms.InputTag("generatorSmeared"),  # input: generator particles after vertex smearing
    particleFilter = ParticleFilterBlock.ParticleFilter,
    detectorDefinition = TrackerMaterialBlock.TrackerMaterial,
    beamPipeRadius = cms.double(3.),  # presumably cm — confirm against the detector definition
    # Interaction models applied during propagation; each PSet key names a
    # model instance and className selects its implementation.
    interactionModels = cms.PSet(
        #simpleLayerHits = cms.PSet(
        #    className = cms.string("fastsim::SimpleLayerHitProducer")
        #    ),
        trackerSimHits = cms.PSet(
            className = cms.string("fastsim::TrackerSimHitProducer")
            ),
        bremsstrahlung = cms.PSet(
            className = cms.string("fastsim::Bremsstrahlung"),
            minPhotonEnergy = cms.double(0.1),
            minPhotonEnergyFraction = cms.double(0.005)
            ),
        dummyHits = cms.PSet(
            className = cms.string("fastsim::DummyHitProducer")
            ),
        ),
    )
|
#_*_ coding:utf-8_*_
'''
1. using RNN to train poems
2. support checkpoint support
3. support specify the first word of poem when doing inference
4. need begin and stop token for each sentences, why?
5. try using <UNK> token?
6. only need to unpack the lastone by using split(':')
7. reshape((,-1)) 's method
'''
import collections as cl
import random
import mxnet.ndarray as nd
from mxnet.gluon import nn,autograd
from mxnet import gluon
import numpy as np
import mxnet as mx
# NOTE: stop words are not handled yet
'''
todo:
1. remove punctuation
2. add begin and end token in each sentence
3. remove extra space
4. for words not in dict ,use <UNK> token instead
'''
def transform_data(path, num_steps):
    """Load a poem corpus and encode it as integer character sequences.

    Each line of the file at 'path' is expected to look like "title:content"
    (the poem body is everything after the last ':').  Spaces and the
    punctuation marks ',' and '。' are stripped, poems longer than
    'num_steps' or shorter than 5 characters are dropped, and every kept
    poem is wrapped in a begin token 'B' and an end token 'E'.

    Args:
        path: path to the UTF-8 corpus file.
        num_steps: maximum allowed poem length.

    Returns:
        (poems_vec, word_to_int, int_to_word): list of int-id sequences,
        char -> id dict (most frequent char gets the smallest id), and the
        inverse tuple.
    """
    poems = []
    start_token = 'B'
    end_token = 'E'
    unknown_token = 'U'  # placeholder for out-of-vocabulary characters
    with open(path, encoding='utf-8') as fh:
        for line in fh:
            # Only the text after the last ':' is the poem body.
            *title, content = line.strip().split(':')
            # BUG FIX: the original called content.replace(' ', '') and
            # discarded the result, so spaces were never actually removed.
            content = content.replace(' ', '')
            # Content may be empty; drop too-long/too-short poems.
            if len(content) > num_steps or len(content) < 5:
                continue
            # NOTE(review): only the ASCII comma is stripped here; full-width
            # ',' would survive — confirm whether that is intended.
            content = content.replace(',', "")
            content = content.replace('。', "")
            content = start_token + content + end_token
            poems.append(content)
    if not poems:
        # Robustness: zip(*[]) below would raise on an empty corpus.
        return [], {}, ()
    # Sort by length so later batching can group similar-sized poems.
    lines = sorted(poems, key=lambda x: len(x))
    words = []
    for line in lines:
        words += [word for word in line]
    # Build the vocabulary, most frequent character first (stable sort keeps
    # first-seen order among equal counts).
    word_counter = cl.Counter(words)
    words_list = sorted(word_counter.items(), key=lambda x: -x[1])
    unique_word, _ = zip(*words_list)
    word_to_int = dict(zip(unique_word, range(len(unique_word))))
    int_to_word = unique_word
    # BUG FIX: the original mapped unknown characters to the *string* 'U',
    # mixing str into otherwise all-int sequences (flagged "here is wrong"
    # by the author).  Use a dedicated integer id instead: the id of
    # unknown_token when it is in the vocabulary, else a fresh id.
    unk_id = word_to_int.get(unknown_token, len(word_to_int))
    poems_vec = [[word_to_int.get(ch, unk_id) for ch in poem] for poem in poems]
    return poems_vec, word_to_int, int_to_word
def train_test_split(poems_vec):
    """Shuffle 'poems_vec' in place and split it into 70% train / 30% test.

    Note: the input list is mutated by the shuffle.

    Returns:
        (training_vec, testing_vec): two slices of the shuffled list.
    """
    random.shuffle(poems_vec)
    split_at = int(len(poems_vec) * 0.7)
    return poems_vec[:split_at], poems_vec[split_at:]
def generate_batch(poems_vec, word_to_int, batch_size, ctx=mx.cpu()):
    """Yield zero-padded (data, label) batches in random batch order.

    Labels are the inputs shifted one position to the left (next-character
    prediction); the last label column keeps the data's last column.
    NOTE(review): padding value 0 is also the id of the most frequent
    character — confirm this collision is intended.
    """
    n_batches = len(poems_vec) // batch_size
    order = list(range(n_batches))
    random.shuffle(order)
    # Pad every batch to the longest poem in the whole corpus.
    pad_len = max(map(len, poems_vec))
    for b in order:
        chunk = poems_vec[b * batch_size:(b + 1) * batch_size]
        data = nd.full((batch_size, pad_len), 0, ctx=ctx)
        for row, seq in enumerate(chunk):
            data[row, 0:len(seq)] = nd.array(seq)
        # Shift left by one to build the next-character targets.
        label = data.copy()
        width = label.shape[1]
        label[:, 0:width - 1] = data[:, 1:width]
        # (batch_size, num_steps) pair
        yield (data, label)
# Re-iterable wrapper around a batch-generator factory.
class ReusableGenerator:
    """Makes a one-shot generator factory usable for multiple epochs.

    Every iter() call invokes the stored factory again with the stored
    arguments, producing a fresh generator each time.
    """

    def __init__(self, generator_factory, corpus_vec, word2int, batch_size, ctx):
        self.generator_factory = generator_factory
        self.word2int = word2int
        self.batch_size = batch_size
        self.ctx = ctx
        self.corpus_vec = corpus_vec
        # Shuffle once up front; order then stays fixed across epochs.
        random.shuffle(self.corpus_vec)

    def __iter__(self):
        make = self.generator_factory
        return make(self.corpus_vec, self.word2int, self.batch_size, self.ctx)
#define network
# Smoke-test driver: vectorize the corpus and print the shape of each batch.
# NOTE(review): generate_batch pads with mxnet NDArrays, so data/label have
# .shape attributes — requires mxnet at runtime.
if __name__=='__main__':
    poem_vec,word2int,int2word = transform_data('../input/poems.txt',64)
    data_iter = generate_batch(poem_vec,word2int,batch_size=32)
    for i,(data,label) in enumerate(data_iter):
        print(i)
        print(data.shape)
        print(label.shape)
|
"""
<Program Name>
storage.py
<Author>
Joshua Lock <jlock@vmware.com>
<Started>
April 9, 2020
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provides an interface for filesystem interactions, StorageBackendInterface.
"""
import errno
import logging
import os
import shutil
import stat
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
from typing import IO, BinaryIO, Iterator, List, Optional
from securesystemslib import exceptions
logger = logging.getLogger(__name__)
class StorageBackendInterface(metaclass=ABCMeta):
    """
    <Purpose>
      Defines an interface for abstract storage operations which can be
      implemented for a variety of storage solutions, such as remote and
      local filesystems.
    """

    @abstractmethod
    @contextmanager
    def get(self, filepath: str) -> Iterator[BinaryIO]:
        """
        <Purpose>
          A context manager for 'with' statements that is used for retrieving
          files from a storage backend and cleans up the files upon exit.

            with storage_backend.get('/path/to/file') as file_object:
              # operations
            # file is now closed

        <Arguments>
          filepath:
            The full path of the file to be retrieved.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the file does not
          exist or is not accessible.

        <Returns>
          A ContextManager object that emits a file-like object for the file
          at 'filepath'.
        """
        raise NotImplementedError  # pragma: no cover

    @abstractmethod
    def put(
        self, fileobj: IO, filepath: str, restrict: Optional[bool] = False
    ) -> None:
        """
        <Purpose>
          Store a file-like object in the storage backend.

          The file-like object is read from the beginning, not its current
          offset (if any).

        <Arguments>
          fileobj:
            The file-like object to be stored.

          filepath:
            The full path to the location where 'fileobj' will be stored.

          restrict:
            Whether the file should be created with restricted permissions.
            What counts as restricted is backend-specific. For a filesystem
            on a UNIX-like operating system, that may mean read/write
            permissions only for the user (octal mode 0o600). For a cloud
            storage system, that likely means Cloud provider specific ACL
            restrictions.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the file can not be
          stored.

        <Returns>
          None
        """
        raise NotImplementedError  # pragma: no cover

    @abstractmethod
    def remove(self, filepath: str) -> None:
        """
        <Purpose>
          Remove the file at 'filepath' from the storage.

        <Arguments>
          filepath:
            The full path to the file.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the file can not be
          removed.

        <Returns>
          None
        """
        raise NotImplementedError  # pragma: no cover

    @abstractmethod
    def getsize(self, filepath: str) -> int:
        """
        <Purpose>
          Retrieve the size, in bytes, of the file at 'filepath'.

        <Arguments>
          filepath:
            The full path to the file.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the file does not
          exist or is not accessible.

        <Returns>
          The size in bytes of the file at 'filepath'.
        """
        raise NotImplementedError  # pragma: no cover

    @abstractmethod
    def create_folder(self, filepath: str) -> None:
        """
        <Purpose>
          Create a folder at filepath and ensure all intermediate components
          of the path exist.

          Passing an empty string for filepath does nothing and does not
          raise an exception.

        <Arguments>
          filepath:
            The full path of the folder to be created.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the folder can not be
          created.

        <Returns>
          None
        """
        raise NotImplementedError  # pragma: no cover

    @abstractmethod
    def list_folder(self, filepath: str) -> List[str]:
        """
        <Purpose>
          List the contents of the folder at 'filepath'.

        <Arguments>
          filepath:
            The full path of the folder to be listed.

        <Exceptions>
          securesystemslib.exceptions.StorageError, if the file does not
          exist or is not accessible.

        <Returns>
          A list containing the names of the files in the folder. May be an
          empty list.
        """
        raise NotImplementedError  # pragma: no cover
class FilesystemBackend(StorageBackendInterface):
    """
    <Purpose>
      A concrete implementation of StorageBackendInterface which interacts
      with local filesystems using Python standard library functions.
    """

    # As FilesystemBackend is effectively a stateless wrapper around various
    # standard library operations, we only ever need a single instance of it.
    # That single instance is safe to be (re-)used by all callers. Therefore
    # implement the singleton pattern to avoid unnecessarily creating multiple
    # objects.
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Singleton: create the instance lazily, then always return it.
        if cls._instance is None:
            # BUG FIX: do not forward *args/**kwargs to object.__new__();
            # on Python 3 this raises "TypeError: object.__new__() takes
            # exactly one argument" as soon as a caller passes any argument.
            cls._instance = object.__new__(cls)
        return cls._instance

    @contextmanager
    def get(self, filepath: str) -> Iterator[BinaryIO]:
        # Open the file for binary reading, yield it, and guarantee closure.
        file_object = None
        try:
            file_object = open(filepath, "rb")
            yield file_object
        except OSError:
            raise exceptions.StorageError(  # pylint: disable=raise-missing-from
                "Can't open %s"  # pylint: disable=consider-using-f-string
                % filepath
            )
        finally:
            if file_object is not None:
                file_object.close()

    def put(
        self, fileobj: IO, filepath: str, restrict: Optional[bool] = False
    ) -> None:
        # If we are passed an open file, seek to the beginning such that we are
        # copying the entire contents
        if not fileobj.closed:
            fileobj.seek(0)

        # If a file with the same name already exists, the new permissions
        # may not be applied.
        try:
            os.remove(filepath)
        except OSError:
            pass

        try:
            if restrict:
                # On UNIX-based systems restricted files are created with read
                # and write permissions for the user only (octal value 0o600).
                fd = os.open(
                    filepath,
                    os.O_WRONLY | os.O_CREAT,
                    stat.S_IRUSR | stat.S_IWUSR,
                )
            else:
                # Non-restricted files use the default 'mode' argument of
                # os.open() granting read, write, and execute for all users
                # (octal mode 0o777).
                # NOTE: mode may be modified by the user's file mode creation
                # mask (umask) or on Windows limited to the smaller set of OS
                # supported permisssions.
                fd = os.open(filepath, os.O_WRONLY | os.O_CREAT)
            with os.fdopen(fd, "wb") as destination_file:
                shutil.copyfileobj(fileobj, destination_file)
                # Force the destination file to be written to disk from
                # Python's internal and the operating system's buffers.
                # os.fsync() should follow flush().
                destination_file.flush()
                os.fsync(destination_file.fileno())
        except OSError:
            raise exceptions.StorageError(  # pylint: disable=raise-missing-from
                "Can't write file %s"  # pylint: disable=consider-using-f-string
                % filepath
            )

    def remove(self, filepath: str) -> None:
        # Simplified: FileNotFoundError and PermissionError are subclasses of
        # OSError, so listing them separately was redundant.
        try:
            os.remove(filepath)
        except OSError:  # pragma: no cover
            raise exceptions.StorageError(  # pylint: disable=raise-missing-from
                "Can't remove file %s"  # pylint: disable=consider-using-f-string
                % filepath
            )

    def getsize(self, filepath: str) -> int:
        try:
            return os.path.getsize(filepath)
        except OSError:
            raise exceptions.StorageError(  # pylint: disable=raise-missing-from
                "Can't access file %s"  # pylint: disable=consider-using-f-string
                % filepath
            )

    def create_folder(self, filepath: str) -> None:
        try:
            os.makedirs(filepath)
        except OSError as e:
            # 'OSError' raised if the leaf directory already exists or cannot
            # be created. Check for case where 'filepath' has already been
            # created and silently ignore.
            if e.errno == errno.EEXIST:
                pass
            elif e.errno == errno.ENOENT and not filepath:
                raise exceptions.StorageError(
                    "Can't create a folder with an empty filepath!"
                )
            else:
                raise exceptions.StorageError(
                    "Can't create folder at %s"  # pylint: disable=consider-using-f-string
                    % filepath
                )

    def list_folder(self, filepath: str) -> List[str]:
        try:
            return os.listdir(filepath)
        except FileNotFoundError:
            raise exceptions.StorageError(  # pylint: disable=raise-missing-from
                "Can't list folder at %s"  # pylint: disable=consider-using-f-string
                % filepath
            )
|
from unittest import TestCase,main
from vector3d import Vector3D
from .rect import Rect
class TestRect(TestCase):
    """Unit tests for Rect point containment and rect-to-rect distance."""

    def test1(self):
        # Axis-aligned unit square with corner at the origin.
        rect=Rect(Vector3D(0,0),1,1)
        self.assertTrue(Vector3D(0,0) in rect)
        self.assertTrue(Vector3D(1, 1) in rect)
        self.assertFalse(Vector3D(0, 1.1) in rect)
        # NOTE(review): __contains__ apparently accepts a 'tol' tolerance
        # keyword that the 'in' operator cannot pass — confirm against rect.Rect.
        self.assertTrue(rect.__contains__(Vector3D(0,1.1),tol=0.11))
        # Touching/overlapping rectangles should report zero distance.
        rect1=Rect(Vector3D(1,1),1,1)
        a,b=rect.get_dist_from_rect(rect1)
        self.assertEqual([0,0],[a,b])
        rect1 = Rect(Vector3D(-1, 0), 1, 1)
        a, b = rect.get_dist_from_rect(rect1)
        self.assertEqual([0, 0], [a, b])
        rect1 = Rect(Vector3D(-1, 0), 2, 2)
        a, b = rect.get_dist_from_rect(rect1)
        self.assertEqual([0, 0], [a, b])
        # A separated rectangle: expect 0.5 gap along x, 0 along y.
        rect1 = Rect(Vector3D(-1, 0), 0.5, 0.5)
        a, b = rect.get_dist_from_rect(rect1)
        self.assertEqual([0.5, 0], [a, b])

    def test2(self):
        # Rotated square (angle ~ pi/6 radians) — corner point no longer inside.
        rect = Rect(Vector3D(0, 0), 1, 1,3.14/6)
        self.assertFalse(Vector3D(1, 1) in rect)
        self.assertTrue(Vector3D(0.366, 1.3) in rect)

if __name__ == '__main__':
    main()
|
from argparse import ArgumentParser
import tensorflow as tf
import tensorflow.keras as keras
import matplotlib.pyplot as plt
import numpy as np
from functools import partial
import os
from os import makedirs
import time
from IPython import display
from model import Encoder, Generator, Critic
from loss import W_loss
from util import plot_images, mh_update, gradient_penalty
parser = ArgumentParser(description='bigan')
parser.add_argument('--out_dir', type=str, action='store')
args = parser.parse_args()
# Hyperparameters
BUFFER_SIZE = 50000
BATCH_SIZE = 64
EPOCHS = 100
LATENT_DIM = 200
GP_WEIGHT = 10
NUM_CRITIC = 5
SIGMA = 1.
GAMMA = 0.1
NUM_EXAMPLES = 20
NUM_CHANNELS = 3
NUM_STEPS = 10
# Create a directory
makedirs(args.out_dir, exist_ok=True)
# # Load data
(train_images, train_labels), (_, _) = tf.keras.datasets.cifar10.load_data()
train_images = train_images.astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
# (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
# train_images = np.pad(train_images, [(0,0), (2,2), (2,2)])
# train_images = train_images.reshape(train_images.shape[0], 32, 32, 1).astype('float32')
# train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
# Define models
enc = Encoder(train_images[0].shape, LATENT_DIM)
gen = Generator(train_images[0].shape, LATENT_DIM)
crit = Critic(train_images[0].shape, LATENT_DIM)
# Define optimizers
eg_optimizer = tf.keras.optimizers.Adam(1e-4, 0.5, 0.9)
c_optimizer = tf.keras.optimizers.Adam(1e-4, 0.5, 0.9)
# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# Checkpoint
# wmhbg_ckpt_dir = './wmhbg_checkpoints'
wmhbg_ckpt_dir = os.path.join(args.out_dir, 'wmhbg_checkpoints')
wmhbg_ckpt_prefix = os.path.join(wmhbg_ckpt_dir, 'wmhbg_ckpt')
wmhbg_ckpt = tf.train.Checkpoint(step=tf.Variable(0), eg_optimizer=eg_optimizer, c_optimizer=c_optimizer,
enc=enc, gen=gen, crit=crit)
wmhbg_manager = tf.train.CheckpointManager(wmhbg_ckpt, wmhbg_ckpt_dir, max_to_keep=1)
# Train steps
@tf.function
def train_step_c(batch_x, k):
    """One critic (discriminator) update step.

    Scores three (image, code) pairs — real/encoded, generated/prior, and
    the MH-refined pair — and applies a gradient-penalty-regularized
    Wasserstein loss to the critic only.

    Args:
        batch_x: batch of input images.
        k: number of Metropolis-Hastings refinement steps for tf.scan.

    Returns:
        The scalar critic loss for this batch.
    """
    # BUG FIX: the original also opened an unused eg_tape here; only the
    # critic tape is needed in this step, so the extra tape wasted memory
    # tracking encoder/generator operations.
    with tf.GradientTape() as c_tape:
        x = batch_x
        ex = enc(x, training=True)
        z = tf.random.normal([x.shape[0], LATENT_DIM])
        gz = gen(z, training=True)
        # Refine the encoded latents with k MH update steps.
        ex1 = tf.scan(mh_update, GAMMA * tf.ones(k), ex)[-1]
        x1 = gen(ex1, training=True)
        x_ex = crit([x, ex], training=True)
        gz_z = crit([gz, z], training=True)
        x1_ex1 = crit([x1, ex1], training=True)
        gp1 = gradient_penalty(partial(crit, training=True), x, ex, z, gz)
        gp2 = gradient_penalty(partial(crit, training=True), x, ex, ex1, x1)
        c_loss = 0.5 * (W_loss(x_ex, gz_z) + GP_WEIGHT * gp1) + 0.5 * (W_loss(x_ex, x1_ex1) + GP_WEIGHT * gp2)
    c_gradient = c_tape.gradient(c_loss, crit.trainable_variables)
    c_optimizer.apply_gradients(zip(c_gradient, crit.trainable_variables))
    return c_loss
@tf.function
def train_step_eg(batch_x, k):
    """One encoder/generator update step (adversarial half of the WGAN loss).

    Args:
        batch_x: batch of input images.
        k: number of Metropolis-Hastings refinement steps for tf.scan.

    Returns:
        The scalar encoder/generator loss for this batch.
    """
    with tf.GradientTape() as tape:
        real = batch_x
        code = enc(real, training=True)
        noise = tf.random.normal([real.shape[0], LATENT_DIM])
        fake = gen(noise, training=True)
        # Refine the encoded latents with k MH update steps.
        code_mh = tf.scan(mh_update, GAMMA * tf.ones(k), code)[-1]
        fake_mh = gen(code_mh, training=True)
        score_real = crit([real, code], training=True)
        score_fake = crit([fake, noise], training=True)
        score_mh = crit([fake_mh, code_mh], training=True)
        # Negated critic objective: encoder+generator try to fool the critic.
        eg_loss = -0.5 * W_loss(score_real, score_fake) - 0.5 * W_loss(score_real, score_mh)
    variables = enc.trainable_variables + gen.trainable_variables
    eg_gradient = tape.gradient(eg_loss, variables)
    eg_optimizer.apply_gradients(zip(eg_gradient, variables))
    return eg_loss
def train(dataset, n_epoch):
    """Run the WGAN-MH-BiGAN training loop for n_epoch epochs.

    Resumes from the latest checkpoint if one exists, alternates NUM_CRITIC
    critic updates with one encoder/generator update per batch, checkpoints
    once per epoch, and saves reconstruction/generation plots.
    Uses module-level models, optimizers and checkpoint objects.
    """
    wmhbg_ckpt.restore(wmhbg_manager.latest_checkpoint)
    for epoch in range(n_epoch):
        start = time.time()
        eg_loss, c_loss = 0, 0
        # Random number of MH refinement steps for this epoch (1..NUM_STEPS).
        k = np.random.choice(NUM_STEPS) + 1
        for batch in dataset:
            # NUM_CRITIC critic updates per encoder/generator update.
            for _ in range(NUM_CRITIC):
                c_loss_batch = train_step_c(batch, k)
            eg_loss_batch = train_step_eg(batch, k)
            eg_loss += eg_loss_batch
            c_loss += c_loss_batch
        wmhbg_ckpt.step.assign_add(1)
        wmhbg_manager.save()
        display.clear_output(wait=True)
        # Plot reconstructions of the first NUM_EXAMPLES training images.
        x0 = train_images[0:NUM_EXAMPLES]
        ex0 = enc(x0, training=False)
        gex0 = gen(ex0, training=False)
        plot_images(int(wmhbg_ckpt.step), x0, gex0, args.out_dir, 'reconstruct')
        # tf.random.set_seed(10)
        # Plot prior samples and MH-refined samples.
        z = tf.random.normal([NUM_EXAMPLES, LATENT_DIM])
        gz = gen(z, training=False)
        ex_mh = tf.scan(mh_update, GAMMA * tf.ones(10), ex0)[-1]
        gex_mh = gen(ex_mh, training=False)
        plot_images(int(wmhbg_ckpt.step), gz, gex_mh, args.out_dir, 'generate_mh')
        print ('Time for epoch {} is {} sec'.format(int(wmhbg_ckpt.step), time.time()-start))
        print ('G loss is {} and D loss is {}'.format(eg_loss, c_loss))

# Train
train(train_dataset, EPOCHS)
# wmhbg_ckpt.save(file_prefix = wmhbg_ckpt_prefix)
|
# Copyright 2018 Regents of the University of Colorado. All Rights Reserved.
# Released under the MIT license.
# This software was developed at the University of Colorado's Laboratory for Atmospheric and Space Physics.
# Verify current version before use at: https://github.com/MAVENSDC/Pytplot
import pytplot
import copy
def crop(tvar1, tvar2, replace=True):
    """
    Crops both tplot variables so that their times are the same.  This is done
    automatically by other processing routines if times do not match up.

    Parameters:
        tvar1 : str
            Name of the first tplot variable
        tvar2 : str
            Name of the second tplot variable
        replace : bool, optional
            If true, the data in the original tplot variables are replaced.
            Otherwise, new '_cropped' variables are created.

    Returns:
        None when replace is True, otherwise the name of the second cropped
        variable.

    Examples:
        >>> pytplot.store_data('a', data={'x':[0,4,8,12,16], 'y':[1,2,3,4,5]})
        >>> pytplot.store_data('b', data={'x':[2,5,8,11,14,17,20], 'y':[[1,1,1,1,1,1],[2,2,5,4,1,1],[100,100,3,50,1,1],[4,4,8,58,1,1],[5,5,9,21,1,1],[6,6,2,2,1,1],[7,7,1,6,1,1]]})
        >>> pytplot.crop('a','b')
    """
    # Work on copies so the stored variables are untouched until the end.
    first = pytplot.data_quants[tvar1].copy()
    second = pytplot.data_quants[tvar2].copy()

    # The overlapping window is [latest start, earliest end].
    window_start = max([first.coords['time'][0], second.coords['time'][0]])
    window_end = min([first.coords['time'][-1], second.coords['time'][-1]])

    # Trim both variables to the common window, restoring original attrs
    # (xarray .sel can drop them).
    first = first.sel(time=slice(window_start, window_end))
    first.attrs = copy.deepcopy(pytplot.data_quants[tvar1].attrs)
    second = second.sel(time=slice(window_start, window_end))
    second.attrs = copy.deepcopy(pytplot.data_quants[tvar2].attrs)

    if replace:
        # Overwrite the stored variables in place.
        pytplot.data_quants[tvar1] = first
        pytplot.data_quants[tvar1].name = tvar1
        pytplot.data_quants[tvar2] = second
        pytplot.data_quants[tvar2].name = tvar2
        return

    # Otherwise store new '_cropped' variables alongside the originals.
    # NOTE(review): as in the original, only the second cropped name is
    # returned — confirm callers do not expect both.
    new1 = tvar1 + '_cropped'
    new2 = tvar2 + '_cropped'
    pytplot.data_quants[new1] = copy.deepcopy(first)
    pytplot.data_quants[new1].attrs = copy.deepcopy(first.attrs)
    pytplot.data_quants[new1].name = new1
    pytplot.data_quants[new2] = copy.deepcopy(second)
    pytplot.data_quants[new2].attrs = copy.deepcopy(second.attrs)
    pytplot.data_quants[new2].name = new2
    return new2
# File: sshcustodian/sshcustodian.py
# -*- coding: utf-8 -*-
# Python 2/3 Compatibility
from __future__ import (unicode_literals, division, absolute_import,
print_function)
from six.moves import filterfalse
"""
This module creates a subclass of the main Custodian class in the Custodian
project (github.com/materialsproject/custodian), which is a wrapper that
manages jobs running on computing clusters. The Custodian module is part of The
Materials Project (materialsproject.org/).
This subclass adds the functionality to copy the temporary directory created
via monty to the scratch partitions on slave compute nodes, provided that the
cluster's filesystem is configured in this way. The implementation invokes a
subprocess to utilize the ssh executable installed on the cluster, so it is not
particularly elegant or platform independent, nor is this solution likely to be
general to all clusters. This is why this modification has not been submitted
as a pull request to the main Custodian project.
"""
# Import modules
import logging
import subprocess
import sys
import datetime
import time
import os
import re
from itertools import islice, groupby
from socket import gethostname
from monty.tempfile import ScratchDir
from monty.shutil import gzip_dir
from monty.json import MontyEncoder
from monty.serialization import dumpfn
from custodian.custodian import Custodian
from custodian.custodian import CustodianError
# Module-level logger
logger = logging.getLogger(__name__)
class SSHCustodian(Custodian):
    """
    The SSHCustodian class modifies the Custodian class from the custodian
    module to be able to handle clusters that have separate scratch partitions
    for each node. When scratch_dir_node_only is enabled, the temp_dir that
    monty creates will be copied to all other compute nodes used in the
    calculation and subsequently removed when the job is finished.
    """
    __doc__ += Custodian.__doc__

    def __init__(self, handlers, jobs, validators=None, max_errors=1,
                 polling_time_step=10, monitor_freq=30,
                 skip_over_errors=False, scratch_dir=None,
                 gzipped_output=False, checkpoint=False,
                 scratch_dir_node_only=False, pbs_nodefile=None):
        """ scratch_dir_node_only (bool): If set to True, custodian will grab
                the list of nodes in the file path provided to pbs_nodefile and
                copy the temp_dir to the scratch_dir on each node over
                ssh. This is necessary on cluster setups where each node has
                its own independent scratch partition.
            pbs_nodefile (str): The filepath to the list of nodes to be used in
                a calculation. If this path does not point to a valid file,
                then scratch_dir_node_only will be automatically set to False.
        """
        super(SSHCustodian, self).__init__(handlers, jobs, validators,
                                           max_errors, polling_time_step,
                                           monitor_freq, skip_over_errors,
                                           scratch_dir, gzipped_output,
                                           checkpoint)
        self.hostname = gethostname()
        if pbs_nodefile is None:
            self.scratch_dir_node_only = False
            # BUG FIX: the original left self.pbs_nodefile unset in this
            # branch, causing a latent AttributeError on later access.
            self.pbs_nodefile = None
            self.slave_compute_node_list = None
        elif os.path.exists(pbs_nodefile):
            self.scratch_dir_node_only = scratch_dir_node_only
            self.pbs_nodefile = pbs_nodefile
            self.slave_compute_node_list = (
                self._process_pbs_nodefile(self.pbs_nodefile, self.hostname))
        else:
            # Nodefile path given but invalid: silently disable the feature.
            self.scratch_dir_node_only = False
            self.pbs_nodefile = None
            self.slave_compute_node_list = None

    @staticmethod
    def _process_pbs_nodefile(pbs_nodefile, hostname):
        # Return the unique node names from the nodefile, excluding the
        # master node (hostname). groupby collapses consecutive duplicates,
        # which is how PBS nodefiles list repeated slots per node.
        with open(pbs_nodefile) as in_file:
            nodelist = in_file.read().splitlines()
        slave_compute_node_list = [
            node for node, _ in groupby(filterfalse(lambda x: x == hostname,
                                                    nodelist))
        ]
        return slave_compute_node_list

    def _copy_to_slave_node_dirs(self, temp_dir_path):
        """
        Copy temporary scratch directory from master node to other nodes.

        Args:
            temp_dir_path (str): The path to the temporary scratch directory.
                It is assumed here that the root path of the scratch directory
                is the same on all nodes.
        """
        process_list = []
        for node in self.slave_compute_node_list:
            command = ['rsync', '-azhq', temp_dir_path,
                       '{0}:{1}'.format(node,
                                        os.path.abspath(self.scratch_dir))]
            p = subprocess.Popen(command, shell=False)
            process_list.append(p)
        # Wait for syncing to finish before moving on
        for process in process_list:
            process.wait()

    def _update_slave_node_vasp_input_files(self, temp_dir_path):
        """
        Update VASP input files in the scratch partition on the slave compute
        nodes.

        Args:
            temp_dir_path (str): The path to the temporary scratch directory.
                It is assumed here that the root path of the scratch directory
                is the same on all nodes.
        """
        VASP_INPUT_FILES = [x for x in ["{0}/CHGCAR".format(temp_dir_path),
                                        "{0}/WAVECAR".format(temp_dir_path),
                                        "{0}/INCAR".format(temp_dir_path),
                                        "{0}/POSCAR".format(temp_dir_path),
                                        "{0}/POTCAR".format(temp_dir_path),
                                        "{0}/KPOINTS".format(temp_dir_path)] if
                            os.path.exists(x)]
        process_list = []
        for node in self.slave_compute_node_list:
            for filepath in VASP_INPUT_FILES:
                command = 'scp {0} {1}:{2}/'.format(filepath, node,
                                                    temp_dir_path)
                p = subprocess.Popen(command, shell=True)
                process_list.append(p)
        # Wait for syncing to finish before moving on
        for process in process_list:
            process.wait()

    def _delete_slave_node_dirs(self, temp_dir_path):
        """
        Delete the temporary scratch directory on the slave nodes.

        Args:
            temp_dir_path (str): The path to the temporary scratch directory.
                It is assumed here that the root path of the scratch directory
                is the same on all nodes.
        """
        process_list = []
        for node in self.slave_compute_node_list:
            command = 'ssh {0} "rm -rf {1}"'.format(node, temp_dir_path)
            p = subprocess.Popen(command, shell=True)
            process_list.append(p)
        # Wait for deletion to finish before moving on
        for process in process_list:
            process.wait()

    def _manage_node_scratch(self, temp_dir_path, job_start):
        """
        Checks whether the user wants to make use of scratch partitions on each
        compute node, and if True, either copies the temporary directory to or
        deletes the temporary directory from each slave compute node. If the
        user does not specify to use node-specific scratch partitions, then the
        function does nothing.

        Args:
            temp_dir_path (str): The path to the temporary scratch directory.
            job_start (bool): If True, then the job has started and the
                temporary directory will be copied to the slave compute
                nodes. If False, then the temporary directories will be deleted
                from the slave compute nodes.
        """
        if self.scratch_dir_node_only:
            if job_start:
                self._copy_to_slave_node_dirs(temp_dir_path)
            else:
                self._delete_slave_node_dirs(temp_dir_path)
        else:
            pass

    def _update_node_scratch(self, temp_dir_path, job):
        """
        Method to update the scratch partitions on the slave compute nodes
        if they exist and are running a VASP job.

        Args:
            temp_dir_path (str): The path to the temporary scratch directory.
            job (object): The job object you intend to run. Currently supports
                VASP jobs.
        """
        vasp_re = re.compile(r'vasp')
        if self.scratch_dir is not None:
            try:
                jobtype = job.get_jobtype()
                if self.scratch_dir_node_only:
                    if vasp_re.match(jobtype):
                        self._update_slave_node_vasp_input_files(temp_dir_path)
                    else:
                        pass
                else:
                    pass
            # BUG FIX: was a bare 'except:', which also swallows
            # KeyboardInterrupt/SystemExit. Keep the best-effort semantics
            # (jobs without get_jobtype are simply skipped) but only for
            # ordinary exceptions.
            except Exception:
                pass
        else:
            pass

    def run(self):
        """
        Override of Custodian.run() to include instructions to copy the
        temp_dir to the scratch partition on slave compute nodes if requested.
        """
        cwd = os.getcwd()

        with ScratchDir(self.scratch_dir, create_symbolic_link=True,
                        copy_to_current_on_exit=True,
                        copy_from_current_on_enter=True) as temp_dir:
            # Propagate the scratch dir to the slave nodes before starting.
            self._manage_node_scratch(temp_dir_path=temp_dir,
                                      job_start=True)
            self.total_errors = 0
            start = datetime.datetime.now()
            logger.info("Run started at {} in {}.".format(
                start, temp_dir))
            v = sys.version.replace("\n", " ")
            logger.info("Custodian running on Python version {}".format(v))

            try:
                # skip jobs until the restart
                for job_n, job in islice(enumerate(self.jobs, 1),
                                         self.restart, None):
                    self._run_job(job_n, job, temp_dir)
                    # Checkpoint after each job so that we can recover from
                    # last point and remove old checkpoints
                    if self.checkpoint:
                        super(SSHCustodian, self)._save_checkpoint(cwd, job_n)
            except CustodianError as ex:
                logger.error(ex.message)
                if ex.raises:
                    raise RuntimeError("{} errors reached: {}. Exited..."
                                       .format(self.total_errors, ex))
            finally:
                # Log the corrections to a json file.
                logger.info("Logging to {}...".format(super(SSHCustodian,
                                                            self).LOG_FILE))
                dumpfn(self.run_log, super(SSHCustodian, self).LOG_FILE,
                       cls=MontyEncoder, indent=4)
                end = datetime.datetime.now()
                logger.info("Run ended at {}.".format(end))
                run_time = end - start
                logger.info("Run completed. Total time taken = {}."
                            .format(run_time))
                # Remove duplicate copy of log file, provided it ends with
                # ".log"
                for x in ([x for x in os.listdir(temp_dir)
                           if re.match(r'\w*\.log', x)]):
                    os.remove(os.path.join(temp_dir, x))
                # Clean up the scratch copies on the slave nodes.
                self._manage_node_scratch(temp_dir_path=temp_dir,
                                          job_start=False)
                if self.gzipped_output:
                    gzip_dir(".")

            # Cleanup checkpoint files (if any) if run is successful.
            super(SSHCustodian, self)._delete_checkpoints(cwd)

        return self.run_log

    def _run_job(self, job_n, job, temp_dir):
        """
        Overrides custodian.custodian._run_job() to propagate changes to input
        files on different scratch partitions on compute nodes, if needed.
        """
        self.run_log.append({"job": job.as_dict(), "corrections": []})
        job.setup()

        for attempt in range(1, self.max_errors - self.total_errors + 1):
            # Propagate updated input files, if needed
            self._update_node_scratch(temp_dir, job)
            logger.info(
                "Starting job no. {} ({}) attempt no. {}. Errors "
                "thus far = {}.".format(
                    job_n, job.name, attempt, self.total_errors))

            p = job.run()
            # Check for errors using the error handlers and perform
            # corrections.
            has_error = False

            # While the job is running, we use the handlers that are
            # monitors to monitor the job.
            if isinstance(p, subprocess.Popen):
                if self.monitors:
                    n = 0
                    while True:
                        n += 1
                        time.sleep(self.polling_time_step)
                        if p.poll() is not None:
                            break
                        if n % self.monitor_freq == 0:
                            has_error = self._do_check(self.monitors,
                                                       p.terminate)
                else:
                    p.wait()

            logger.info("{}.run has completed. "
                        "Checking remaining handlers".format(job.name))
            # Check for errors again, since in some cases non-monitor
            # handlers fix the problems detected by monitors
            # if an error has been found, not all handlers need to run
            if has_error:
                self._do_check([h for h in self.handlers
                                if not h.is_monitor])
            else:
                has_error = self._do_check(self.handlers)

            # If there are no errors detected, perform
            # postprocessing and exit.
            if not has_error:
                for v in self.validators:
                    if v.check():
                        s = "Validation failed: {}".format(v)
                        raise CustodianError(s, True, v)
                job.postprocess()
                return

        # check that all errors could be handled
        for x in self.run_log[-1]["corrections"]:
            if not x["actions"] and x["handler"].raises_runtime_error:
                s = "Unrecoverable error for handler: {}. " \
                    "Raising RuntimeError".format(x["handler"])
                raise CustodianError(s, True, x["handler"])
        for x in self.run_log[-1]["corrections"]:
            if not x["actions"]:
                s = "Unrecoverable error for handler: %s" % x["handler"]
                raise CustodianError(s, False, x["handler"])

        logger.info("Max errors reached.")
        raise CustodianError("MaxErrors", True)

    # Inherit Custodian docstrings
    __init__.__doc__ = Custodian.__init__.__doc__ + __init__.__doc__
    run.__doc__ = Custodian.run.__doc__
    _run_job.__doc__ = Custodian._run_job.__doc__
|
from django.contrib.auth.models import User
from django.db import models
class Category(models.Model):
category = models.TextField()
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now=True)
|
#!/bin/python3
import os
import sys
#
# Complete the diagonalDifference function below.
#
def diagonalDifference(a):
    """Return the absolute difference between the two diagonal sums of a
    square matrix.

    Args:
        a: square matrix as a list of equal-length rows (may be empty).

    Returns:
        int: |sum of primary diagonal - sum of secondary diagonal|.
    """
    # Cleanup of the original: the 'i' variable was always 0 (a[i+j] == a[j])
    # and 'diff' was pointlessly recomputed on every loop iteration.
    n = len(a)
    primary = sum(a[j][j] for j in range(n))
    secondary = sum(a[j][n - 1 - j] for j in range(n))
    return abs(secondary - primary)
if __name__ == '__main__':
    # HackerRank harness: read an n x n matrix from stdin and write the
    # diagonal difference to the file named by OUTPUT_PATH.
    n = int(input())

    a = []
    for _ in range(n):
        a.append(list(map(int, input().rstrip().split())))

    result = diagonalDifference(a)

    # Use a context manager so the file is closed even if the write fails
    # (the original opened the file manually and relied on f.close()).
    with open(os.environ['OUTPUT_PATH'], 'w') as f:
        f.write(str(result) + '\n')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 09:27:37 2019
@author: arthurmendes
"""
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn import metrics
########################
# Fundamental Dataset Exploration
########################

# Character table for the "A Song of Ice and Fire" survival-prediction task.
GoT_df = pd.read_excel('GOT_character_predictions.xlsx')

# Engineered risk flags, populated by the loops below.
GoT_df['out_house'] = 0
GoT_df['out_culture'] = 0

# Cultures with high probability of dying (Sum of isAlive/Count isAlive > 0.5)
cultures = ['Vale',
            'Meereen',
            'Lhazareen',
            'Astapor',
            'Valyrian',
            'Astapori',
            'Westerman',
            'Pentoshi']

for val in enumerate(GoT_df.loc[ : , 'culture']):
    '''
    Creating a flag if the culture appear in the list of cultures, which means
    that they have a high chance of dying.
    '''
    # val is the (row_index, culture_name) pair yielded by enumerate().
    if val[1] in cultures:
        GoT_df.loc[val[0], 'out_culture'] = 1

# Houses with high probability of dying (Sum of isAlive/Count isAlive > 0.5)
house = ['Thenn',
         'Khal',
         'House Toyne',
         'House Rosby',
         'House Roote',
         'House Rambton',
         'House Pemford',
         'House Moore',
         'House Mallery',
         'House Grandison',
         'House Farrow',
         'House Egen',
         'House Dustin',
         'House Cole',
         'House Cockshaw',
         'House Chelsted',
         'House Cerwyn',
         'House Cafferen',
         'House Bywater',
         'House Byrch',
         'House Bushy',
         'House Ball',
         'Good Masters',
         'Band of Nine',
         'House Blackwood',
         'House Blackfyre',
         'House Strong',
         'House Cassel',
         'House Targaryen',
         'Stormcrows',
         'House Darry',
         'Brave Companions',
         'House Velaryon',
         'House Tully',
         'House Clegane']

for val in enumerate(GoT_df.loc[ : , 'house']):
    '''
    Creating a flag if the house appear in the list of houses, which means
    that they have a high probability of dying.
    '''
    # Same pattern as the culture flag above.
    if val[1] in house:
        GoT_df.loc[val[0], 'out_house'] = 1
###############################################################################
# Data Preparation
###############################################################################

# Per-column missing-value counts for the raw table.
print(
      GoT_df
      .isnull()
      .sum()
      )

# Working subset: book appearances, family-status columns and the engineered
# out_house / out_culture flags, plus the isAlive target.
GoT_Chosen = GoT_df.loc[:,['S.No',
                           'male',
                           'book1_A_Game_Of_Thrones',
                           'book2_A_Clash_Of_Kings',
                           'book3_A_Storm_Of_Swords',
                           'book4_A_Feast_For_Crows',
                           'book5_A_Dance_with_Dragons',
                           'isAliveMother',
                           'isAliveFather',
                           'isAliveHeir',
                           'isAliveSpouse',
                           'isMarried',
                           'isNoble',
                           'numDeadRelations',
                           'popularity',
                           'isAlive',
                           'out_house',
                           'out_culture']]

# Flagging missing values
for col in GoT_Chosen:
    """ Create columns that are 0s if a value was not missing and 1 if
    a value is missing. """
    if GoT_Chosen[col].isnull().any():
        GoT_Chosen['m_'+col] = GoT_df[col].isnull().astype(int)

# Checking again
print(
      GoT_Chosen
      .isnull()
      .sum()
      )

# Feature matrix: only complete columns plus the m_* missingness indicators
# (the raw isAlive* relative columns are dropped; their missingness alone is
# kept as signal).
GoT_data = GoT_Chosen.loc[:,['male',
                             'book1_A_Game_Of_Thrones',
                             'book2_A_Clash_Of_Kings',
                             'book3_A_Storm_Of_Swords',
                             'book4_A_Feast_For_Crows',
                             'book5_A_Dance_with_Dragons',
                             'isMarried',
                             'isNoble',
                             'numDeadRelations',
                             'popularity',
                             'm_isAliveFather',
                             'm_isAliveHeir',
                             'm_isAliveSpouse',
                             'out_house',
                             'out_culture']]

GoT_target = GoT_Chosen.loc[:,['isAlive']]

# Stratified 75/25 split; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
            GoT_data,
            GoT_target.values.ravel(),
            test_size = 0.25,
            random_state = 508,
            stratify = GoT_target)
###############################################################################
# Random Forest in scikit-learn
###############################################################################

# Following the same procedure as other scikit-learn modeling techniques

# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
                                          criterion = 'gini',
                                          max_depth = None,
                                          min_samples_leaf = 11,
                                          bootstrap = True,
                                          warm_start = False,
                                          random_state = 508)

# Fitting the models
full_gini_fit = full_forest_gini.fit(X_train, y_train)

# Scoring the gini model
print('Training Score', full_gini_fit.score(X_train, y_train).round(4))
print('Testing Score:', full_gini_fit.score(X_test, y_test).round(4))

########################
# Parameter tuning with GridSearchCV
########################

from sklearn.model_selection import GridSearchCV
# FIX: the pd.np alias was deprecated in pandas 1.0 and removed in 2.0;
# use numpy directly.
import numpy as np

# Creating a hyperparameter grid
estimator_space = np.arange(100, 1000, 100)
leaf_space = np.arange(1, 200, 10)
criterion_space = ['gini', 'entropy']
bootstrap_space = [True, False]
warm_start_space = [True, False]

param_grid = {'n_estimators' : estimator_space,
              'min_samples_leaf' : leaf_space,
              'criterion' : criterion_space,
              'bootstrap' : bootstrap_space,
              'warm_start' : warm_start_space}

# Building the model object one more time
full_forest_grid = RandomForestClassifier(max_depth = None,
                                          random_state = 508)

# Creating a GridSearchCV object
full_forest_cv = GridSearchCV(full_forest_grid, param_grid, cv = 3)

# Fit it to the training data
full_forest_cv.fit(X_train, y_train)

# Print the optimal parameters and best score
print("Tuned Logistic Regression Parameter:", full_forest_cv.best_params_)
print("Tuned Logistic Regression Accuracy:", full_forest_cv.best_score_.round(4))

'''
Tuned Logistic Regression Parameter: {'bootstrap': False, 'criterion': 'gini',
'min_samples_leaf': 11, 'n_estimators': 100, 'warm_start': True}
Tuned Logistic Regression Accuracy: 0.8108
'''
########################
# Parameter tuning with GridSearchCV
########################

# FIX: the pd.np alias was deprecated in pandas 1.0 and removed in 2.0;
# use numpy directly.
import numpy as np

# Refined hyperparameter grid around the optimum found above
estimator_space = np.arange(50, 130, 10)
leaf_space = np.arange(1, 50, 3)
criterion_space = ['gini']
bootstrap_space = [False]
warm_start_space = [True]

param_grid = {'n_estimators' : estimator_space,
              'min_samples_leaf' : leaf_space,
              'criterion' : criterion_space,
              'bootstrap' : bootstrap_space,
              'warm_start' : warm_start_space}

# Building the model object one more time
full_hyped = RandomForestClassifier(max_depth = None,
                                    random_state = 508)

# Creating a GridSearchCV object
hyped_model = GridSearchCV(full_hyped, param_grid, cv = 3)

# Fit it to the training data
hyped_model.fit(X_train, y_train)

'''
Tuned Logistic Regression Parameter: {'bootstrap': False, 'criterion': 'gini',
'min_samples_leaf': 4, 'n_estimators': 90, 'warm_start': True}
Tuned Logistic Regression Accuracy: 0.8122
'''

# Print the optimal parameters and best score
print("Tuned Logistic Regression Parameter:", hyped_model.best_params_)
print("Tuned Logistic Regression Accuracy:", hyped_model.best_score_.round(4))
###############################################################################
# Random Forest in scikit-learn
###############################################################################

# Final model, refit with the tuned hyperparameters from the grid search above.
full_forest_gini = RandomForestClassifier(n_estimators = 90,
                                          criterion = 'gini',
                                          max_depth = None,
                                          min_samples_leaf = 15,
                                          bootstrap = False,
                                          warm_start = True,
                                          random_state = 508)

# Fitting the models
full_gini_fit = full_forest_gini.fit(X_train, y_train)
full_forest_predict = full_forest_gini.predict(X_test)

# Scoring the gini model
print('Training Score', full_gini_fit.score(X_train, y_train).round(4))
print('Testing Score:', full_gini_fit.score(X_test, y_test).round(4))

# Saving score objects
gini_full_train = full_gini_fit.score(X_train, y_train)
gini_full_test = full_gini_fit.score(X_test, y_test)

###############################################################################
# ROC curve
###############################################################################

# Import necessary modules
from sklearn.metrics import roc_curve

# Compute predicted probabilities: y_pred_prob
y_pred_prob = full_gini_fit.predict_proba(X_test)[:,1]

# Generate ROC curve values: fpr, tpr, thresholds
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)

# Plot ROC curve
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.show()

# Area under the ROC curve (displayed interactively, not stored).
metrics.auc(fpr, tpr)

###############################################################################
# Variable importance
###############################################################################

import pandas as pd

# Gini importances, largest first.
feature_importances = pd.DataFrame(full_gini_fit.feature_importances_,
                                   index = X_train.columns,
                                   columns=
                                   ['importance']).sort_values('importance',
                                                               ascending=False)
print(feature_importances)

print ('\nClasification report:\n', classification_report(y_test, full_forest_predict))
print ('\nConfussion matrix:\n',confusion_matrix(y_test, full_forest_predict))
###############################################################################
# Gradient Boosted Machines
###############################################################################

from sklearn.ensemble import GradientBoostingClassifier
# FIX: the pd.np alias was deprecated in pandas 1.0 and removed in 2.0;
# use numpy directly.
import numpy as np

# Building a weak learner gbm
gbm_3 = GradientBoostingClassifier(loss = 'deviance',
                                   learning_rate = 1.5,
                                   n_estimators = 100,
                                   max_depth = 3,
                                   criterion = 'friedman_mse',
                                   warm_start = True,
                                   random_state = 508,)

gbm_basic_fit = gbm_3.fit(X_train, y_train)
gbm_basic_predict = gbm_basic_fit.predict(X_test)

# Training and Testing Scores
print('Training Score', gbm_basic_fit.score(X_train, y_train).round(4))
print('Testing Score:', gbm_basic_fit.score(X_test, y_test).round(4))

gbm_basic_train = gbm_basic_fit.score(X_train, y_train)
gmb_basic_test = gbm_basic_fit.score(X_test, y_test)

########################
# Applying GridSearchCV
########################

from sklearn.model_selection import GridSearchCV

# Creating a hyperparameter grid
learn_space = np.arange(0.1, 1.6, 0.1)
estimator_space = np.arange(50, 250, 50)
depth_space = np.arange(1, 10)
# NOTE(review): 'mse'/'mae' were removed from GradientBoosting in
# scikit-learn 1.2 -- confirm the pinned sklearn version before running.
criterion_space = ['friedman_mse', 'mse', 'mae']

param_grid = {'learning_rate' : learn_space,
              'max_depth' : depth_space,
              'criterion' : criterion_space,
              'n_estimators' : estimator_space}

# Building the model object one more time
gbm_grid = GradientBoostingClassifier(random_state = 508)

# Creating a GridSearchCV object
gbm_grid_cv = GridSearchCV(gbm_grid, param_grid, cv = 3)

# Fit it to the training data
gbm_grid_cv.fit(X_train, y_train)

# Print the optimal parameters and best score
print("Tuned GBM Parameter:", gbm_grid_cv.best_params_)
print("Tuned GBM Accuracy:", gbm_grid_cv.best_score_.round(4))
|
#!/usr/bin/env python3
from env import env
from run_common import AWSCli
aws_cli = AWSCli()
def describe_default_lambda(func_info, lambda_env=None):
    """Return True if ``func_info`` names a configured *default* lambda.

    BUG FIX: this predicate previously matched TYPE == 'cron'; the two
    describe_* helpers had their type strings swapped, so the default/cron
    counters reported each other's totals.

    func_info  -- AWS 'list-functions' entry (needs 'FunctionName').
    lambda_env -- optional list of {'NAME', 'TYPE'} dicts; defaults to the
                  project configuration env['lambda'].
    """
    if lambda_env is None:
        lambda_env = env['lambda']
    for el in lambda_env:
        if func_info['FunctionName'] == el['NAME'] and el['TYPE'] == 'default':
            return True
    return False
def describe_cron_lambda(func_info, lambda_env=None):
    """Return True if ``func_info`` names a configured *cron* lambda.

    BUG FIX: this predicate previously matched TYPE == 'default'; the two
    describe_* helpers had their type strings swapped.

    func_info  -- AWS 'list-functions' entry (needs 'FunctionName').
    lambda_env -- optional list of {'NAME', 'TYPE'} dicts; defaults to the
                  project configuration env['lambda'].
    """
    if lambda_env is None:
        lambda_env = env['lambda']
    for el in lambda_env:
        if func_info['FunctionName'] == el['NAME'] and el['TYPE'] == 'cron':
            return True
    return False
# Summary lines printed at the end of the check.
results = []

# Ask AWS for every deployed lambda, then tally how many match each of the
# two configured categories.
cmd = ['lambda', 'list-functions']
result = aws_cli.run(cmd)

default_lambda_count = 0
cron_lambda_count = 0
for func in result['Functions']:
    if describe_default_lambda(func):
        default_lambda_count += 1
    if describe_cron_lambda(func):
        cron_lambda_count += 1

default_mark = 'O' if default_lambda_count > 0 else 'X'
results.append('Lambda (default) -------------- ' + default_mark)

cron_mark = 'O' if cron_lambda_count > 0 else 'X'
results.append('Lambda (cron) -------------- ' + cron_mark)

print('#' * 80)
for line in results:
    print(line)
print('#' * 80)
|
# Generated by Django 2.2.10 on 2020-03-05 15:30
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create LegalReasonGroup, replace PrivacyLog's user FK with a generic
    (content_type, object_id) reference, and link LegalReason to a group."""

    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        ("gdpr_helpers", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="LegalReasonGroup",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "where",
                    models.CharField(
                        choices=[
                            ("contact_form", "Contact form"),
                            ("registration_form", "Registration form"),
                            ("landing_form", "Landing form"),
                        ],
                        max_length=20,
                        unique=True,
                        verbose_name="Posizione del gruppo",
                    ),
                ),
            ],
            options={
                "verbose_name": "Gruppo ragioni legali",
                "verbose_name_plural": "Gruppi ragioni legali",
            },
        ),
        # The default=1 values below are one-off backfills for existing rows;
        # preserve_default=False drops them from the model afterwards.
        migrations.RemoveField(model_name="privacylog", name="user"),
        migrations.AddField(
            model_name="privacylog",
            name="content_type",
            field=models.ForeignKey(
                default=1,
                on_delete=django.db.models.deletion.PROTECT,
                to="contenttypes.ContentType",
            ),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="privacylog",
            name="object_id",
            field=models.PositiveIntegerField(default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="legalreason",
            name="legal_group",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                related_name="legal_reasons",
                to="gdpr_helpers.LegalReasonGroup",
                verbose_name="Gruppo di ragioni legali",
            ),
        ),
    ]
|
from . import work_learn_problem as wlp

# Canonical single-skill, single-question-type action/observation spaces.
_actions = wlp.actions_all(
    n_skills=1,
    n_question_types=1,
    tell=False,
    exp=False,
)
_observations = wlp.observations(
    n_question_types=1,
)

# Indices into the action list.
# NOTE(review): Action('ask') vs Action('ask', 0) -- presumably the second
# argument selects a specific question type (a test/gold question); confirm
# against work_learn_problem.Action.
WORK = _actions.index(wlp.Action('ask'))
TEST = _actions.index(wlp.Action('ask', 0))
BOOT = _actions.index(wlp.Action('boot'))

# Indices into the observation list (terminal, null, right, wrong).
O_TERM = _observations.index('term')
O_NULL = _observations.index('null')
O_RIGHT = _observations.index('r')
O_WRONG = _observations.index('w')

# Default solver settings and model probabilities. List-valued entries hold
# one value per worker class / skill level.
DEFAULT_CONFIG = {
    'zmdp_discount': 0.99,
    'zmdp_timeout': 600,
    'utility_type': 'pen',
    "cost": -0.000001,
    "p_guess": [
        0.5,
    ],
    "p_s": [
        1.0,
    ],
    'p_leave': [
        0.1,
    ],
    "p_slip": [
        0.1,
        0.4,
    ],
    'p_lose': [
        0.05,
        0.1,
    ],
    "p_1": [0.5],
    "p_worker": [
        0.8,
        0.2,
    ],
    "p_r": [
        1.0,
    ],
}

# Defaults for the gold-question gating policy.
DEFAULT_GATING_PARAMS = {
    'n_gold_sliding': 10,
    'batch_size': 20,
    'desired_accuracy': 0.8,
    'gold_per_batch': 5,
    'exponential_backoff': True,
    'n_tutorial': 0,
    'n_screening': 0,
}
|
#!/usr/bin/python
# DocServ: a tiny line-protocol text file server (Python 2).
# Protocol: client sends "username|hostname|path" in one datagram-like send;
# server replies with the file contents or an error string.
import socket
import os

#Enter the path to the server root directory
path = "srv/"
#Enter the path to the log file
log = "log/"
#Enter the IP address to listen on
listen = "127.0.0.1"
restrict_ip = True
approved_ip = ["127.0.0.5"]

print '''
------------------------------------------------------------------------------
____ _____
/ __ \____ _____/ ___/___ ______ __
/ / / / __ \/ ___/\__ \/ _ \/ ___/ | / /
/ /_/ / /_/ / /__ ___/ / __/ / | |/ /
/_____/\____/\___//____/\___/_/ |___/
DocServ server version 1.2.1
Copyright (c) 2014 Sasha Pavelovich
MIT license
'''

s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((listen,32891))
s.listen(5)
while 1:
    # One request per connection; a single recv is assumed to carry the
    # whole request (NOTE(review): TCP does not guarantee this -- verify).
    client, address = s.accept()
    data = client.recv(1024)
    if data:
        ip,port = client.getpeername()
        # Optional IP allow-list.
        # NOTE(review): every 'continue' below skips client.close(), leaking
        # the socket until GC -- confirm intended.
        if restrict_ip == True and ip not in approved_ip:
            client.send("Invalid IP address")
            continue
        data = data.split('|')
        if len(data) != 3:
            client.send("Invalid request")
            continue
        username = data[0]
        hostname = data[1]
        data = data[2]
        request = ""+ip+" "+username+" @ "+hostname+" - "+data
        print request
        # Root request is rewritten to the index document.
        if data == "/":
            data = "index.txt"
        # Normalize the log directory (strip one trailing slash) and append
        # the request line to the access log.
        if log.endswith('/'):
            log = log[:-1]
        with open(log+"/server.log",'a') as f:
            f.write(request+'\n')
        # NOTE(review): substring containment is a weak traversal guard --
        # os.path.abspath of a '..' path may still contain 'srv/' elsewhere
        # in the string; consider os.path.commonpath instead.
        if path not in os.path.abspath(path+"/"+data+"/"):
            # client.send(os.path.abspath(path+"/"+data+"/"))
            client.send("File path error")
            continue
        # NOTE(review): dead branch -- data was already rewritten from "/"
        # to "index.txt" above, so this condition can never be true.
        if data == '/':
            if os.path.isfile(path+"/index.txt") == False:
                client.send("File not found")
                continue
            file = open(path+"/index.txt", 'r')
            text = file.read()
            client.send(text)
            file.close()
            continue
        # Normalize the document root (strip one trailing slash).
        if path.endswith('/'):
            path = path[:-1]
        if os.path.exists(path+'/'+data) == False:
            client.send("File not found")
            continue
        elif os.path.isfile(path+'/'+data) == False:
            # Directory request: serve its index.txt if present.
            if data.endswith('/'):
                data = data[:-1]
            if os.path.isfile(path+'/'+data+"/index.txt") == False:
                client.send("File not found")
                continue
            file = open(path+'/'+data+"/index.txt", 'r')
            text = file.read()
            client.send(text)
            file.close()
            continue
        # Plain file request.
        file = open(path+'/'+data, 'r')
        text = file.read()
        client.send(text)
        file.close()
    client.close()
|
inp = input("Enter the numbers to be added separated by spaces : ")
# split() with no argument tolerates repeated whitespace, unlike split(' ')
# which produces empty tokens (and an int('') crash) on double spaces.
operands = inp.split()
# Renamed from 'sum' so the builtin of the same name is not shadowed.
total = 0.0
for number in operands:
    total += int(number)
# FIX: the original printed ' + ' after *every* operand, leaving a stray
# trailing plus ("1 + 2 + = 3.000"); joining the operands avoids it.
print(' + '.join(operands) + ' = %.3f' % total)
|
# import sys
# sys.path.append('..')
# from util.mlflow_util import load_uri, get_prev_run
from .mlflow_util import load_uri, get_prev_run
import numpy as np
import scipy.sparse as sps
import scipy.linalg as sla
import os
import mlflow
from sklearn.neighbors import NearestNeighbors
METRICS = ['euclidean', 'cosine']
class GraphManager(object):
    """
    Mostly Graph methods

    Builds a k-NN similarity graph from a feature matrix, forms its
    (optionally normalized) Laplacian, and computes the smallest eigenpairs,
    caching results as MLflow artifacts keyed on the parameters.

    Necessary Fields:
        self.param
    Optional Fields:
    """
    def __init__(self):
        # State (e.g. self.W) is attached lazily by from_features().
        return

    def from_features(self, X, knn=15, sigma=3., normalized=True, n_eigs=None, zp_k=None, metric='euclidean', debug=False):
        """
        load from features using params

        Returns (w, v): Laplacian eigenvalues and eigenvectors for the
        similarity graph of X. Unless debug=True, a previous MLflow run with
        identical params/tags is reused; otherwise the spectrum is computed
        and logged as artifacts for next time.

        params:
            knn        -- number of nearest neighbors
            sigma      -- kernel bandwidth (euclidean metric only)
            normalized -- use the normalized graph Laplacian
            n_eigs     -- number of smallest eigenpairs (None => all)
            zp_k       -- Zelnik-Perona self-tuning index (euclidean only)
            metric     -- one of METRICS ('euclidean' or 'cosine')
        """
        assert metric in METRICS
        print("Computing (or retrieving) graph evals and evecs with parameters:")
        print("\tN = {}, knn = {}, sigma = {}".format(X.shape[0], knn, sigma))
        print("\tnormalized = {}, n_eigs = {}, zp_k = {}".format(normalized, n_eigs, zp_k))
        print("\tmetric = {}".format(metric))
        print()
        if not debug:
            # Cache key: the full parameter set plus identifying tags for X.
            params = {
                'knn' : knn,
                'sigma' : sigma,
                'normalized' : normalized,
                'n_eigs' : n_eigs,
                'zp_k' : zp_k,
                'metric' : metric
            }
            prev_run = get_prev_run('GraphManager.from_features',
                                    params,
                                    tags={"X":str(X), "N":str(X.shape[0])},
                                    git_commit=None)
            if prev_run is not None:
                # Cache hit: load the stored spectrum instead of recomputing.
                print('Found previous eigs')
                eigs = load_uri(os.path.join(prev_run.info.artifact_uri,
                                             'eigs.npz'))
                return eigs['w'], eigs['v']

        print('Did not find previous eigs, computing from scratch...')
        W = self.compute_similarity_graph(X=X, knn=knn, sigma=sigma,
                                          zp_k=zp_k, metric=metric)
        L = self.compute_laplacian(W, normalized=normalized)
        w, v = self.compute_spectrum(L, n_eigs=n_eigs)
        L = sps.csr_matrix(L)
        self.W = W
        if debug:
            # Skip all MLflow logging in debug mode.
            return w, v
        # Persist the spectrum and weight matrix as run artifacts.
        # NOTE(review): assumes a ./tmp/ directory already exists -- confirm.
        with mlflow.start_run(nested=True):
            np.savez('./tmp/eigs.npz', w=w, v=v)
            sps.save_npz('./tmp/W.npz', W)
            mlflow.set_tag('function', 'GraphManager.from_features')
            mlflow.set_tag('X', str(X))
            mlflow.set_tag('N', str(X.shape[0]))
            mlflow.log_params(params)
            mlflow.log_artifact('./tmp/eigs.npz')
            mlflow.log_artifact('./tmp/W.npz')
            os.remove('./tmp/eigs.npz')
            os.remove('./tmp/W.npz')
        return w, v

    def compute_similarity_graph(self, X, knn=15, sigma=3., zp_k=None, metric='euclidean', maxN=5000):
        """
        Computes similarity graph using parameters specified in self.param

        Returns a symmetric sparse CSR weight matrix W with zero diagonal.
        For N < maxN exact neighbors come from sklearn; otherwise the
        approximate pynndescent index is used.
        """
        N = X.shape[0]
        if knn is None:
            if N < maxN:
                knn = N
            else:
                print("Parameter knn was given None and N > maxN, so setting knn=15")
                knn = 15

        if N < maxN:
            print("Calculating NN graph with SKLEARN NearestNeighbors...")
            if knn > N / 2:
                # With this many neighbors brute force beats a ball tree.
                nn = NearestNeighbors(n_neighbors=knn, algorithm='brute').fit(X)
            else:
                nn = NearestNeighbors(n_neighbors=knn, algorithm='ball_tree').fit(X)
            # construct CSR matrix representation of the k-NN graph
            A_data, A_ind = nn.kneighbors(X, knn, return_distance=True)
        else:
            print("Calculating NN graph with NNDescent package since N = {} > {}".format(N, maxN))
            from pynndescent import NNDescent
            index = NNDescent(X, metric=metric)
            index.prepare()
            A_ind, A_data = index.query(X, k=knn)

        # modify from the kneighbors_graph function from sklearn to
        # accomodate Zelnik-Perona scaling
        n_nonzero = N * knn
        A_indptr = np.arange(0, n_nonzero + 1, knn)
        if zp_k is not None and metric == 'euclidean':
            # Zelnik-Perona self-tuning: rescale each distance by the zp_k-th
            # neighbor distance at both endpoints (floored to avoid div/0).
            k_dist = A_data[:,zp_k][:,np.newaxis]
            k_dist[k_dist < 1e-4] = 1e-4
            A_data /= np.sqrt(k_dist * k_dist[A_ind,0])

        A_data = np.ravel(A_data)

        if metric == 'cosine':
            print(np.max(A_data))
            W = sps.csr_matrix(((1.-A_data), # need to do 1.-A_data since NNDescent returns cosine DISTANCE (1. - cosine_similarity)
                                A_ind.ravel(),
                                A_indptr),
                               shape=(N, N))
        else:
            # RBF kernel on euclidean distances.
            W = sps.csr_matrix((np.exp(-(A_data ** 2)/sigma),
                                A_ind.ravel(),
                                A_indptr),
                               shape=(N, N))

        # Symmetrize, then drop self-loops and explicit zeros.
        W = (W + W.T)/2
        #W = max(W, W.T)
        W.setdiag(0)
        W.eliminate_zeros()
        return W

    def compute_laplacian(self, W, normalized=True):
        """
        Computes the graph Laplacian using parameters specified in self.params

        W          -- sparse symmetric weight matrix
        normalized -- symmetric-normalized Laplacian if True, combinatorial
                      Laplacian otherwise
        """
        if normalized:
            L = sps.csgraph.laplacian(W, normed=True)
        else:
            L = sps.csgraph.laplacian(W, normed=False)
        return L

    def compute_spectrum(self, L, n_eigs):
        """
        Computes first n_eigs smallest eigenvalues and eigenvectors

        Dense eigh is used when more than half the spectrum is requested
        (sparse eigsh converges poorly there); sparse eigsh otherwise.
        NOTE(review): scipy's `eigvals=` keyword is deprecated in favor of
        `subset_by_index=` -- verify the pinned scipy version.
        """
        N = L.shape[0]
        if n_eigs is None:
            n_eigs = N
        if n_eigs > int(N/2):
            w, v = sla.eigh(L.toarray(), eigvals=(0,n_eigs-1))
        else:
            w, v = sps.linalg.eigsh(L, k=n_eigs, which='SM')
        return w, v
|
from ..heaps import *
# 10.1 Merge sorted array
# pytest -s EPI\tests\test_heaps.py::test_merge_sorted_array
def test_merge_sorted_array():
    # Merging three sorted runs must yield one ascending sequence:
    # smallest element first, largest element last.
    res = merge_sorted_array([[2, 20, 200], [3, 30, 300], [4, 40, 400]])
    assert res[0] == 2
    assert res[-1:] == [400]
# 10.2 Sort K ascending and descending
# pytest -s EPI\tests\test_heaps.py::test_sort_k_increasing_decresing_array
def test_sort_k_increasing_decresing_array():
    l = [57, 131, 493, 294, 221, 339, 418, 452, 442, 190]
    # Round-trip: split into monotone runs, then merge the runs back into a
    # single sorted array; no elements may be gained or lost.
    sorted_array = merge_sorted_array(sort_k_increasing_decresing_array(l))
    print(sorted_array)
    assert len(sorted_array) == 10
|
from django.urls import path
from main import views
urlpatterns = [
    # HTML views. Literal routes must precede the catch-all slug route.
    path('', views.StoryListView.as_view(), name='story_list'),
    path('create/', views.StoryCreateView.as_view(), name='story_create'),
    # Story detail by slug (matches a single path segment only).
    path('<slug:slug>/', views.StoryDetailView.as_view(), name='story_detail'),
    # URLs for endpoints
    path('api/list/dreams/', views.DreamStoryList.as_view()),
    path('api/list/nightmares/', views.NightmareStoryList.as_view()),
    path('api/<slug:story_slug>/', views.StoryDetail.as_view()),
    path('api/up/<int:pk>/', views.story_upvote),
    path('api/down/<int:pk>/', views.story_downvote),
]
"""
compare coeffs
change name1, name2 to compare coeffs between different tasks
"""
#%%
from numpy import *
import torch
import matplotlib.pyplot as plt
import conf
name1 = 'burgers-2-upwind-sparse0-noise0.001-block6'
name2 = 'burgers-2-upwind-sparse0.005-noise0.001-block6'
D = []
D.append(torch.load('coeffs/burgers/'+name1)) # blue
D.append(torch.load('coeffs/burgers/'+name2)) # orange
# D.append(torch.load('checkpoint/'+name3+'/errs')) # red
# D.append(torch.load('checkpoint/'+name4+'/errs')) # yellow
coeffs0 = list(d['coeffs0'] for d in D)
coeffs1 = list(d['coeffs1'] for d in D)
edgecolorlist = ('#1B2ACC','#CC4F1B')#, 'red') #, 'yellow')
facecolorlist = ('#089FFF','#FF9848')#, 'red') #, 'yellow')
upq = 100
downq = 0
alpha = 0.25 # facecolor transparency
fig,ax = plt.subplots(1,1)
title = ''
n = 40
startidx = 4
valuerange = 0.015
x = arange(startidx+1,n+1,dtype=float64)
j = 0
i = 0
# FIX: 'np' is used below but was never bound -- 'from numpy import *' does
# not create the 'np' alias, so np.isnan raised a NameError.
import numpy as np

# Percentile envelope of coeffs0 for each task (one fill per task).
for s in range(len(edgecolorlist)):
    y = coeffs0[s][:,startidx:n].copy()
    # Mask NaNs as +inf so percentile() keeps finite rows at the bottom.
    y[np.isnan(y)] = np.inf
    y_up = percentile(y,q=upq,axis=0)
    y_down = percentile(y,q=downq,axis=0)
    ax.fill_between(x,y_down,y_up,edgecolor=edgecolorlist[s], facecolor=facecolorlist[s],\
            linestyle='-', alpha=alpha)
ax.set_ylim(-valuerange,valuerange)
ax.grid()
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)

# Second figure: the same envelope for coeffs1.
alpha = 0.25 # facecolor transparency
fig,ax = plt.subplots(1,1)
title = ''
n = 40
x = arange(startidx+1,n+1,dtype=float64)
j = 0
i = 0
for s in range(len(edgecolorlist)):
    y = coeffs1[s][:,startidx:n].copy()
    y[np.isnan(y)] = np.inf
    y_up = percentile(y,q=upq,axis=0)
    y_down = percentile(y,q=downq,axis=0)
    ax.fill_between(x,y_down,y_up,edgecolor=edgecolorlist[s], facecolor=facecolorlist[s],\
            linestyle='-', alpha=alpha)
ax.set_ylim(-valuerange,valuerange)
ax.grid()
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)
#%% imshow
# FIX: 'np' is used below but was never bound by 'from numpy import *';
# np.abs raised a NameError.
import numpy as np

# Heatmaps of the sorted |coefficient| magnitudes, one figure per task.
for s in range(len(edgecolorlist)):
    fig,ax = plt.subplots(1,1)
    y = np.abs(coeffs0[s][:,startidx:n].copy())
    y.sort(axis=0)
    z = ax.imshow(y, cmap='jet', vmin=0, vmax=valuerange)
    fig.colorbar(z, ax=ax)
    # ax.set_title(r'remainder for u-component', fontsize=15)

for s in range(len(edgecolorlist)):
    fig,ax = plt.subplots(1,1)
    y = np.abs(coeffs1[s][:,startidx:n].copy())
    y.sort(axis=0)
    z = ax.imshow(y, cmap='jet', vmin=0, vmax=valuerange)
    fig.colorbar(z, ax=ax)
    # ax.set_title(r'remainder for v-component', fontsize=15)
#%%
|
# -*- coding: utf-8 -*
'''
Decide whether the year entered by the user is a leap year.
'''
year = int(input('年份 = '))
# Leap year: divisible by 400, or divisible by 4 but not by 100.
leap = (year % 400 == 0) or (year % 4 == 0 and year % 100 != 0)
print(leap)
#!/usr/bin/python
#-*- coding:utf-8 -*-
# 1.09 Finding Commonalities in Two Dictionaries

a = {
    'x' : 1,
    'y' : 2,
    'z' : 3
}
b = {
    'w' : 10,
    'x' : 11,
    'y' : 2
}

# dict key/item views support set operations directly.
common_keys = a.keys() & b.keys()
print(common_keys)

# Keys present in a but absent from b.
only_in_a = a.keys() - b.keys()
print(only_in_a)

# (key, value) pairs that agree in both dictionaries.
print(type(a.items()))
print(a.items() & b.items())

# New dictionary built from a with the shared keys dropped.
c = {key:a[key] for key in a.keys() - common_keys}
print(c)
import os,sys
import numpy
import glob

# Collect every file under `indir` into one tab-joined line each of
# `<indir>.txt`, in sorted filename order.
indir='outputs_fiml'
infiles=glob.glob(os.path.join(indir,'*'))
infiles.sort()

outfile=indir+'.txt'
# FIX: the clobber guard was a bare `assert`, which disappears under
# `python -O`; raise explicitly so the output file is never overwritten.
if os.path.exists(outfile):
    raise RuntimeError('refusing to overwrite existing %s' % outfile)

# FIX: the input handles from open(i) were never closed; use context
# managers for both sides.
with open(outfile, 'w') as f:
    for i in infiles:
        with open(i) as src:
            l = [x.strip() for x in src.readlines()]
        f.write('%s\n' % '\t'.join(l))
|
class Solution:
    def isValid(self, s: str) -> bool:
        """Return True if every bracket in ``s`` is closed in the correct
        order (classic balanced-brackets check).

        A plain list holds the *expected* closing brackets, removing the
        dependency on the separate Stack class.
        BUG FIX: the original compared the popped bracket with ``is``;
        string identity is a CPython interning accident, not a contract --
        equality must use ``==``.
        """
        closers = {
            "(" : ")",
            "{" : "}",
            "[" : "]"
        }
        expected = []
        for ch in s:
            if ch in closers:
                # Opening bracket: remember which closer must come next.
                expected.append(closers[ch])
            elif not expected or expected.pop() != ch:
                # Closer with no opener, or the wrong closer.
                return False
        # Valid only if every opener was closed.
        return not expected
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.data = []

    def push(self, data):
        # Items are appended at the tail; pop() removes from the same end.
        self.data.append(data)

    def pop(self):
        return self.data.pop()

    def isEmpty(self):
        # A direct boolean expression replaces the original if/else.
        return len(self.data) == 0
return True |
# Python 2 script: push an FCM notification to the device ids given on the
# command line. Usage: script.py <registration_ids> <title> <body>
import sys
rid=sys.argv[1]
title=sys.argv[2]
body=sys.argv[3]

# Send to single device.
from pyfcm import FCMNotification

# NOTE(review): hard-coded server API key checked into source -- this is a
# credential; move it to an environment variable / secret store and rotate.
push_service = FCMNotification(api_key="AIzaSyA3hfQjZ3xn2a_4KKA3rKaPCaP_71B7CCQ")
# Your api-key can be gotten from: https://console.firebase.google.com/project/<project-name>/settings/cloudmessaging

# rid may contain several whitespace-separated registration ids.
registration_id = rid.split()
message_title = title  # NOTE(review): built but never passed to the notify call below -- confirm intended
message_body = body
result = push_service.notify_multiple_devices(registration_ids=registration_id, message_body=message_body,sound="Default")
print result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
def tax(bill):
bill *= 1.08
print "With tax: %f" % bill
return bill
def tip(bill):
bill *= 1.15
print "With tip: %f" % bill
return bill
meal_cost = 100
meal_with_tax = tax(meal_cost)
meal_with_tip = tip(meal_with_tax)
'''
def one(n):
    """Return n incremented by one."""
    result = n + 1
    return result


def two(n):
    """Return one(n) incremented by two, i.e. n + 3."""
    result = one(n) + 2
    return result
# FIX: the original executed `print n` *before* `n = 1`, raising a
# NameError on the very first statement; assign first, then print.
# Parenthesized print() works under both Python 2 and Python 3.
n = 1
print(n)
n = one(n)
print(n)
n = two(n)
print(n)
|
import pytest
import sys, time
from .timer import PerpetualTimer
def test_timer():
    # Callback appends a marker so firings can be counted.
    def timer_func(results):
        results.append(0)

    nums = []
    t = PerpetualTimer(0.01, timer_func, args=(nums,))
    # A freshly constructed timer is idle.
    assert not t._should_continue
    assert t.thread is None
    # cancel() on an idle timer is a no-op.
    t.cancel()
    assert not t._should_continue
    assert t.thread is None
    # _start_timer() alone must not arm the timer; only start() sets the flag.
    t._start_timer()
    assert not t._should_continue
    assert t.thread is None
    t.start()
    assert t._should_continue
    # start() is idempotent while already running.
    t.start()
    assert t._should_continue
    assert t._should_continue
    # ~0.05s at a 0.01s period -> roughly 5 callback firings.
    time.sleep(0.05)
    t.cancel()
    nums_len = len(nums)
    assert 4 <= len(nums) <= 6
    # After cancel(), no further callbacks may arrive.
    time.sleep(0.02)
    assert len(nums) == nums_len
|
import re
# Lowercase hostname validator: optional subdomain labels, a constrained
# second-to-last label, and a final label of 1-63 characters.
DOMAIN_PATTERN = re.compile(
    r'^(?:[a-z\d\-_]{1,62}\.){0,125}'
    r'(?:[a-z\d](?:\-(?=\-*[a-z\d])|[a-z]|\d){0,62}\.)'
    r'[a-z\d]{1,63}$'
)

# The srcset width tolerance dictates the _maximum tolerated size_
# difference between an image's downloaded size and its rendered size.
# For example, setting this value to 10 means that an image will not
# render more than 10% larger or smaller than its native size.
SRCSET_WIDTH_TOLERANCE = 0.08

# The minimum srcset width tolerance.
SRCSET_MIN_WIDTH_TOLERANCE = 0.01

# The default srcset target ratios.
SRCSET_DPR_TARGET_RATIOS = range(1, 6)

# Upper bound (pixels) for any srcset entry.
SRCSET_MAX_SIZE = 8192

# Representation of an image with a width of zero. This value is used
# in validation contexts, i.e. "is the width of the passed or requested
# image greater than or equal to the 'zero width image'."
IMAGE_ZERO_WIDTH = 0

# The minimum width of a default generated image-width.
IMAGE_MIN_WIDTH = 100

# The maximum width of a default generated image-width.
IMAGE_MAX_WIDTH = 8192

# The default dpr qualities used when variable output quality is enabled.
DPR_QUALITIES = {1: 75, 2: 50, 3: 35, 4: 23, 5: 20}

# Precomputed candidate widths for generated srcsets, capped at
# SRCSET_MAX_SIZE.
SRCSET_TARGET_WIDTHS = [
    100, 116, 135, 156, 181, 210, 244, 283,
    328, 380, 441, 512, 594, 689, 799, 927,
    1075, 1247, 1446, 1678, 1946, 2257, 2619,
    3038, 3524, 4087, 4741, 5500, 6380, 7401, 8192]
|
# Simple recursion
def countNodes(root):
    """Count the nodes of a binary tree by plain recursion (O(n)).

    root -- tree node with .left/.right attributes, or None.
    """
    if not root:
        return 0
    # BUG FIX: this is a module-level function, not a method, so there is no
    # 'self'; the original 'self.countNodes(...)' raised a NameError.
    return countNodes(root.left) + countNodes(root.right) + 1
# Exploiting the properties of a complete binary tree
// NOTE(review): this is Java source pasted into a Python file -- the file
// cannot be imported as Python while this block is present.
class Solution {
    public int countNodes(TreeNode root) {
        /**
         The height of a complete binary tree is obtained by repeatedly
         descending the left child.
         Compare the heights of the left and right subtrees:
         - equal: the left subtree is a perfect tree, so recurse into the
           right subtree (the last node of the bottom level must be there)
         - unequal: the right subtree is a perfect tree one level shorter,
           so recurse into the left subtree (the last node must be there)
         **/
        if (root==null) return 0;
        int ld = getDepth(root.left);
        int rd = getDepth(root.right);
        if(ld == rd) return (1 << ld) + countNodes(root.right); // 1 (root) + (1 << ld)-1 (perfect left subtree) + right-subtree count
        else return (1 << rd) + countNodes(root.left); // 1 (root) + (1 << rd)-1 (perfect right subtree) + left-subtree count
    }
    private int getDepth(TreeNode r) {
        // Depth along the leftmost path == tree height for a complete tree.
        int depth = 0;
        while(r != null) {
            depth++;
            r = r.left;
        }
        return depth;
    }
}
#!/usr/bin/env python3
#
# Maxwell coil plot example
#
import math
import loopfield as lf
import loopfield.plot as lfp

# field object
field = lf.Field(length_units = lf.cm,
                 current_units = lf.A,
                 field_units = lf.uT)

# Maxwell coil model with single current loops
R = 10

# center winding
c1 = lf.Loop([0, 0, 0], [1, 0, 0], R, 64)

# outer windings (radius R*sqrt(4/7) at +/- R*sqrt(3/7) on the axis)
c2 = lf.Loop([-R*math.sqrt(3./7.), 0, 0], [1, 0, 0], R*math.sqrt(4./7.), 49)
c3 = lf.Loop([+R*math.sqrt(3./7.), 0, 0], [1, 0, 0], R*math.sqrt(4./7.), 49)

# add windings to field
field.addLoop(c1)
field.addLoop(c2)
field.addLoop(c3)

# evaluate field at center of coil
Bc = field.evaluate([0., 0., 0.])
print('Bc = ', Bc)

print('Calculating plot...')

# function returns ratio of x-component to that at coil center
def x_ratio(B):
    """Ratio of the local axial field component to its value at the center."""
    return B[0] / Bc[0]

# create XY plot over a 30x30 cm window sampled on a 101x101 grid
min_x = -15
max_x = +15
min_y = -15
max_y = +15
n_x = 101
n_y = 101
plot = lfp.plotXY(field,
                  min_x, max_x, n_x,
                  min_y, max_y, n_y)

# add field lines
plot.fieldLines()

# add loop symbols
plot.loopSymbols(scale = 1.)

# add 1% error bound region
tol = 0.01
plot.region(x_ratio, [1.-tol, 1.+tol], color='red', alpha=0.5,
            label = ('Field error < %2.1f%%' % (100*tol)))

# add rectangular area hand-adjusted to fit in 1% error volume
area_x = 3.7
area_y = 4.2
plot.rectangle([-area_x, +area_x, -area_y, +area_y],
               color='blue', alpha = 0.5,
               label = (' %2.1f x %2.1f cm' % (2*area_x, 2*area_y)))

# add text
plot.labels(title = '10cm 49/64/49-Turn Maxwell Coil',
            xlabel = 'x (cm)', ylabel = 'y (cm)')

# save plot
plot.save('maxwell_coil.png')
print('Plot written to "maxwell_coil.png"')
|
##---to import line_table as pandas dataframe, apply detection criteria, and pump out latex tables----##
##----by Ayan-------##
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1000)
import argparse as ap
import os
HOME = os.getenv('HOME') + '/'
import sys
sys.path.append(HOME + 'Work/astro/ayan_codes/mageproject/')
import ayan.splot_util as u
import ayan.mage as m
import re
import subprocess
# -----------to check if line is detected given our etection criteria------------------------------------
# -----------to check if line is detected given our etection criteria------------------------------------
def detect(table, EW_thresh=0, SNR_thresh=0, EW_signi='EW_signi', flux='f_line', flux_u='f_line_u'):
    """Boolean mask over `table` rows that pass BOTH detection cuts:
    |EW significance| >= EW_thresh and flux / flux_error >= SNR_thresh.
    """
    ew_ok = np.abs(table[EW_signi]) >= EW_thresh
    snr_ok = table[flux].values / table[flux_u].values >= SNR_thresh
    return ew_ok & snr_ok
# ------------to check if line is (semi/)forbidden and format Id accordingly-----------------------------------
def format_forbidden(label1):
forbidden = ['CIII1906', 'SiII1808', 'SiII1816', 'SiIII1882', 'OIII2320', 'OII2470', 'OII3727', 'OII3729', 'OII3727,9', 'NeIII3869',
'NeIII3968', \
'OIII4362', 'OIII4959', 'OIII5007', 'ArIV4741', 'NII6584', 'SII6717', 'SII6730', 'ArIII7136']
semiforbidden = ['CIII1908', 'NII1430', 'NII1431', 'NIV1486', 'OIII1660', 'OIII1666', 'NIII1750', 'SiIII1892',
'NII2140', \
'CII2323', 'CII2325c', 'CII2325d', 'CII2328', 'SiII2335a', 'SiII2335b']
new_label = []
for label in label1.split(';'):
if label in forbidden:
brackets = ('{[}', ']') # curly braces otherwise latex table does not compile
elif label in semiforbidden:
brackets = ('', ']')
else:
brackets = ('', '')
label = brackets[0] + label
label = re.sub(r"(\d+)", brackets[1] + r"\1", label, count=1)
label = re.sub(r"(I+)", r" \1", label, count=1)
label = re.sub(r"(\d+)", r" \1", label, count=1) # to format line labels with space
new_label.append(label)
return '; '.join(new_label)
# -----to format latex column headers------------------------------------------
def column_header_format(header, string_replace_dict, substring_replace_dict, sep='_'):
    """Turn an underscore-separated column name into a LaTeX header.

    A header 'a_b_c' is split into string 'a', substring 'b' and
    subsubstring 'c' on *sep*. A trailing 'u' marks an uncertainty column
    and adds a '$\delta$ ' prefix. The two dicts map raw (sub)strings to
    display replacements; 'rest'/'$\lambda$' are swapped so wavelength
    headers read '$\lambda$_rest'. An 'ID' substring becomes ' ID' instead
    of a subscript, and any accidental '$$' is collapsed away.
    """
    sep_len = len(sep)
    first = header.find(sep)
    if first == -1:
        # No separator at all: the whole header is the base string.
        string, substring, subsubstring = header, '', ''
    else:
        string = header[:first]
        rest = header[first + sep_len:]
        second = rest.find(sep)
        if second == -1:
            substring, subsubstring = rest, ''
        else:
            substring = rest[:second]
            subsubstring = header[first + second + 2 * sep_len:]
    isdelta = r'$\delta$ ' if subsubstring == 'u' else r''
    string_replace = string_replace_dict.get(string, string)
    substring_replace = substring_replace_dict.get(substring, substring)
    if string_replace == 'rest' and substring_replace == '$\lambda$':
        string_replace, substring_replace = '$\lambda$', 'rest'
    header_string = isdelta + string_replace
    if substring_replace == 'ID':
        header_substring = ' ID'
    else:
        header_substring = '$_{\mathrm{' + substring_replace + '}}$'
    return (header_string + header_substring).replace('$$', '')
# -----------------------------------------------
# Entry point: read a line-flux table, apply detection criteria, and emit
# ASCII, PIZI-input, and LaTeX tables. NOTE(review): this script uses
# Python 2 print statements throughout.
if __name__ == '__main__':
    # --- command-line interface ---
    parser = ap.ArgumentParser(description="Tool to transform pandas dataframe to tex and ASCII table")
    parser.add_argument('--onlyem', dest='onlyem', action='store_true')
    parser.set_defaults(onlyem=False)
    parser.add_argument('--onlyinterv', dest='onlyinterv', action='store_true')
    parser.set_defaults(onlyinterv=False)
    parser.add_argument('--nopizi', dest='nopizi', action='store_true')
    parser.set_defaults(nopizi=False)
    parser.add_argument('--notex', dest='notex', action='store_true')
    parser.set_defaults(notex=False)
    parser.add_argument("--infile")
    parser.add_argument("--outpath")
    parser.add_argument("--outfile")
    parser.add_argument("--shortlabel")
    parser.add_argument("--EW_thresh")
    parser.add_argument("--SNR_thresh")
    parser.add_argument("--const")
    args, leftovers = parser.parse_known_args()
    # --- resolve input file, output directory and defaults ---
    if args.infile is not None:
        infile = args.infile
    else:
        infile = HOME + 'Dropbox/papers/abundance_pap/AA_working/lineflux_restUV.txt'  # full-path-name of input file
    inpath = os.path.split(infile)[0] + '/'
    if args.shortlabel is None: args.shortlabel = 'rcs0327-E'
    if args.outpath is not None:
        outpath = args.outpath
    else:
        outpath = inpath  # full-path of output directory
    # Create the three output sub-directories (shell=True so 'mkdir -p' works).
    subprocess.call(['mkdir -p '+outpath+'PIZI_tables/'], shell=True)
    subprocess.call(['mkdir -p '+outpath+'txt_tables/'], shell=True)
    subprocess.call(['mkdir -p '+outpath+'tex_tables/'], shell=True)
    if args.outfile is not None:
        outfile = args.outfile
    elif 'allspec' in infile:
        outfile = os.path.splitext(os.path.basename(infile))[0].replace('allspec', args.shortlabel) + '_detected'
    else:
        outfile = os.path.splitext(os.path.basename(infile))[0] + '_detected'
    # Detection thresholds: EW significance (sigma) and flux S/N.
    if args.EW_thresh is not None:
        EW_thresh = float(args.EW_thresh)
    else:
        EW_thresh = 3.
    if args.SNR_thresh is not None:
        SNR_thresh = float(args.SNR_thresh)
    else:
        SNR_thresh = 1.
    # Flux normalisation constant (fluxes are divided by this).
    if args.const is not None:
        const = float(args.const)
    else:
        const = 1e-17
    # -----------------------------------------------
    line_table = pd.read_table(infile, delim_whitespace=True, comment="#")  # input dataframe file
    print 'Deb136:', args.shortlabel, infile #
    # Filter to the requested object / emission-only / intervening-only rows.
    if args.shortlabel is not None: line_table = line_table[line_table['label'].eq(args.shortlabel)]
    if args.onlyem: line_table = line_table[line_table.EWr_fit <= 0.].reset_index(drop=True)
    if args.onlyinterv:
        # Intervening absorbers: keep only EW and redshift columns.
        line_table = line_table[line_table.type == 'INTERVE'].reset_index(drop=True)
        quantities_to_extract = ['line_lab', 'rest_wave', 'EWr_fit', 'EWr_fit_u', 'zz', 'zz_u']
        outfile += '_interv'
        tab = line_table[quantities_to_extract]
    else:
        quantities_to_extract = ['line_lab', 'rest_wave', 'EWr_fit', 'EWr_fit_u', 'EW_signi', 'EWr_Suplim', 'f_line',
                                 'f_line_u', 'f_Suplim']
        if 'f_redcor' in line_table:
            quantities_to_extract += ['f_redcor', 'f_redcor_u', 'f_Suplim_redcor']
        tab = line_table[quantities_to_extract]
        # Normalise all fluxes by `const` so tables show order-unity numbers.
        tab.loc[:, 'f_line'] /= const
        tab.loc[:, 'f_line_u'] /= const
        tab.loc[:, 'f_Suplim'] /= const
        # print tab#
        if 'f_redcor' in tab:
            tab.loc[:, 'f_redcor'] /= const
            tab.loc[:, 'f_redcor_u'] /= const
            tab.loc[:, 'f_Suplim_redcor'] /= const
        # `bad` marks non-detections; those rows get upper limits instead.
        bad = ~detect(tab, EW_thresh=EW_thresh, SNR_thresh=SNR_thresh)
        tab.EW_signi = tab.EW_signi.astype(np.float64)
        tab.EWr_Suplim = tab.EWr_Suplim.astype(np.str)
        if 'f_redcor' in tab: tab.loc[:, 'f_Suplim_redcor'] = tab.loc[:, 'f_Suplim_redcor'].map('{:,.2f}'.format).astype(
            np.str)
        tab.loc[:, 'f_Suplim'] = tab.loc[:, 'f_Suplim'].map('{:,.2f}'.format).astype(np.str)
        tab.EWr_fit = tab.EWr_fit.astype(np.str)
        if 'f_redcor' in tab: tab.f_redcor = tab.f_redcor.map('{:,.2f}'.format).astype(np.str)
        tab.loc[:, 'f_line'] = tab.loc[:, 'f_line'].map('{:,.2f}'.format).astype(np.str)
        tab.loc[:, 'EWr_fit'] = np.where(bad, '>' + tab.EWr_Suplim, tab.EWr_fit)  # sign is other way round because EW is -ve
        if 'f_redcor' in tab: tab.loc[:, 'f_redcor'] = np.where(bad, '<' + tab.f_Suplim_redcor, tab.f_redcor)
        tab.loc[:, 'f_line'] = np.where(bad, '<' + tab.f_Suplim, tab.f_line)
        # Blank out uncertainties and significances for non-detections.
        tab.EWr_fit_u = tab.EWr_fit_u.astype(np.str)
        tab.f_line_u = tab.f_line_u.astype(np.str)
        tab.loc[bad, 'EWr_fit_u'] = None
        tab.loc[bad, 'f_line_u'] = None
        if 'f_redcor' in tab: tab.loc[bad, 'f_redcor_u'] = None
        tab.loc[bad, 'EW_signi'] = None
        quantities_to_show = ['line_lab', 'rest_wave', 'EWr_fit', 'EWr_fit_u', 'EW_signi', 'f_line', 'f_line_u']
        if 'f_redcor' in tab: quantities_to_show += ['f_redcor', 'f_redcor_u']
        tab = tab[quantities_to_show]
        tab['f_line_u'] = tab['f_line_u'].astype(np.float64).map('{:,.2f}'.format)
        if 'f_redcor' in tab: tab['f_redcor_u'] = tab['f_redcor_u'].astype(np.float64).map('{:,.2f}'.format)
        tab['EW_signi'] = tab['EW_signi'].astype(np.float64).map('{:,.2f}'.format)
        tab['EWr_fit_u'] = tab['EWr_fit_u'].astype(np.float64).map('{:,.2f}'.format)
    # Strip underscores/asterisks from the line labels for display.
    tab.line_lab = tab.line_lab.str.replace('_', '').str.replace('*', '')
    if not args.onlyinterv: tab = m.get_flux_from_atomic(tab, labelcol='line_lab', fluxcol='f_line', fluxucol='f_line_u', \
                                                         dered_fluxcol='f_redcor', dered_fluxucol='f_redcor_u',
                                                         bad_value='nan')  # modify some undetected flux values by tying them to relevant atomic ratios
    # ------modifying column names for S1723----------
    if 's1723' in args.shortlabel:
        # tab = tab.drop(['EWr_fit', 'EWr_fit_u', 'EW_signi'], axis=1) # commented out by AA on 4th Mar '19 because JRR needs EW values in final files
        tab.rename(columns={'line_lab': 'ID', 'f_line': 'integrated_flux', 'f_line_u': 'uncert_flux'}, inplace=True)
        if 'f_redcor' in tab: tab.rename(columns={'f_redcor': 'dered_flux', 'f_redcor_u': 'uncert_dered_flux'},
                                         inplace=True)
        # Record which spectrograph took the data, inferred from the label.
        # NOTE(review): if the label contains neither 'ESI' nor 'MMT',
        # `spectroscope` is undefined and the next line raises NameError.
        if 'ESI' in args.shortlabel:
            spectroscope = 'ESI'
        elif 'MMT' in args.shortlabel:
            spectroscope = 'MMT'
        tab['spectrograph'] = spectroscope
        tab['Notes'] = '--'
        header = 'This file contains the measurements of lines in the MagE sample. Generated by dftolatex.py.\n\
From file ' + infile + '\n\
Columns are:\n\
line_lab: label of the line the code was asked to fit\n\
rest_wave: rest frame vacuum wavelength of the line (A)\n\
integrated_flux: flux i.e. area under Gaussian fit (units of ' + str(const) + ' erg/s/cm^2)\n\
uncert_flux: error in above qty. (units of ' + str(const) + ' erg/s/cm^2)\n\
'
        if 'dered_flux' in tab:
            header += '\
dered_flux: dereddened flux (units of ' + str(const) + ' erg/s/cm^2)\n\
uncert_dered_flux: error in above qty. (units of ' + str(const) + ' erg/s/cm^2)\n\
'
    if 'stack' in args.shortlabel:
        tab = tab.drop(['f_line', 'f_line_u'], axis=1)
        # NOTE(review): the drop() result below is discarded (no assignment,
        # no inplace=True), so the redcor columns are NOT actually removed
        # for stacks — confirm whether that is intended.
        if 'f_redcor' in tab: tab.drop(['f_redcor', 'f_redcor_u'], axis=1)
        tab.rename(columns={'line_lab': 'ID'}, inplace=True)
        header = 'This file contains the measurements of lines in the MagE stacked spectra. Generated by dftolatex.py.\n\
From file ' + infile + '\n\
Columns are:\n\
line_lab: label of the line the code was asked to fit\n\
rest_wave: rest frame vacuum wavelength of the line (A)\n\
EWr_fit: Equivalent width of Gaussian fit (units of A)\n\
EWr_fit_u: error in above qty. (units of A)\n\
EW_signi: significance of the detection\n\
'
    else:
        # NOTE(review): this `else` pairs with the 'stack' test above, so for
        # an s1723 label (not a stack) the detailed header built earlier is
        # overwritten by this one-liner — confirm whether that is intended.
        header = '#Fluxes are in %.2E flambda units\n' % (const)
    # --replacing nans and line summed IDs in txt file--
    tab_txt = tab.replace(['nan'], ['-'], regex=True)
    # --adding header to txt file--
    fullfilename = outpath + 'txt_tables/' + outfile + '.txt'
    # savetxt with an empty array just writes the commented header lines.
    np.savetxt(fullfilename, [], header=header, comments='#')
    tab_txt.to_csv(fullfilename, sep='\t', mode='a', index=None)
    print 'Written files ' + fullfilename
    if not args.nopizi and not args.onlyinterv:
        # --writing to separate txt file for use by PIZI--
        fullfilename = outpath + 'PIZI_tables/' + outfile + '_forPIZI.txt'
        tab_txt = tab_txt.replace(['OII2470'], ['OII2470a;OII2470b'], regex=True)
        no_PIZI_lines = ['Fe', 'Mg', 'Blend']  # lines certainly not to be included for PIZI
        labelcol = 'ID' if 's1723' in args.shortlabel else 'line_lab'
        # Comment out (prefix '#') the labels PIZI should skip.
        for l in no_PIZI_lines: tab_txt[labelcol] = tab_txt[labelcol].str.replace(l, '#' + l)
        with open(fullfilename, 'w') as file:
            file.write(header)
        tab_txt.to_csv(fullfilename, sep='\t', mode='a', index=None)
        # --adding few lines by hand to txt file for use by PIZI--
        if args.shortlabel == 'rcs0327-E' and not args.onlyinterv:
            with open(fullfilename, 'a') as f:
                f.write('\
#Added OII lines to use for PIZI input for UV+O2 case\n\
#OII3727 3727.092 999 999 509.228 17.400 1281.769 28.999\n\
#OII3729 3729.875 999 999 443.935 23.182 1512.625 34.773\n\
')
        print 'Written ' + fullfilename
    if not args.notex:
        # --replacing stuff in tex file--
        to_replace = [('<', r'$<$'), ('nan', '..'), ('Ly-alpha', r'Ly$\\alpha$')]  # [(replace_this, with_this),..]
        to_replace = np.array(to_replace)
        tab_tex = tab.replace(to_replace[:, 0].tolist(), to_replace[:, 1].tolist(), regex=True)
        if 'f_redcor_u' in tab_tex: tab_tex.drop('f_redcor_u', axis=1,
                                                 inplace=True)  # to exclude column of uncertainty in dereddened flux
        # --formatting column names in tex file--
        string_replace_dict = {'line': 'Line', 'f': 'flux'}
        substring_replace_dict = {'lab': 'ID', 'wave': '$\lambda$', 'redcor': 'dereddened', 'line': 'obs'}
        if 'stack' in args.shortlabel:
            tab_tex.rename(columns={'EWr_fit': r'W$_{r}$', 'EWr_fit_u': r'$\delta$ W$_{r}$', 'EW_signi': r'significance'},
                           inplace=True)
        else:
            # Convert every column name to its LaTeX form, then fix up the EW ones.
            for i in range(len(tab_tex.columns)):
                tab_tex.columns.values[i] = column_header_format(tab_tex.columns.values[i], string_replace_dict,
                                                                 substring_replace_dict)
            tab_tex.rename(columns={r'EWr$_{\mathrm{fit}}$': r'W$_{\mathrm{r,fit}}$',
                                    r'$\delta$ EWr$_{\mathrm{fit}}$': r'$\delta$ W$_{\mathrm{r,fit}}$',
                                    r'EW$_{\mathrm{signi}}$': r'W$_{\mathrm{r,signi}}$'}, inplace=True)
            tab_tex['Line ID'] = tab_tex['Line ID'].apply(lambda x: format_forbidden(x))  # to format line labels with space and brackets
        # --writing the tex file--
        fullfilename = outpath + 'tex_tables/' + outfile + '.tex'
        tab_tex.to_latex(fullfilename, index=None, escape=False)
        # --assigning captions--
        # NOTE(review): for shortlabel 'rcs0327-E' with neither --onlyinterv
        # nor --onlyem, `caption`/`subcaption` are never assigned and the
        # insert_line_in_file calls below raise NameError — confirm the
        # expected invocations always pass one of those flags.
        if args.shortlabel == 'rcs0327-E':
            if args.onlyinterv:
                caption = 'Intervening absorption lines in \knotE Magellan/MagE spectrum. W$_{\mathrm{r,fit}}$ denotes the rest-frame \
equivalent width measured, in \AA. $z$ an $\Delta z$ are the redshift and corresponding uncertainty respectively, as \
measured from our line fitting code.'
                subcaption = r'& (\AA) & (\AA) & (\AA) & & \\'
            elif args.onlyem:
                caption = 'MagE/Magellan line flux measurements. flux$_{\mathrm{obs}}$ and $\delta$ flux$_{\mathrm{obs}}$ denote \
the observed flux and uncertainty respectively. flux$_{\mathrm{dereddened}}$ is the dereddened flux using \
E(B-V) = 0.4 $\pm$ 0.07. W$_\mathrm{r,fit}$ and $\delta$ W$_{\mathrm{r,fit}}$ denote the rest-frame \
equivalent width measured and the corresponding uncertainty in \AA\, respectively. For cases of \
non-detection (i.e. < ' + str(int(EW_thresh)) + ' $\sigma$ detection), the 3 $\sigma$ upper limit on equivalent widths and fluxes are quoted. \
Uncertainty estimates for these entries are not quoted because they do not provide any meaningful information.'
                subcaption = '& (\AA) & (\AA) & (\AA) & (\AA) & (10$^{' + str(int(np.log10(const))) + '}$ ergs/s/cm$^2$) & \
(10$^{' + str(int(np.log10(const))) + '}$ ergs/s/cm$^2$) & (10$^{' + str(int(np.log10(const))) + r'}$ ergs/s/cm$^2$) \\'
        else:
            caption = ''
            subcaption = ''
        # --adding caption to beginning of table--
        u.insert_line_in_file(r'\caption{' + caption + '}\n', 0, fullfilename)  # use -1 instead of 0 to append caption to end of table
        u.insert_line_in_file(subcaption + '\n', 4, fullfilename)  # use -1 instead of 0 to append caption to end of table
        print 'Written ' + fullfilename
    # Echo the final table to the terminal.
    if not args.notex:
        print tab_tex
    else:
        print tab_txt
    print 'Finished!'
|
def shuffle(nums, n):
    """Split *nums* at index *n*, print both halves, and return them.

    Fix: the original returned None, so the caller's ``print(shuffle(...))``
    printed ``None``; it now returns ``(first_half, second_half)`` while
    keeping the two per-half prints unchanged.
    """
    l1 = nums[:n]
    l2 = nums[n:]
    print(l1)
    print(l2)
    return l1, l2
print(shuffle([1, 2, 3, 4], 2))
import unittest
class MyDict(object):
    """Minimal dict-like wrapper constructed from keyword arguments.

    Fix: the original was an empty stub, so ``MyDict(one=1, two=2)`` raised
    TypeError and ``md['one']`` was unsupported — the test case below in this
    file could never pass. Item access is delegated to an internal dict.
    """
    def __init__(self, **kwargs):
        # Store all keyword arguments as the mapping's contents.
        self._data = dict(kwargs)

    def __getitem__(self, key):
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value
class TestMydDict(unittest.TestCase):
    """Tests for MyDict.

    Fixes: the original defined ``test_init`` twice — the first (which only
    printed a set-up message) was silently shadowed by the second. Given its
    message it was clearly meant to be ``setUp``, which runs before each test.
    """
    def setUp(self):
        # Per-test set-up (message kept verbatim from the original).
        print("测试前准备")

    def tearDown(self):
        # Per-test tear-down.
        print("测试后准备")

    def test_init(self):
        # Keyword-constructed items must be readable by subscription.
        md = MyDict(one = 1, two = 2)
        self.assertEqual(md['one'],1)
        self.assertEqual(md['two'],2)

    def test_nothing(self):
        pass
# Fix: the original compared against '_main_' (missing double underscores),
# so unittest.main() never ran when the file was executed directly.
if __name__ == '__main__':
    unittest.main()
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
#d = {'one': Series([1., 2., 3.], index=['a', 'b', 'c']), 'two': Series([1., 2., 3., 4.], index=['a','b', 'c', 'd'])}
#df = DataFrame(d, index=['r', 'd', 'a'], columns=['two', 'three'])
#df = DataFrame(d)
#print df.index
#print df.columns
#print df.values
#d = {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]}
# df = DataFrame(d, index=['a', 'b', 'c', 'd'])
# print df
#df = DataFrame()
#print df
# a = Series(range(5))
# b = Series(np.linspace(4, 20, 5))
# df = pd.concat([a, b], axis=1)
# print df
# Build a 5x5 DataFrame row by row: row i holds linspace(i, 5*i, 5) and is
# labelled with a Greek-letter index name. (Python 2 print statements.)
df = DataFrame()
index = ['alpha', 'beta', 'gamma', 'delta', 'eta']
for i in range(5):
    a = DataFrame([np.linspace(i, 5*i, 5)], index=[index[i]])
    # Append the new single-row frame below the accumulated frame.
    df = pd.concat([df, a], axis=0)
print df
# print df[1]
# Name the five columns for the label-based selection examples below.
df.columns = ['a', 'b', 'c', 'd', 'e']
# print df['b']
# df.b
# print type(df.b)
# df[['a', 'd']]
# print type(df[['a', 'd']])
# print df['b'][2]
# print df['a']['gamma']
#print df.iloc[1]
# print df.loc['beta']
# print df[1:3]
# bool_vec = [True, False, True, True, False]
# print df[bool_vec]
# print df[['b', 'd']].iloc[[1, 3]]
# print df.iloc[[1, 3]][['b', 'd']]
# print df[['b', 'd']].loc[['beta', 'delta']]
# print df.loc[['beta', 'delta']][['b', 'd']]
# print df.iat[2,3]
# print df.at['gamma', 'd']
# print df.ix['gamma', 4]
# print df.ix[['delta', 'gamma'], [1, 4]]
# print df.ix[[1, 2], ['b', 'e']]
# print df.ix[['beta', 2], ['b', 'e']]
# print df.ix[[1,2], ['b', 4]] |
# -*- coding: utf-8 -*-
""" Simrel is a package for simulating linear model data
.. moduleauthor:: Raju Rimal <raju.rimal@nmbu.no>
"""
# Import built-in modules first, followed by third-party modules,
# followed by any changes to the path and your own modules.
from .version import __version__
from .utilities import *
from .simrel import Unisimrel
|
#!/usr/bin/env python3
filename = 'example.txt'
# NOTE(review): the line above is immediately overridden; restore it to run
# against the example input.
filename = 'input.txt'
tiles = open(filename).read().splitlines()  # grid rows; '#' marks a tree
width = len(tiles[0])  # pattern width — the map repeats horizontally
height = len(tiles)
def count_trees(dx, dy):
    """Count trees ('#') hit descending the grid with slope (dx right, dy down).

    Uses the module-level ``tiles``/``width``/``height``; x wraps modulo
    ``width`` because the pattern repeats horizontally.

    Fix: the original condition ``while y < height - 1`` let the final step
    move past the last row whenever dy > 1 and (height - 1) is not a multiple
    of dy, raising IndexError; the loop now checks the position *after* the
    step. Behaviour for dy == 1 is unchanged.
    """
    x = 0
    y = 0
    count = 0
    while y + dy < height:
        x += dx
        y += dy
        tx = x % width  # wrap horizontally
        ty = y
        c = tiles[ty][tx]
        # print(x, y, tx, ty, c)
        if c == '#':
            count += 1
    return count
# Part 1: trees hit on the right-3/down-1 slope.
print('part1:', count_trees(3, 1))
# Part 2: product of the tree counts over the five specified slopes.
print('part2:',
      count_trees(1, 1)
      * count_trees(3, 1)
      * count_trees(5, 1)
      * count_trees(7, 1)
      * count_trees(1, 2)
      )
|
#Import objects
import os
import csv
#Set the path for the csv file
# Path of the input CSV, relative to this script's working directory.
file_path = os.path.join('..', 'Resources', 'election_data.csv')
# Initiate lists: all cast votes and the distinct candidate names.
votes=[]
candidates=[]
# Initiate winner-tracking variables.
winning_votes = 0
winner = ''
#Created function to calculate the total votes for a given candidate and to return that total
def calculate_votes(candidate_name,total_votes):
    """Count, report and return the votes cast for *candidate_name*.

    Reads the module-level ``votes`` list and writes a summary line both to
    the terminal and to the module-level open file ``Election_File``
    (assumed to be open when this is called — TODO confirm).
    NOTE(review): this rescans the whole votes list per candidate (O(n) per
    call), and ``global total_candidate_votes`` is unnecessary since the
    value is returned.
    """
    #Initiate variables
    i=0
    global total_candidate_votes
    total_candidate_votes = 0
    percent_of_votes = 0
    #Loop through the votes list to add the votes for the given candidate
    while i < len(votes):
        if votes[i] == candidate_name:
            total_candidate_votes += 1
        i += 1
    #Calculate the percentage of votes for the given candidate
    percent_of_votes = round(((total_candidate_votes/total_votes) * 100),3)
    #Display the candidate's information on the terminal
    print(f' {candidate_name}: {percent_of_votes:.3f}% ({total_candidate_votes})')
    #Write the candidate's information to the text file
    Election_File.write(f' {candidate_name}: {percent_of_votes:.3f}% ({total_candidate_votes}) \n')
    return total_candidate_votes
#Open and read csv file
# Read the ballots, tally per candidate, and report winner to screen + file.
with open(file_path,newline="",encoding="utf8") as elect_data:
    elect_data_read = csv.reader(elect_data,delimiter=',')
    #Skip the header
    elect_header = next(elect_data_read)
    #Create lists for each row and determine the total votes
    # (column 2 of each row is the candidate the ballot was cast for)
    votes = [row[2] for row in elect_data_read]
    total_votes = len(votes)
    #Display the total votes on the terminal
    print(f'--- text')
    print(f'Election Results')
    print(f'-----------------------')
    print(f'Total Votes: {total_votes}')
    print(f'-----------------------')
    #Write the total votes to a text file (append mode keeps prior runs)
    Election_File = open('Election.txt','a')
    Election_File.write('---text \n')
    Election_File.write('Election Results \n')
    Election_File.write('------------------------ \n')
    Election_File.write(f'Total Votes: {total_votes} \n')
    Election_File.write('------------------------ \n')
    #Loop through the votes to get the separate candidate names and call the function to calculate their votes
    for candidate_name in votes:
        if candidate_name not in candidates:
            candidates.append(candidate_name)
            total_candidate_votes = calculate_votes(candidate_name,total_votes)
            #Use the returned candidate votes to determine the winner
            if winning_votes < total_candidate_votes:
                winning_votes = total_candidate_votes
                winner = candidate_name
    #Display the winner on the terminal
    print(f'-----------------------')
    print(f'Winner: {winner}')
    print(f'-----------------------')
    print(f'--')
    #Write the winner to the text file
    Election_File.write('------------------------ \n')
    Election_File.write(f'Winner: {winner} \n')
    Election_File.write('------------------------ \n')
    Election_File.write('--- \n')
    #Close the text file
    Election_File.close()
|
# O(a) -> O(n)
def example_1(a_list):
    """Print each element of a_list on its own line — O(n) time."""
    for element in a_list:
        print(element)
# O(a * a) -> O(a^2) -> O(n^2)
def example_2(a_list):
    """Print every ordered pair of elements — O(n^2) time."""
    for first in a_list:
        for second in a_list:
            print(first, second)
# O(a + a) -> O(2a) -> O(a) -> O(n)
def example_3(a_list):
    """Two sequential passes over the same list: O(n) + O(n) = O(n)."""
    # first pass
    for item in a_list:
        print(item)
    # second pass
    for item in a_list:
        print(item)
# O(a) + O(b) -> O(a + b)
def example_4(a_list, b_list):
    """One pass over each of two independent lists — O(a + b)."""
    for item in a_list:
        print(item)
    for item in b_list:
        print(item)
# O(n) + (n^2) -> O(n^2)
def example_5(a_list):
    """An O(n) pass followed by an O(n^2) nested pass -> O(n^2) overall."""
    # O(n)
    for item in a_list:
        print(item)
    # O(n^2)
    for left in a_list:
        for right in a_list:
            print(left, right)
# O(a) * O(b) -> O(a * b)
def example_6(a_list, b_list):
    """Print every pairing of the two lists — O(a * b)."""
    for left in a_list:
        for right in b_list:
            print(left, right)
# O(a^3) + O(b)
def example_5(a_list, b_list):
    """O(b) + O(a^2) + O(a^3) -> O(a^3 + b) overall.

    NOTE(review): this redefinition shadows the earlier example_5 in this
    file — presumably it was meant to be named example_7.
    """
    # O(b)
    for item in b_list:
        print(item)
    # O(a^2)
    for left in a_list:
        for right in a_list:
            print(left, right)
    # O(a^3)
    for left in a_list:
        for mid in a_list:
            for deep in a_list:
                print(left, mid, deep)
import time
import sys
import ibmiotf.application
import ibmiotf.device
import random
import requests
# Watson IoT platform connection settings for this simulated device.
# SECURITY(review): credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/deploying.
organization = "00dxkm"
deviceType = "raspberrypi"
deviceId = "123456"
authMethod = "token"
authToken = "12345678"
def myCommandCallback(cmd):
    """Log any command pushed to this device from the IoT platform."""
    message = "Command received: %s" % cmd.data
    print(message)
# Connect the simulated device to Watson IoT; abort on any connection error.
try:
    deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
    deviceCli = ibmiotf.device.Client(deviceOptions)
except Exception as e:
    print("Caught exception connecting device: %s" % str(e))
    sys.exit()
deviceCli.connect()
# Publish randomly generated sensor readings forever, sending SMS alerts
# when any reading crosses its threshold.
# SECURITY(review): the fast2sms API key and phone number are hard-coded
# in the query strings below — move them to configuration.
while True:
    hum=random.randint(10, 50)
    temp =random.randint(30, 90)
    vib=random.randint(10, 100)
    curr=random.randint(10, 50)
    data = { 'Temperature' : temp, 'Humidity': hum, 'Vibration': vib, 'Current':curr}
    #notification alerts-----------------------------------------------------------
    if temp>60 and hum>20:
        url = "https://www.fast2sms.com/dev/bulk"
        querystring = {"authorization":"oa1O3XmWyhSg7RY9A0JuqPTdc8wiBZHNxCKkelL5fbIGDvU2M49zwPxtsy5V6NMekh3jUpuXLFQCof0a","sender_id":"FSTSMS","message":"Temperature abnormal","language":"english","route":"p","numbers":"8096632863"}
        headers = {
            'cache-control': "no-cache"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        print(response.text)
    elif vib>60:
        url = "https://www.fast2sms.com/dev/bulk"
        querystring = {"authorization":"oa1O3XmWyhSg7RY9A0JuqPTdc8wiBZHNxCKkelL5fbIGDvU2M49zwPxtsy5V6NMekh3jUpuXLFQCof0a","sender_id":"FSTSMS","message":"Motor condition is abnormal","language":"english","route":"p","numbers":"8096632863"}
        headers = {
            'cache-control': "no-cache"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        print(response.text)
    elif curr>40:
        url = "https://www.fast2sms.com/dev/bulk"
        querystring = {"authorization":"oa1O3XmWyhSg7RY9A0JuqPTdc8wiBZHNxCKkelL5fbIGDvU2M49zwPxtsy5V6NMekh3jUpuXLFQCof0a","sender_id":"FSTSMS","message":"Motor components may short circuit","language":"english","route":"p","numbers":"8096632863"}
        headers = {
            'cache-control': "no-cache"
        }
        response = requests.request("GET", url, headers=headers, params=querystring)
        print(response.text)
    # Redefined each iteration; fired once the event is published.
    def myOnPublishCallback():
        print ("Published Temperature = %s C" % temp, "Humidity = %s %%" % hum, "Vibration = %s hz" % vib, "Current = %s amp" % curr, "to IBM Watson")
    success = deviceCli.publishEvent("motor", "json", data, qos=0, on_publish=myOnPublishCallback)
    if not success:
        print("Not connected to IoTF")
    time.sleep(2)
# NOTE(review): the loop above never exits, so the two lines below are
# unreachable — the command callback is never installed.
deviceCli.commandCallback = myCommandCallback
# Disconnect the device and application from the cloud
deviceCli.disconnect()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
`run_tests` module is responsible for running all tests
"""
# ///////////////////////////////////////////////////////////
# -----------------------------------------------------------
# File: run_tests.py
# Author: Andreas Ntalakas <antalakas>
# -----------------------------------------------------------
# ///////////////////////////////////////////////////
# Python packages
# ---------------------------------------------------
import unittest
# ---------------------------------------------------
# ///////////////////////////////////////////////////
# independence - Import Test Suite
from tests.suite import test_suite
# ///////////////////////////////////////////////////
# Execute
# Entry point: run the aggregated test suite with a verbose runner.
# (Python 2 print statement below.)
if __name__ == "__main__":
    try:
        unittest.TextTestRunner(verbosity=2).run(test_suite())
    except KeyboardInterrupt:
        # On Ctrl-C just emit a newline instead of a traceback.
        print "\n"
|
import FWCore.ParameterSet.Config as cms
# Hemisphere builder from PF (CHS) jets; jets above 40 GeV, |eta| < 3,
# at most 9 jets considered.
hemispheres = cms.EDFilter(
    "HLTRHemisphere",
    inputTag = cms.InputTag("ak4PFJetsCHS"),
    minJetPt = cms.double(40),
    maxEta = cms.double(3.0),
    maxNJ = cms.int32(9)
)
# Calo-jet variant with a lower 30 GeV jet-pT threshold.
caloHemispheres = cms.EDFilter(
    "HLTRHemisphere",
    inputTag = cms.InputTag("ak4CaloJets"),
    minJetPt = cms.double(30),
    maxEta = cms.double(3.0),
    maxNJ = cms.int32(9)
)
# Sequences wrapping each filter for inclusion in paths.
hemisphereSequence = cms.Sequence(hemispheres)
caloHemisphereSequence = cms.Sequence(caloHemispheres)
|
# Read an H-row grid and print each row twice (vertical doubling).
H, W = map(int, input().split())
C = [input().split() for _ in range(H)]
# NOTE(review): this emits two leading blank lines — confirm intended.
print('\n\n')
for i in range(H):
    # C[i][0] is the row's first whitespace-separated token; assumes each
    # input row contains no internal whitespace — TODO confirm.
    print(C[i][0], C[i][0], sep="\n")
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from random import randint
class PageOne(Widget):
    """Empty placeholder widget; presumably laid out in the .kv file — TODO confirm."""
    pass
class UpdateLeds(Widget):
    """Widget that periodically randomises the colour of three LED widgets.

    The LED child widgets are looked up by the kv ids 'led1'..'led3';
    schedule() must be called once the widget tree exists so self.ids is
    populated — TODO confirm who calls it.
    """
    # Class-level defaults, replaced per instance in schedule().
    led = []
    interval = None

    def schedule(self):
        """Cache the LED widgets and start a 20 Hz update clock."""
        self.led = [self.ids["led1"], self.ids["led2"], self.ids["led3"]]
        self.interval = Clock.schedule_interval(self.update_led_state, 1/20)

    def update_led_state(self, *args):
        """Randomly switch each LED: green when 'on', black when 'off'."""
        led_state = [randint(0, 1), randint(0, 1), randint(0, 1)]
        print(led_state)
        for i in range(0, 3):
            if led_state[i] != 0:
                # RGBA green
                self.led[i].basecolor = [0, 1, 0, 1]
            else:
                # RGBA black
                self.led[i].basecolor = [0, 0, 0, 1]
class ControlApp(App):
    """Kivy application whose root widget is the LED panel."""

    def build(self):
        # Construct and return the root widget tree.
        root = UpdateLeds()
        return root
# Launch the app when this module is executed directly.
if __name__ == "__main__":
    ControlApp().run()
|
from django.contrib import admin
from .models import Seeker,Education,Experience,Skill,Provider,Company,Job,Resumee,Application,Identity,CustomJobApplication
# Register every job-board model with the default admin site, preserving
# the registration order of the original one-call-per-model version.
for _model in (Seeker, Education, Experience, Provider, Company, Job,
               Resumee, Application, Skill, Identity, CustomJobApplication):
    admin.site.register(_model)
# -*- coding:utf-8 -*-
import cv2 as cv
import numpy as np
import sys
# Demo: OpenCV distance transforms on a toy matrix and on a rice image.
if __name__ == '__main__':
    # Small binary matrix with a single zero pixel in the centre, used to
    # illustrate the different pixel-distance metrics.
    array = np.array([[1, 1, 1, 1, 1],
                      [1, 1, 1, 1, 1],
                      [1, 1, 0, 1, 1],
                      [1, 1, 1, 1, 1],
                      [1, 1, 1, 1, 1]], dtype='uint8')
    # Compute city-block (L1), Euclidean (L2) and chessboard (C) distances.
    dst_L1 = cv.distanceTransform(array, cv.DIST_L1, cv.DIST_MASK_3)
    dst_L2 = cv.distanceTransform(array, cv.DIST_L2, cv.DIST_MASK_5)
    dst_C = cv.distanceTransform(array, cv.DIST_C, cv.DIST_MASK_3)
    # Load the sample image in grayscale; bail out if missing.
    rice = cv.imread('./images/rice.png', cv.IMREAD_GRAYSCALE)
    if rice is None:
        print('Failed to read rice.png.')
        sys.exit()
    # Binarize; the second call additionally inverts black and white.
    # cv.threshold returns (retval, image) — hence the [1] indexing below.
    rice_BW = cv.threshold(rice, 50, 255, cv.THRESH_BINARY)
    rice_BW_INV = cv.threshold(rice, 50, 255, cv.THRESH_BINARY_INV)
    # Distance transform of each binary image (different output depths).
    dst_rice_BW = cv.distanceTransform(rice_BW[1], 1, 3, dstType=cv.CV_32F)
    dst_rice_BW_INV = cv.distanceTransform(rice_BW_INV[1], 1, 3, dstType=cv.CV_8U)
    # Print the matrix results (labels: city-block / Euclidean / chessboard).
    print('街区距离:\n{}'.format(dst_L1))
    print('欧氏距离:\n{}'.format(dst_L2))
    print('棋盘距离:\n{}'.format(dst_C))
    # Display the binary images and their distance transforms.
    cv.imshow('rice_BW', rice_BW[1])
    cv.imshow('rice_BW_INV', rice_BW_INV[1])
    cv.imshow('dst_rice_BW', dst_rice_BW)
    cv.imshow('dst_rice_BW_INV', dst_rice_BW_INV)
    cv.waitKey(0)
    cv.destroyAllWindows()
|
from __future__ import unicode_literals
from ..login_reg_app.models import User
from django.db import models
class Food(models.Model):
    """A food entry logged by a user, with its nutrient breakdown."""
    # Name of the food item.
    food = models.CharField(max_length=255)
    # Serving count (up to 99.99).
    quantity = models.DecimalField(default=1, max_digits=4, decimal_places=2)
    # Nutrient amounts; units not recorded here — TODO confirm (kcal/grams?).
    calories = models.DecimalField(default=0, max_digits=7, decimal_places=2)
    carbohydrates = models.DecimalField(default=0, max_digits=7, decimal_places=2)
    lipids = models.DecimalField(default=0, max_digits=7, decimal_places=2)
    protein = models.DecimalField(default=0, max_digits=7, decimal_places=2)
    sugar = models.DecimalField(default=0, max_digits=5, decimal_places=2)
    # Owner of this entry. NOTE(review): ForeignKey without on_delete
    # implies Django < 2.0 — confirm the project's Django version.
    user = models.ForeignKey(User)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-12-15 22:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Data migration: seed project categories and rename STATION->PROJECT.

    Raw SQL only (no reverse operations), so this migration is
    irreversible as written.
    """

    dependencies = [
        ('dataentry', '0209_auto_20211215_2211'),
    ]

    operations = [
        # Seed the three project categories in display order.
        migrations.RunSQL("insert into dataentry_projectcategory (name, sort_order) values ('Transit Stations',1)"),
        migrations.RunSQL("insert into dataentry_projectcategory (name, sort_order) values ('Impact Multiplying',2)"),
        migrations.RunSQL("insert into dataentry_projectcategory (name, sort_order) values ('Admin',3)"),
        # Categorise existing border stations by their non_transit flag.
        migrations.RunSQL("update dataentry_borderstation set project_category_id=1 where non_transit=false"),
        migrations.RunSQL("update dataentry_borderstation set project_category_id=2 where non_transit=true"),
        migrations.RunSQL("update dataentry_borderstation set features='hasStaff;hasProjectStats;hasLocations;hasLocationStaffing;hasForms' where non_transit=true"),
        # Rename STATION-flavoured permission values to PROJECT-flavoured ones.
        migrations.RunSQL("update dataentry_permission set min_level='PROJECT' where min_level='STATION'"),
        migrations.RunSQL("update dataentry_permission set permission_group='PROJECTS' where permission_group='STATIONS'"),
        migrations.RunSQL("update dataentry_permission set permission_group='PROJECT_STATISTICS' where permission_group='STATION_STATISTICS'"),
    ]
|
# Print the acronym of the input words: skip Croatian conjunctions unless
# the word is the very first one.
l = list(input().split())
c = ['i', 'pa', 'te', 'ni', 'niti', 'a', 'ali', 'nego', 'no', 'ili']
# Fix: the original tested `s is l[0]`, an identity check that can also be
# true for *later* duplicates of the first word (CPython interns short
# strings), wrongly including a repeated conjunction. Compare by index.
for i, s in enumerate(l):
    if s not in c or i == 0:
        print(s[0].upper(), end='')
# -*- coding=utf-8 -*-
from flask import url_for, render_template, redirect
from datetime import datetime
from . import main
@main.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@main.route('/index')
def index_():
    """Redirect /index to the landing page (Python 2: debug-prints the URL)."""
    print url_for('index')
    return redirect(url_for('index'))
|
#!/usr/bin/env python
from setuptools import setup
# Convert the Markdown README to reST for PyPI when pypandoc is available;
# fall back to an empty long description otherwise.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    long_description = ""
# Packages shipped by this distribution.
packages = [
    'requests_respectful',
]
# Runtime dependencies.
requires = [
    'requests>=2.0.0',
    'redis>=2.10.3',
    'PyYaml',
]
setup(
    name='requests-respectful',
    version="0.1.2",
    description='Minimalist wrapper on top of Requests to work within rate limits of any amount of services simultaneously. Parallel processing friendly.',
    long_description=long_description,
    author="Nicholas Brochu",
    author_email='info@nicholasbrochu.com',
    packages=packages,
    include_package_data=True,
    install_requires=requires,
    license='Apache License v2',
    url='https://github.com/nbrochu/requests-respectful',
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ]
)
|
# Generated by Django 3.2.5 on 2021-08-16 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: create the purchase_add_database table."""

    dependencies = [
        ('sub_part', '0017_quo_add_database'),
    ]

    operations = [
        migrations.CreateModel(
            name='purchase_add_database',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('select_warehouse', models.CharField(max_length=100)),
                ('select_supplier', models.CharField(max_length=100)),
                ('select_state', models.CharField(max_length=100)),
                ('product_name', models.CharField(max_length=100)),
                # NOTE(review): numeric-looking values (quantity, unit price)
                # are stored as CharFields — confirm intended.
                ('quantatity', models.CharField(max_length=100)),
                ('unit_price', models.CharField(max_length=100)),
                ('product_image', models.CharField(max_length=100)),
            ],
        ),
    ]
|
# coding=utf-8
"""
字典类转换工具
"""
# TODO: read entries from a list file and assemble them into a map, e.g. list(map(lambda x: x['item_id'], sku_data)); dedupe with list(set(item_ids))
# TODO: persist common enum-class info, e.g. json.dumps(spu_draft_data, cls=bm.ExtendJSONEncoder)
# TODO: read column-comment info directly from the database and join it into a map
# TODO: read the map directly from the code
# TODO: (study note) what is an iterator? what is a generator?
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Project: GDAL utils.auxiliary
# Purpose: gdal utility functions
# Author: Even Rouault <even.rouault at spatialys.com>
# Author: Idan Miara <idan@miara.com>
#
# ******************************************************************************
# Copyright (c) 2015, Even Rouault <even.rouault at spatialys.com>
# Copyright (c) 2020, Idan Miara <idan@miara.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from typing import Optional, Union
from osgeo import gdal
from osgeo_utils.auxiliary.base import get_extension, is_path_like, PathLike
PathOrDS = Union[PathLike, gdal.Dataset]
def DoesDriverHandleExtension(drv: gdal.Driver, ext: str):
    """Return True if *drv* advertises *ext* in its DMD_EXTENSIONS metadata.

    The comparison is case-insensitive; drivers with no extension metadata
    are treated as not handling anything.
    """
    known = drv.GetMetadataItem(gdal.DMD_EXTENSIONS)
    if known is None:
        return False
    return known.lower().find(ext.lower()) >= 0
def GetOutputDriversFor(filename: PathLike, is_raster=True):
    """Return short names of GDAL drivers that could create `filename`.

    Drivers are matched by file extension or by connection prefix; a driver
    qualifies only if it supports Create or CreateCopy for the requested
    raster/vector capability.

    Args:
        filename: output filename (or path-like) to guess drivers for.
        is_raster: True to consider raster drivers, False for vector drivers.

    Returns:
        A list of driver ShortNames (possibly empty).
    """
    drv_list = []
    ext = get_extension(filename)
    if ext.lower() == 'vrt':
        return ['VRT']
    for i in range(gdal.GetDriverCount()):
        drv = gdal.GetDriver(i)
        if (drv.GetMetadataItem(gdal.DCAP_CREATE) is not None or
            drv.GetMetadataItem(gdal.DCAP_CREATECOPY) is not None) and \
           drv.GetMetadataItem(gdal.DCAP_RASTER if is_raster else gdal.DCAP_VECTOR) is not None:
            if ext and DoesDriverHandleExtension(drv, ext):
                drv_list.append(drv.ShortName)
            else:
                prefix = drv.GetMetadataItem(gdal.DMD_CONNECTION_PREFIX)
                # BUGFIX: str() so a pathlib.Path filename (PathLike) does not
                # fail on .lower().
                if prefix is not None and str(filename).lower().startswith(prefix.lower()):
                    drv_list.append(drv.ShortName)
    # GMT is registered before netCDF for opening reasons, but we want
    # netCDF to be used by default for output.
    # BUGFIX: the condition used to be `not drv_list`, which could only ever
    # raise IndexError on drv_list[0]; we need at least two entries here.
    if ext.lower() == 'nc' and len(drv_list) >= 2 and \
            drv_list[0].upper() == 'GMT' and drv_list[1].upper() == 'NETCDF':
        drv_list = ['NETCDF', 'GMT']
    return drv_list
def GetOutputDriverFor(filename: PathLike, is_raster=True, default_raster_format='GTiff',
                       default_vector_format='ESRI Shapefile'):
    """Pick one output driver short name for `filename`.

    Empty filename selects the in-memory 'MEM' driver. A name without an
    extension falls back to the given default format; an extension that no
    driver claims raises an Exception. When several drivers match, the first
    is used and a notice is printed.
    """
    if not filename:
        return 'MEM'
    drv_list = GetOutputDriversFor(filename, is_raster)
    ext = get_extension(filename)
    if not drv_list:
        if ext:
            raise Exception("Cannot guess driver for %s" % filename)
        return default_raster_format if is_raster else default_vector_format
    if len(drv_list) > 1:
        print("Several drivers matching %s extension. Using %s" % (ext if ext else '', drv_list[0]))
    return drv_list[0]
def open_ds(filename_or_ds: PathOrDS, *args, **kwargs):
    """Open and return a gdal.Dataset (thin convenience wrapper over OpenDS)."""
    return OpenDS(filename_or_ds, *args, **kwargs).__enter__()
def get_ovr_count(filename_or_ds: PathOrDS):
    """Return the number of overviews of band 1 of the given dataset/path."""
    with OpenDS(filename_or_ds) as ds:
        return ds.GetRasterBand(1).GetOverviewCount()
def get_ovr_idx(filename_or_ds: PathOrDS, ovr_idx: Optional[int]):
    """Normalize an overview index.

    None maps to 0 (the full-resolution raster). Negative indices count from
    the end: -1 is the last overview, -2 the one before it, clamped at 0.

    Args:
        filename_or_ds: dataset or path; only opened when ovr_idx is negative.
        ovr_idx: requested overview index, possibly None or negative.

    Returns:
        A non-negative overview index.
    """
    if ovr_idx is None:
        ovr_idx = 0
    if ovr_idx < 0:
        # -1 is the last overview; -2 is the one before the last
        # BUGFIX: was get_ovr_count(open_ds(filename_or_ds)), which opened the
        # dataset through open_ds and never released it; get_ovr_count manages
        # the open/close itself.
        overview_count = get_ovr_count(filename_or_ds)
        ovr_idx = max(0, overview_count + ovr_idx + 1)
    return ovr_idx
class OpenDS:
    """Context manager that yields a gdal.Dataset.

    Accepts either an already-open dataset (used as-is, never closed here) or
    a path-like (opened lazily in __enter__ and released in __exit__).
    """
    __slots__ = ['filename', 'ds', 'args', 'kwargs', 'own', 'silent_fail']
    def __init__(self, filename_or_ds: PathOrDS, silent_fail=False, *args, **kwargs):
        """Store either the dataset or the filename; extra args go to _open_ds.

        Args:
            filename_or_ds: a gdal.Dataset or a path-like to open.
            silent_fail: when True, __enter__ returns None instead of raising
                if the file cannot be opened.
        """
        self.ds: Optional[gdal.Dataset] = None
        self.filename: Optional[PathLike] = None
        if is_path_like(filename_or_ds):
            self.filename = str(filename_or_ds)
        else:
            self.ds = filename_or_ds
        self.args = args
        self.kwargs = kwargs
        # own is True only for datasets this instance opened itself.
        self.own = False
        self.silent_fail = silent_fail
    def __enter__(self) -> gdal.Dataset:
        if self.ds is None:
            self.ds = self._open_ds(self.filename, *self.args, **self.kwargs)
            if self.ds is None and not self.silent_fail:
                raise IOError('could not open file "{}"'.format(self.filename))
            self.own = True
        return self.ds
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.own:
            # BUGFIX: was `self.ds = False`; None matches the declared
            # Optional[gdal.Dataset] type and still drops the reference so
            # GDAL can close the dataset.
            self.ds = None
    @staticmethod
    def _open_ds(
        filename: PathLike,
        access_mode=gdal.GA_ReadOnly,
        ovr_idx: Optional[int] = None,
        open_options: Optional[dict] = None,
        logger=None,
    ):
        """Open `filename` with gdal.OpenEx, optionally selecting an overview.

        Args:
            filename: path of the file to open.
            access_mode: gdal access mode flag.
            ovr_idx: overview to expose (normalized via get_ovr_idx).
            open_options: extra driver open options (dict, copied).
            logger: optional logger for a debug trace of the open call.
        """
        open_options = dict(open_options or dict())
        ovr_idx = get_ovr_idx(filename, ovr_idx)
        if ovr_idx > 0:
            # gdal overview 0 is the first overview (after the base layer)
            open_options["OVERVIEW_LEVEL"] = ovr_idx - 1
        if logger is not None:
            s = 'opening file: "{}"'.format(filename)
            if open_options:
                s = s + " with options: {}".format(str(open_options))
            logger.debug(s)
        open_options = ["{}={}".format(k, v) for k, v in open_options.items()]
        return gdal.OpenEx(str(filename), access_mode, open_options=open_options)
|
# Interactive itinerary editor: the itinerary is one plain string and
# commands of the form "Name:arg1:arg2" modify it until "Travel" is entered.
stops = input()  # initial itinerary string
data = input()   # first command line
while not data == "Travel":
    line = data.split(":")
    command = line[0]
    if command == "Add Stop":
        # "Add Stop:{index}:{text}" - insert text before position index.
        index = int(line[1])
        string_to_insert = line[2]
        # Only indices 0..len(stops)-1 are accepted (no append at the end).
        if index in range(len(stops)):
            first_half = stops[:index]
            second_half = stops[index:]
            stops = first_half + string_to_insert + second_half
            print(stops)
    elif command == "Remove Stop":
        # "Remove Stop:{start}:{end}" - delete the inclusive range start..end.
        start_index = int(line[1])
        end_index = int(line[2])
        if start_index in range(len(stops)) and end_index in range(len(stops)):
            first_half = stops[:start_index]
            second_half = stops[end_index + 1:]
            stops = first_half + second_half
            print(stops)
    elif command == "Switch":
        # "Switch:{old}:{new}" - replace every occurrence of old with new.
        old_string = line[1]
        new_string = line[2]
        if old_string in stops:
            stops = stops.replace(old_string, new_string)
            print(stops)
    data = input()  # read the next command
print(f"Ready for world tour! Planned stops: {stops}")
# Generated by Django 3.1.5 on 2021-01-18 14:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a CHECK constraint requiring short_url to be at least 4 chars long."""
    dependencies = [
        ('shortcode', '0003_auto_20210115_1141'),
    ]
    operations = [
        migrations.AddConstraint(
            model_name='shortcode',
            # NOTE(review): the `short_url__length` lookup requires a Length
            # lookup registered for the field type -- confirm it is registered
            # before this migration runs. The constraint name keeps the
            # original 'lentgh' typo on purpose: renaming it now would require
            # a new migration, since the name is recorded in the database.
            constraint=models.CheckConstraint(check=models.Q(short_url__length__gte=4), name='shortcode_shortcode_short_url_lentgh'),
        ),
    ]
|
import time
import RPi.GPIO as GPIO
import random
# Drive BCM pin 5 with 50 Hz software PWM until interrupted with Ctrl-C.
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.OUT)
p = GPIO.PWM(5, 50)  # BCM pin=5 frequency=50Hz
p.start(0)  # begin with 0% duty cycle
try:
    while 1:
        # 101 steps of a random duty cycle (0-99%), one per second;
        # `dc` itself is unused in this first loop.
        for dc in range(0, 101):
            p.ChangeDutyCycle(int(random.random() * 100))
            time.sleep(1)
        # Then ramp the duty cycle down from 100% to 0% in 1% steps.
        for dc in range(100, -1, -1):
            p.ChangeDutyCycle(dc)
            time.sleep(0.5)
except KeyboardInterrupt:
    pass  # Ctrl-C stops the loop; clean up below.
p.stop()
GPIO.cleanup()
|
import os
import sys
BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from pytorch_transformers.modeling_TSbert_v3 import BertForSequenceClassificationTSv3
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.configuration_bert import BertConfig
from pytorch_transformers.optimization_bert import WarmupLinearSchedule,BertAdam
import argparse
import torch
import random
import numpy as np
import pickle
import logging
from tqdm import tqdm, trange
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from utils_TSbert_v3 import MyDataProcessorSegres,convert_examples_to_features_Segres,Metrics
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
# from torch.utils.data.distributed import DistributedSampler
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# ---------------------------------------------------------------------------
# CLI definition and logging setup. NOTE: this runs at import time -- it
# parses sys.argv, creates the output directories and opens a log file.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
                    default="data/",
                    type=str,
                    help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--bert_model", default="bert-base-chinese", type=str,
                    help="Bert pre-trained model selected in the list: bert-base-uncased, "
                         "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                         "bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--task_name",
                    default="alime",
                    type=str,
                    help="The name of the task to train.")
parser.add_argument("--output_dir",
                    default="model_save_v3",
                    type=str,
                    help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--temp_score_file_path",
                    default="temp_score_file.txt",
                    type=str,
                    help="temp score_file_path where the model predictions will be written for metrics.")
parser.add_argument("--log_save_path",
                    default="log.txt",
                    type=str,
                    help="log written when training")
parser.add_argument("--max_segment_num",
                    default=10,
                    type=int,
                    help="The maximum total segment number.")
parser.add_argument("--max_seq_length",
                    default=350,
                    type=int,
                    help="The maximum total input sequence length after WordPiece tokenization. \n"
                         "Sequences longer than this will be truncated, and sequences shorter \n"
                         "than this will be padded.")
parser.add_argument("--input_cache_dir",
                    default="input_cache_v3",
                    type=str,
                    help="Where do you want to store the processed model input")
# BUGFIX: the original passed both type=bool and action='store_true'.
# argparse's store_true action does not accept a `type` keyword, so parser
# construction raised TypeError before any argument could be parsed.
parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")
# NOTE(review): `type=bool` is an argparse footgun -- every non-empty string
# (including "False") converts to True. Kept as-is for CLI compatibility.
parser.add_argument("--do_lower_case",
                    default=True,
                    type=bool,
                    help="Set this flag if you are using an uncased model.")
parser.add_argument("--num_train_epochs",
                    default=5,
                    type=float,
                    help="Total number of training epochs to perform.")
parser.add_argument("--train_batch_size",
                    default=20,
                    type=int,
                    help="Total batch size for training.")
parser.add_argument("--learning_rate",
                    default=2e-5,
                    type=float,
                    help="The initial learning rate for Adam.")
parser.add_argument("--warmup_proportion",
                    default=0.1,
                    type=float,
                    help="Proportion of training to perform linear learning rate warmup for. "
                         "E.g., 0.1 = 10%% of training.")
parser.add_argument('--seed',
                    type=int,
                    default=42,
                    help="random seed for initialization")
args = parser.parse_args()
# Resolve every output path under data_dir/task_name/output_dir. Order
# matters: args.output_dir is rewritten last because the first two joins
# still use its original relative value.
args.temp_score_file_path = os.path.join(args.data_dir, args.task_name, args.output_dir, args.temp_score_file_path)
args.log_save_path = os.path.join(args.data_dir, args.task_name, args.output_dir, args.log_save_path)
args.output_dir = os.path.join(args.data_dir, args.task_name, args.output_dir)
args.input_cache_dir = os.path.join(args.data_dir, args.task_name, args.input_cache_dir)
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)
if not os.path.exists(args.input_cache_dir):
    os.makedirs(args.input_cache_dir)
# Log to both a file (log_save_path) and the console at INFO level.
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler(args.log_save_path)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(console)
logger.info(args)
def set_seed():
    """Seed python, numpy and torch RNGs from args.seed for reproducibility."""
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.device_count() > 0:
        # Seed every visible GPU as well.
        torch.cuda.manual_seed_all(args.seed)
def get_dataloader(tokenizer, examples, label_list, tag):
    """Convert `examples` to features (cached on disk) and wrap them in a DataLoader.

    Args:
        tokenizer: BERT tokenizer used when features must be (re)computed.
        examples: dataset examples from the data processor.
        label_list: task label list.
        tag: cache-file prefix; "train" selects random sampling, anything
            else sequential sampling.

    Returns:
        A torch DataLoader over (input_ids, token_type_ids, attention_mask,
        cls_sep_pos, true_len, labels) tensors.
    """
    logger.info("start prepare input data")
    cached_train_features_file = os.path.join(args.input_cache_dir, tag + "input.pkl")
    try:
        with open(cached_train_features_file, "rb") as reader:
            features = pickle.load(reader)
    # BUGFIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit; any ordinary failure still falls back to recomputing.
    except Exception:
        logger.info("start prepare features_res_lab")
        features = convert_examples_to_features_Segres(
            examples, label_list, max_seg_num=args.max_segment_num, max_seq_length=args.max_seq_length, tokenizer=tokenizer)
        logger.info(" Saving train features into cached file %s", cached_train_features_file)
        with open(cached_train_features_file, "wb") as writer:
            pickle.dump(features, writer)
    logger.info(" Num examples = %d", len(examples))
    seg_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    seg_token_type_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    seg_attention_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    cls_sep_pos = torch.tensor([f.cls_sep_pos for f in features], dtype=torch.long)
    true_len = torch.tensor([f.true_len for f in features], dtype=torch.long)
    labels = torch.FloatTensor([f.label_id for f in features])
    train_data = TensorDataset(seg_input_ids, seg_token_type_ids, seg_attention_mask,
                               cls_sep_pos, true_len, labels)
    # Shuffle during training, keep dataset order for evaluation.
    if (tag == "train"):
        train_sampler = RandomSampler(train_data)
    else:
        train_sampler = SequentialSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    return train_dataloader
def eval(model,tokenizer,device,myDataProcessorSeg):
    """Score the test split and compute ranking metrics (MAP/MRR/P@1/R1/R2/R5).

    Writes one "score<TAB>label" line per example to args.temp_score_file_path,
    then delegates metric computation to Metrics.

    NOTE(review): this function shadows the builtin `eval`.
    """
    logger.info("start evaluation")
    # uttdatafile = os.path.join(args.data_dir, args.task_name, "test.txt")
    segdatafile = os.path.join(args.data_dir, args.task_name, "testseg.txt")
    examples= myDataProcessorSeg.get_test_examples(segdatafile)
    label_list = myDataProcessorSeg.get_labels()
    eval_dataloader = get_dataloader(tokenizer, examples,label_list, "valid")
    y_pred = []
    y_label=[]
    metrics = Metrics(args.temp_score_file_path)
    for batch in tqdm(eval_dataloader,desc="Evaluating"):
        batch = tuple(t.to(device) for t in batch)
        seg_input_ids, seg_token_type_ids, seg_attention_mask, \
        cls_sep_pos, true_len,labels = batch
        y_label+=labels.data.cpu().numpy().tolist()
        # Inference only: no gradients needed.
        with torch.no_grad():
            logits= model(seg_input_ids, seg_token_type_ids, seg_attention_mask,cls_sep_pos, true_len,labels=None)
        y_pred += logits.data.cpu().numpy().tolist()
    # Persist the scores so Metrics can read them back from disk.
    with open(args.temp_score_file_path, 'w',encoding='utf-8') as output:
        for score, label in zip(y_pred, y_label):
            output.write(
                str(score) + '\t' +
                str(int(label)) + '\n'
            )
    result = metrics.evaluate_all_metrics()
    return result
def train(model,tokenizer,device,myDataProcessorSeg,n_gpu):
    """Fine-tune `model` on the segmented training data.

    Evaluates after every epoch and saves weights/config/vocab whenever the
    sum of R1+R2+R5 improves on the best seen so far.

    Args:
        model: the model to train (possibly wrapped in DataParallel).
        tokenizer: BERT tokenizer.
        device: torch device the batches are moved to.
        myDataProcessorSeg: data processor providing examples and labels.
        n_gpu: number of GPUs; loss is averaged across them when > 1.
    """
    segdatafile = os.path.join(args.data_dir, args.task_name, "trainseg.txt")
    best_result = [0, 0, 0, 0, 0, 0]  # MAP, MRR, P@1, R1, R2, R5
    examples = myDataProcessorSeg.get_train_examples(segdatafile)
    num_train_optimization_steps = int(
        len(examples) / args.train_batch_size) * args.num_train_epochs
    param_optimizer = list(model.named_parameters())
    # Biases and LayerNorm parameters get no weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    label_list = myDataProcessorSeg.get_labels()
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_optimization_steps)
    train_dataloader = get_dataloader(tokenizer, examples, label_list, "train")
    set_seed()
    model.train()
    global_step = 0
    for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
        tr_loss = 0
        s = 0  # batches seen this epoch (assumes at least one batch)
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            seg_input_ids, seg_token_type_ids, seg_attention_mask, cls_sep_pos, true_len, labels = batch
            logits, loss = model(seg_input_ids, seg_token_type_ids, seg_attention_mask, cls_sep_pos, true_len, labels)
            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            tr_loss += loss.item()
            s += 1
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            logger.info('Epoch{} Batch{} - loss: {:.6f} batch_size:{}'.format(epoch, step, loss.item(), labels.size(0)))
            global_step += 1
        # BUGFIX: the message was "average loss(:.6f)" -- it contained no
        # format placeholder, so the average loss value was never logged.
        logger.info("average loss: {:.6f}".format(tr_loss / s))
        # Evaluate after every epoch; keep the checkpoint with the best R-sum.
        model.eval()
        result = eval(model, tokenizer, device, myDataProcessorSeg)
        logger.info("Evaluation Result: \nMAP: %f\tMRR: %f\tP@1: %f\tR1: %f\tR2: %f\tR5: %f",
                    result[0], result[1], result[2], result[3], result[4], result[5])
        if (result[3] + result[4] + result[5] > best_result[3] + best_result[4] + best_result[5]):
            logger.info("save model")
            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
            torch.save(model_to_save.state_dict(), output_model_file)
            model_to_save.config.to_json_file(output_config_file)
            tokenizer.save_vocabulary(args.output_dir)
            best_result = result
        logger.info("best result")
        logger.info("Best Result: \nMAP: %f\tMRR: %f\tP@1: %f\tR1: %f\tR2: %f\tR5: %f",
                    best_result[0], best_result[1], best_result[2],
                    best_result[3], best_result[4], best_result[5])
def main():
    """Entry point: train when --do_train is set, otherwise evaluate a saved checkpoint."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    set_seed()
    myDataProcessorSeg=MyDataProcessorSegres()
    # label_list = myDataProcessorUtt.get_labels()
    # num_labels = len(label_list)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    config = BertConfig.from_pretrained(args.bert_model)
    if args.do_train:
        logger.info("start train...")
        # NOTE(review): output_model_file is unused in this branch.
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        model = BertForSequenceClassificationTSv3.from_pretrained(args.bert_model,
                                              config=config,
                                              max_seg_num=args.max_segment_num,
                                              max_seq_len=args.max_seq_length,
                                              device=device)
        model.to(device)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)
        train(model,tokenizer,device,myDataProcessorSeg,n_gpu)
    else:
        logger.info("start test...")
        logger.info("load dict...")
        # Restore the best checkpoint saved by train().
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        model_state_dict = torch.load(output_model_file)
        model = BertForSequenceClassificationTSv3.from_pretrained(args.bert_model, config=config,
                                              state_dict=model_state_dict,
                                              max_seg_num=args.max_segment_num,
                                              max_seq_len=args.max_seq_length,
                                              device=device)
        model.to(device)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # similar_score(model, tokenizer, device,myDataProcessorSeg)
        result = eval(model, tokenizer, device, myDataProcessorSeg)
        logger.info("Evaluation Result: \nMAP: %f\tMRR: %f\tP@1: %f\tR1: %f\tR2: %f\tR5: %f",
                    result[0], result[1], result[2], result[3], result[4], result[5])
        print(result)
if __name__ == "__main__":
    main()
|
from BiTree import BiTNode, Arr2Tree, InOrder
from Array import RandArr
def CopyTree(root: BiTNode):
if root is None:
return None
node = BiTNode()
node.data = root.data
node.lchild = CopyTree(root.lchild)
node.rchild = CopyTree(root.rchild)
return node
if __name__ == '__main__':
    # Build a tree from 10 random values, deep-copy it, then print both
    # in-order traversals (they should be identical).
    arr = RandArr(10)
    root = Arr2Tree(arr, 0, len(arr) -1)
    root2 = CopyTree(root)
    InOrder(root)
    print()  # newline separating the two traversals
    InOrder(root2)
import tensorflow as tf
import os
import cv2
import numpy as np
import random
import datasets.preprocessing as preprocessing
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def _int64_feature(value):
    """Wrap a scalar int as a tf.train.Feature (int64_list)."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a bytes value as a tf.train.Feature (bytes_list)."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def load_image(src, filters='RGB', preprocessor=None):
    """Read an image, center-crop it square, resize to 448x448 and convert color space.

    Args:
        src: path to the image file.
        filters: 'RGB', 'LAB' or 'YUV' selects the conversion from BGR;
            any other value leaves the image in BGR.
        preprocessor: optional object with an .apply(image) method, run on
            the BGR image before the color conversion.

    Returns:
        The image as a float32 ndarray, or None (after printing src) when
        the file cannot be read.
    """
    img = cv2.imread(src, 1)
    if img is None:
        print(src)
        return None
    h, w, _ = img.shape
    side = min(h, w)
    # Center crop to a side x side square.
    img = img[h // 2 - side // 2: h // 2 + side // 2,
              w // 2 - side // 2: w // 2 + side // 2]
    img = cv2.resize(img, (448, 448), interpolation=cv2.INTER_CUBIC)
    if preprocessor:
        img = preprocessor.apply(img)
    conversions = {'LAB': cv2.COLOR_BGR2LAB,
                   'YUV': cv2.COLOR_BGR2YUV,
                   'RGB': cv2.COLOR_BGR2RGB}
    if filters in conversions:
        img = cv2.cvtColor(img, conversions[filters])
    return img.astype(np.float32)
def save_data(source_name, dataset_name=None, train=.8, seed=44, filters='RGB',
              preprocessor=None, preprocess_all=False):
    """Build train/val TFRecord files from a folder-per-class image dataset.

    Args:
        source_name: directory (relative to this file) with one subfolder per class.
        dataset_name: basename for the .txt class list and .tfrecords outputs;
            defaults to the resolved source_name.
        train: fraction of examples assigned to the train split.
        seed: shuffle seed; a falsy seed skips the deterministic sort+shuffle.
        filters: color space passed through to load_image.
        preprocessor: optional object with .apply(); applied per image, or to
            the whole stacked batch when preprocess_all is True.
        preprocess_all: load all images first, then preprocess them in one call.
    """
    source_name = os.path.join(__location__, source_name)
    if not dataset_name:
        dataset_name = source_name
    data = []
    # Write one class name per line; '+' in folder names stands for a space.
    with open(os.path.join(__location__, dataset_name + '.txt'), 'w') as f:
        for i, class_name in enumerate(os.listdir(source_name)):
            f.write('%s\n' % (class_name.replace('+', ' ')))
            for j in os.listdir(source_name + '/' + class_name):
                data.append((source_name + '/%s/%s' % (class_name, j), i))
    if seed:
        # Sort first so the shuffle is reproducible regardless of listdir order.
        data.sort(key=lambda x: x[0] + '/' + str(x[1]))
        random.seed(seed)
        random.shuffle(data)
    data = {
        'train': data[:int(train * len(data))],
        'val': data[int(train * len(data)):]
    }
    for mode in ['train', 'val']:
        filename = os.path.join(__location__, '%s-%s.tfrecords' % (dataset_name, mode))
        writer = tf.python_io.TFRecordWriter(filename)
        if not preprocess_all:
            # Stream: preprocess and serialize one image at a time.
            example_index = 1
            for address, label in data[mode]:
                print('\rLoading %s/%s %s image...' % (example_index, len(data[mode]), mode), end='')
                example_index += 1
                # NOTE(review): load_image may return None for unreadable
                # files; img.tostring() would then raise -- confirm inputs.
                img = load_image(address, filters, preprocessor)
                feature = {mode + '/label': _int64_feature(label),
                           mode + '/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                writer.write(example.SerializeToString())
        else:
            # Batch: load everything, then run the preprocessor once over the
            # whole stack (load_image is deliberately called WITHOUT the
            # preprocessor here).
            example_index = 1
            images = []
            labels = []
            for address, label in data[mode]:
                print('\rLoading %s/%s %s image...' % (example_index, len(data[mode]), mode), end='')
                example_index += 1
                img = load_image(address, filters)
                images.append(img)
                labels.append(label)
            images = np.array(images)
            images = preprocessor.apply(images)
            for img, label in zip(images, labels):
                feature = {mode + '/label': _int64_feature(label),
                           mode + '/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
                example = tf.train.Example(features=tf.train.Features(feature=feature))
                writer.write(example.SerializeToString())
        writer.close()
# Earlier invocations kept for reference:
# p = preprocessing.PCAPreprocessor(2)
# save_data('cladonia', 'cladoniapca2', filters=None, preprocessor=p)
# save_data('cladonia')
# Build train/val TFRecords for the 'moss' dataset with default settings
# (runs on import of this module).
save_data('moss', 'moss')
|
# Copyright (c) 2021 Philip May
# This software is distributed under the terms of the MIT license
# which is available at https://opensource.org/licenses/MIT
"""Util functionality and tools."""
import logging
import warnings
from typing import Callable
_logger = logging.getLogger(__name__)
def func_no_exception_caller(func: Callable, *args, **kwargs):
    """Call ``func(*args, **kwargs)``, logging instead of raising on failure.

    Every exception is caught, logged with its traceback, and re-reported
    as a ``RuntimeWarning``.

    Returns:
        The function result, or ``None`` if an exception was raised.
    """
    try:
        return func(*args, **kwargs)
    except Exception as exc:
        error_msg = "Exception raised calling {}! With args: {} kwargs: {} exception: {}".format(
            func.__name__, args, kwargs, exc
        )
        _logger.error(error_msg, exc_info=True)
        warnings.warn(error_msg, RuntimeWarning)
        return None
|
# test delay of "for loop" and lazy computation
#
#
import time
def generated_list():
    """Infinite lazy generator yielding 1, 2, 3, ..., printing each value first.

    Demonstrates that values are produced on demand, not up front.
    """
    value = 1
    while True:
        print("i in generateor:", value)
        yield value
        value += 1
if __name__ == "__main__":
generated = generated_list()
for i in generated:
print("i in main", i)
time.sleep(1)
|
# # num = 45
# # print(type(num))
# # name = 'It Education'
# # print(type(name))
# # floating = 879.548
# # print(type(floating))
# num1 = 87
# num2 = 45
# var = "Youtube"
# print(num1+ float(num2))
# num3 = 548.48
# print(int(num3))
print("What is your name?")
a = input()
print('Your name is', a)
print("What is your age?")
b = int(input())
print('Your age is', b) |
import asyncio
import functools
import threading
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
def with_asyncio_loop(f):
    """Decorator that runs coroutine function *f* to completion synchronously.

    A fresh event loop (with debug mode enabled) is created for every call
    and always closed afterwards, so the wrapped coroutine behaves like a
    plain blocking function to its caller.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        event_loop = asyncio.new_event_loop()
        event_loop.set_debug(True)
        try:
            result = event_loop.run_until_complete(f(*args, **kwargs))
        finally:
            event_loop.close()
        return result
    return wrapper
@skip_on_cudasim('CUDA Driver API unsupported in the simulator')
class TestCudaStream(CUDATestCase):
    """Tests for CUDA stream host callbacks and asyncio integration."""
    def test_add_callback(self):
        """A host callback enqueued on a stream eventually fires."""
        def callback(stream, status, event):
            event.set()
        stream = cuda.stream()
        callback_event = threading.Event()
        stream.add_callback(callback, callback_event)
        self.assertTrue(callback_event.wait(1.0))
    def test_add_callback_with_default_arg(self):
        """add_callback without an arg passes None to the callback."""
        callback_event = threading.Event()
        def callback(stream, status, arg):
            self.assertIsNone(arg)
            callback_event.set()
        stream = cuda.stream()
        stream.add_callback(callback)
        self.assertTrue(callback_event.wait(1.0))
    @with_asyncio_loop
    async def test_async_done(self):
        """async_done() on an idle stream completes."""
        stream = cuda.stream()
        await stream.async_done()
    @with_asyncio_loop
    async def test_parallel_tasks(self):
        """Concurrent tasks each round-trip data through their own stream."""
        async def async_cuda_fn(value_in: float) -> float:
            stream = cuda.stream()
            h_src, h_dst = cuda.pinned_array(8), cuda.pinned_array(8)
            h_src[:] = value_in
            d_ary = cuda.to_device(h_src, stream=stream)
            d_ary.copy_to_host(h_dst, stream=stream)
            # async_done resolves to the stream itself.
            done_result = await stream.async_done()
            self.assertEqual(done_result, stream)
            return h_dst.mean()
        values_in = [1, 2, 3, 4]
        tasks = [asyncio.create_task(async_cuda_fn(v)) for v in values_in]
        values_out = await asyncio.gather(*tasks)
        self.assertTrue(np.allclose(values_in, values_out))
    @with_asyncio_loop
    async def test_multiple_async_done(self):
        """Several async_done futures on one stream all resolve to it."""
        stream = cuda.stream()
        done_aws = [stream.async_done() for _ in range(4)]
        done = await asyncio.gather(*done_aws)
        for d in done:
            self.assertEqual(d, stream)
    @with_asyncio_loop
    async def test_multiple_async_done_multiple_streams(self):
        """Each stream's async_done resolves to its own stream."""
        streams = [cuda.stream() for _ in range(4)]
        done_aws = [stream.async_done() for stream in streams]
        done = await asyncio.gather(*done_aws)
        # Ensure we got the four original streams in done
        self.assertSetEqual(set(done), set(streams))
    @with_asyncio_loop
    async def test_cancelled_future(self):
        """Cancelling one async_done future does not disturb another."""
        stream = cuda.stream()
        done1, done2 = stream.async_done(), stream.async_done()
        done1.cancel()
        await done2
        self.assertTrue(done1.cancelled())
        self.assertTrue(done2.done())
@skip_on_cudasim('CUDA Driver API unsupported in the simulator')
class TestFailingStream(CUDATestCase):
    """A stream whose kernel traps should surface the failure via async_done."""
    # This test can only be run in isolation because it corrupts the CUDA
    # context, which cannot be recovered from within the same process. It is
    # left here so that it can be run manually for debugging / testing purposes
    # - or may be re-enabled if in future there is infrastructure added for
    # running tests in a separate process (a subprocess cannot be used because
    # CUDA will have been initialized before the fork, so it cannot be used in
    # the child process).
    @unittest.skip
    @with_asyncio_loop
    async def test_failed_stream(self):
        """Launch a kernel that traps; awaiting async_done must raise."""
        ctx = cuda.current_context()
        module = ctx.create_module_ptx("""
        .version 6.5
        .target sm_30
        .address_size 64
        .visible .entry failing_kernel() { trap; }
        """)
        failing_kernel = module.get_function("failing_kernel")
        stream = cuda.stream()
        failing_kernel.configure((1,), (1,), stream=stream).__call__()
        done = stream.async_done()
        with self.assertRaises(Exception):
            await done
        self.assertIsNotNone(done.exception())
if __name__ == '__main__':
    unittest.main()
|
# Python 2 script: dump every row of the `users` table to stdout.
import sqlite3
connect = sqlite3.connect('users.db')
c = connect.cursor()
passw = 'SELECT * FROM users'  # NOTE(review): name suggests a password but holds the query
def Ler_Dados():
    # Iterate the cursor lazily; `print row` is Python 2 syntax.
    for row in c.execute(passw):
        print row
Ler_Dados()
# NOTE(review): the connection is never closed -- consider connect.close().
|
import requests
from flask_restful import Resource, request
import re
from bs4 import BeautifulSoup
import json
from common import util
from urllib.parse import urlencode
class BolsaFamilia(Resource):
    """REST resource proxying the Portal da Transparencia 'Bolsa Familia' API."""
    SITE_URL = 'http://www.transparencia.gov.br/api-de-dados/bolsa-familia-disponivel-por-cpf-ou-nis'
    def getBsObject(self, url_next_layer):
        """Fetch `url_next_layer` and return its parsed BeautifulSoup tree."""
        print("Requesting URL: " + url_next_layer)
        res_layer = requests.get(url_next_layer).content
        return BeautifulSoup(res_layer, features="lxml")
    def do_crawler(self, **kargs):
        """Query the upstream API.

        Keyword args: nis, ano_mes_referencia, ano_mes_competencia.

        Returns:
            The first result as a plain dict, or a ({"message": ...}, 404)
            tuple when the API reports no data.
        """
        dict_query = {}
        dict_query['codigo'] = kargs.get('nis')
        dict_query['anoMesReferencia'] = kargs.get('ano_mes_referencia')
        dict_query['anoMesCompetencia'] = kargs.get('ano_mes_competencia')
        result = requests.get(self.SITE_URL + "?" + urlencode(dict_query))
        # BUGFIX: check the status code BEFORE parsing the body -- a 400
        # response with a non-JSON payload used to raise from result.json().
        if result.status_code == 400:
            return {"message":"Nenhum resultado encontrado"}, 404
        res_json = result.json()
        # BUGFIX: an empty result list used to raise IndexError.
        if not res_json:
            return {"message":"Nenhum resultado encontrado"}, 404
        # The original enumerate loop just shallow-copied the first record.
        return dict(res_json[0])
    def post(self):
        """POST endpoint: authorize, validate the JSON params, then crawl."""
        if (not util.checkAuthorization(request.headers)):
            return {"message": "ERROR: Chave de acesso inválida"}, 401
        params = request.get_json()
        if not params:
            return {"message":"Requisição inválida"}, 400
        if 'nis' not in params:
            return {"message":"Parametro 'nis' é obrigatório"}, 400
        if 'ano_mes_referencia' not in params:
            return {"message":"Parametro 'ano_mes_referencia' é obrigatório"}, 400
        if 'ano_mes_competencia' not in params:
            return {"message":"Parametro 'ano_mes_competencia' é obrigatório"}, 400
        return self.do_crawler(nis=params['nis'], ano_mes_competencia=params['ano_mes_competencia'],
                               ano_mes_referencia=params['ano_mes_referencia'])
|
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton
# Inline keyboards for a Telegram quiz bot (aiogram). Each question menu is a
# set of answer buttons: the single correct answer's callback_data ends in
# "well"; wrong answers carry only the question number.
btn_go = InlineKeyboardButton('Начнем!', callback_data='go')
start_menu = InlineKeyboardMarkup().row(btn_go)
# Question 1 -- correct answer: '[1, 2, 2]'.
btn1_v1 = InlineKeyboardButton('[1, 2, 3]', callback_data='1')
btn2_v1 = InlineKeyboardButton('[1, 2, 2]', callback_data='1well')
btn3_v1 = InlineKeyboardButton('[1, 2, 2, 3]', callback_data='1')
btn4_v1 = InlineKeyboardButton('Ошибка', callback_data='1')
first_menu = InlineKeyboardMarkup().row(btn1_v1, btn2_v1).row(btn3_v1).row(btn4_v1)
# Question 2 -- correct answer: 'Ошибка'.
btn1_v2 = InlineKeyboardButton('1', callback_data='2')
btn2_v2 = InlineKeyboardButton('0', callback_data='2')
btn3_v2 = InlineKeyboardButton('Ошибка', callback_data='2well')
second_menu = InlineKeyboardMarkup().row(btn1_v2, btn2_v2).row(btn3_v2)
# Question 3 -- correct answer: 'def'.
btn1_v3 = InlineKeyboardButton('for', callback_data='3')
btn2_v3 = InlineKeyboardButton('func', callback_data='3')
btn3_v3 = InlineKeyboardButton('def', callback_data='3well')
btn4_v3 = InlineKeyboardButton('while', callback_data='3')
third_menu = InlineKeyboardMarkup().row(btn1_v3, btn2_v3).row(btn3_v3, btn4_v3)
# Question 4 -- correct answer: '6'.
btn1_v4 = InlineKeyboardButton('6', callback_data='4well')
btn2_v4 = InlineKeyboardButton('2', callback_data='4')
btn3_v4 = InlineKeyboardButton('5', callback_data='4')
btn4_v4 = InlineKeyboardButton('1', callback_data='4')
fourth_menu = InlineKeyboardMarkup().row(btn1_v4, btn2_v4).row(btn3_v4, btn4_v4)
# Question 5 -- correct answer: '1'.
btn1_v5 = InlineKeyboardButton('5,5', callback_data='5')
btn2_v5 = InlineKeyboardButton('1', callback_data='5well')
btn3_v5 = InlineKeyboardButton('2', callback_data='5')
btn4_v5 = InlineKeyboardButton('Ошибка', callback_data='5')
fifth_menu = InlineKeyboardMarkup().row(btn1_v5, btn2_v5).row(btn3_v5, btn4_v5)
# Question 6 -- correct answer: '8'.
btn1_v6 = InlineKeyboardButton('6', callback_data='6')
btn2_v6 = InlineKeyboardButton('7', callback_data='6')
btn3_v6 = InlineKeyboardButton('8', callback_data='6well')
btn4_v6 = InlineKeyboardButton('Ошибка', callback_data='6')
sixth_menu = InlineKeyboardMarkup().row(btn1_v6, btn2_v6).row(btn3_v6, btn4_v6)
# Question 7 -- correct answer: '6'.
btn1_v7 = InlineKeyboardButton('6', callback_data='7well')
btn2_v7 = InlineKeyboardButton('2a', callback_data='7')
btn3_v7 = InlineKeyboardButton('24', callback_data='7')
btn4_v7 = InlineKeyboardButton('Ошибка', callback_data='7')
seventh_menu = InlineKeyboardMarkup().row(btn1_v7, btn2_v7).row(btn3_v7, btn4_v7)
# Question 8 -- correct answer: 'olleH'.
btn1_v8 = InlineKeyboardButton('Hello', callback_data='8')
btn2_v8 = InlineKeyboardButton('o', callback_data='8')
btn3_v8 = InlineKeyboardButton('olleH', callback_data='8well')
btn4_v8 = InlineKeyboardButton('Ошибка', callback_data='8')
eighth_menu = InlineKeyboardMarkup().row(btn1_v8, btn2_v8).row(btn3_v8, btn4_v8)
# Question 9 -- correct answer: '22222'.
btn1_v9 = InlineKeyboardButton('10', callback_data='9')
btn2_v9 = InlineKeyboardButton('22222', callback_data='9well')
btn3_v9 = InlineKeyboardButton('25', callback_data='9')
btn4_v9 = InlineKeyboardButton('Ошибка', callback_data='9')
ninth_menu = InlineKeyboardMarkup().row(btn1_v9, btn2_v9).row(btn3_v9, btn4_v9)
# Question 10 -- correct answer: '3' (callback prefix '-' instead of '10').
btn1_v10 = InlineKeyboardButton('0', callback_data='-')
btn2_v10 = InlineKeyboardButton('1', callback_data='-')
btn3_v10 = InlineKeyboardButton('2', callback_data='-')
btn4_v10 = InlineKeyboardButton('3', callback_data='-well')
tenth_menu = InlineKeyboardMarkup().row(btn1_v10, btn2_v10).row(btn3_v10, btn4_v10)
# Contact-the-admin menu (URL button, no callback).
admin_btn = InlineKeyboardButton('Перейти в чат', url= 'https://t.me/llizzzaa327')
admin_menu = InlineKeyboardMarkup().row(admin_btn)
import random
import time
import urllib2
import re
from bs4 import BeautifulSoup
import xlwt
import xlrd
from xlutils.copy import copy
import sys
# Python 2 hack: reload sys to restore setdefaultencoding (removed after
# interpreter start) so implicit str/unicode conversions default to utf-8.
reload(sys)
sys.setdefaultencoding('utf-8')
from fake_useragent import UserAgent
ua = UserAgent()  # one random User-Agent chosen per run
headers = {'User-Agent' : ua.random}
def get_info_of_one_category(category_id):
    """Scrape Amazon's 'New Releases' listing for one category id (Python 2).

    Walks every "Next page" link, keeping items whose price is in (5, 40),
    review count below 80 and star rating above 3.6.

    Returns:
        [ASINs, Prices, Stars, Reviews, Images, Titles] on success,
        [category_id] (used by callers as an error marker) when scraping fails,
        or None when no item passed the filters.
    """
    print("\ncategoryID:"+(category_id))
    ASINs = []
    Prices = []
    Reviews = []
    Stars = []
    Images = []
    Titles = []
    info = []
    errCategory = []
    addr = 'https://www.amazon.com/gp/new-releases/hi/'+category_id
    #addr = 'https://www.amazon.com/gp/bestsellers/hi/'+category_id
    #addr = 'https://www.amazon.com/gp/movers-and-shakers/'+category_id
    k=0  # number of pages walked so far
    while True:
        req = urllib2.Request(addr,headers=headers)
        #Get page of 50 items, ignore page of 20 items
        for req_num in range(0,10):
            print("Trying to connect the html......")
            # Up to 10 connection attempts with randomized back-off.
            for try_num in range(0,10):
                try:
                    response = urllib2.urlopen(req,timeout=45)
                    raw_html = response.read()
                    print("Get html page successfully")
                    break
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt.
                    if try_num == 9:
                        print("The html page cannot be reached, quit it. Return errCategory.")
                        errCategory.append(category_id)
                        return errCategory
                    print("Failed to get html, waiting 10s-15s and try again")
                    time.sleep(random.randint(10,15))
            bsObj = BeautifulSoup(raw_html,'html.parser')
            targetObj = bsObj.find("div",id="zg-center-div")
            if targetObj==None:
                print("Get page of 20 items, request again after 5s-10s")
                time.sleep(random.randint(5,10))
            else:
                print("Get page of 50 items at "+str(req_num)+"-th request")
                break
        if req_num == 9 and targetObj == None:
            print("Error: page of 50 items not exist. Each page has 20 items.")
            errCategory.append(category_id)
            return errCategory
        #Get product info in current page
        all_li = targetObj.find_all("li",class_="zg-item-immersion")
        if len(all_li)==0:
            print("li not found, different page appear. Return errCategory.")
            errCategory.append(category_id)
            return errCategory
        for li in all_li:
            # Items are identified by an ASIN-looking "/B.../" URL fragment.
            if re.search(r'/B([0-9A-Z]*)/',str(li)) != None:
                asin = re.search(r'/B([0-9A-Z]*)/',str(li)).group()
                price = 0.0
                target_prc = re.search(r'\$([0-9,\.]*)',str(li))
                if target_prc != None:
                    price = float(str(target_prc.group(1)).replace(',',''))
                # Sentinel defaults: star=10.0 / review=0 pass the filters when
                # no rating information is found on the page.
                star = 10.0
                review = 0
                all_a = li.find_all("a")
                for a in all_a:
                    #print(a.get("href"))
                    rv_tar = re.search(r'review',str(a.get("href")),re.I)
                    if rv_tar != None:
                        star_tar = re.search(r'([0-9\.]*) out of 5 stars',str(a.get_text()),re.I)
                        if star_tar != None:
                            star = float(star_tar.group(1))
                        else:
                            # NOTE(review): this pattern captures a SINGLE
                            # character, so multi-digit review counts are
                            # truncated -- likely meant r'([0-9,]+)'.
                            review_tar = re.search(r'([0-9,])',str(a.get_text()))
                            if review_tar != None:
                                review = int(review_tar.group(1).replace(',',''))
                img = li.find("img")
                title = str(img.get("alt"))
                image = str(img.get("src"))
                #print("ASIN:"+str(asin))
                #print("target_price",target_prc)
                #print(price,review,star)
                #print('\n')
                # Keep only cheap-ish, lightly-reviewed, well-rated items.
                if price > 5 and price < 40 and review < 80 and star > 3.6:
                    #print("\n")
                    ASINs.append(asin)
                    Prices.append(price)
                    Stars.append(star)
                    Reviews.append(review)
                    Images.append(image)
                    Titles.append(title)
        #Get addr of next page
        next_page = addr
        all_a = targetObj.find_all("a")
        for i in all_a:
            if i.find(text = "Next page") != None:
                target_a = i
                next_page = str(target_a.get('href'))
                print("next_page addr="+next_page)
        if next_page == addr:
            # No "Next page" link found: we are on the last page.
            print("Next page not exist! Get "+str(k+1)+" pages in all.")
            break;
        else:
            addr=next_page
            k=k+1
    if len(ASINs)==0:
        return None
    else:
        info.append(ASINs)
        info.append(Prices)
        info.append(Stars)
        info.append(Reviews)
        info.append(Images)
        info.append(Titles)
        return info
def get_info_of_all_category():
    """Drive the scrape across every category listed in CategoryID.xls.

    Each loop iteration scrapes one category via get_info_of_one_category()
    and appends its products to ProductDetails.xls.  Progress counters are
    written back to CategoryID.xls after every category, so an interrupted
    run can resume from where it stopped.
    """
    # Column headers for each newly created per-category sheet.
    pd_title = ["fatherASIN","sonASIN","Price","Image","Title","Review","Stars","Date","Rank1","RankDetail1","Rank2","RankDetail2","Rank3","RankDetail3"]
    while True:
        # Re-open the workbooks every iteration so the freshest counters are
        # read and progress survives a crash mid-run.
        id_wbk_rd = xlrd.open_workbook('CategoryID.xls')
        id_sheet_rd = id_wbk_rd.sheet_by_name('CategoryID')
        index_now = int(id_sheet_rd.cell(0,1).value)  # row of the next category to process
        index_end = int(id_sheet_rd.cell(1,1).value)  # stop once index_now reaches this
        row_nml = int(id_sheet_rd.cell(2,1).value)    # number of successfully scraped categories
        row_err = int(id_sheet_rd.cell(3,1).value)    # next free row in the ErrorCategory sheet
        category_id_rd = str(int(id_sheet_rd.cell(index_now,0).value))
        # xlrd workbooks are read-only; xlutils.copy() yields a writable clone.
        id_wbk_wt = copy(id_wbk_rd)
        dt_wbk_rd = xlrd.open_workbook("ProductDetails.xls")
        dt_wbk_wt = copy(dt_wbk_rd)
        sheet_0 = dt_wbk_wt.get_sheet('Index')
        info = get_info_of_one_category(category_id_rd)
        if info == None:
            # No product passed the filter: just advance and persist the index.
            index_now = index_now + 1
            id_sheet_nml = id_wbk_wt.get_sheet('CategoryID')
            id_sheet_nml.write(0,1,index_now)
            id_wbk_wt.save('CategoryID.xls')
            dt_wbk_wt.save('ProductDetails.xls')
            if index_now == index_end:
                break
            else:
                continue
        elif len(info) == 1:
            # Length-1 info is the error signal: info[0] is the category id.
            id_sheet_err = id_wbk_wt.get_sheet('ErrorCategory')
            id_sheet_err.write(row_err,0,info[0])
            row_err = row_err+1
        else:
            # Normal result: six parallel lists, one entry per kept product.
            ASINs = info[0]
            Prices = info[1]
            Stars = info[2]
            Reviews = info[3]
            Images = info[4]
            Titles = info[5]
            # Register the category in the Index sheet (one column per category).
            sheet_0.write(0,row_nml+1,category_id_rd)
            sheet_0.write(1,row_nml+1,0)
            sheet_0.write(2,row_nml+1,len(ASINs))
            sheet_0.write(3,row_nml+1,1)
            sheet_0.write(4,row_nml+1,0)
            # AllASIN sheet holds 6 columns per category, side by side.
            sheet_2 = dt_wbk_wt.get_sheet('AllASIN')
            new_sheet = dt_wbk_wt.add_sheet(category_id_rd)
            sheet_2.write(0,6*row_nml,category_id_rd)
            for i in range(0,len(ASINs)):
                sheet_2.write(i+1,6*row_nml,ASINs[i])
                sheet_2.write(i+1,6*row_nml+1,Prices[i])
                sheet_2.write(i+1,6*row_nml+2,Stars[i])
                sheet_2.write(i+1,6*row_nml+3,Reviews[i])
                sheet_2.write(i+1,6*row_nml+4,Images[i].decode('utf-8'))
                sheet_2.write(i+1,6*row_nml+5,Titles[i].decode('utf-8'))
            # Write the header row of the fresh per-category detail sheet.
            for j in range(0,len(pd_title)):
                new_sheet.write(0,j,pd_title[j])
            row_nml = row_nml + 1
        # Persist all counters so the next iteration (or a restart) resumes.
        id_sheet_nml = id_wbk_wt.get_sheet('CategoryID')
        index_now = index_now + 1
        id_sheet_nml.write(0,1,index_now)
        id_sheet_nml.write(2,1,row_nml)
        id_sheet_nml.write(3,1,row_err)
        id_wbk_wt.save('CategoryID.xls')
        sheet_0.write(0,0,'id_index')
        sheet_0.write(1,0,0)
        sheet_0.write(2,0,row_nml)
        sheet_0.write(3,0,0)
        dt_wbk_wt.save('ProductDetails.xls')
        if index_now == index_end:
            break
#info = get_info_of_one_category('511228')
#print(info)
#print(len(info[0]))
# Entry point: iterate over every category listed in CategoryID.xls.
get_info_of_all_category()
|
import os

# Ask for a directory, then report how many of its entries carry a
# user-supplied extension (e.g. .py/.sh/.bat/.log/.txt).
rpath = input("Enter your directory: ")
if os.path.isfile(rpath):
    print(f'the given {rpath} is a file. Please pass directory only')
else:
    # NOTE: os.listdir raises if rpath does not exist, as in the original.
    listdire = os.listdir(rpath)
    if len(listdire) > 0:
        ext = input("Required files extension .py/.sh/.bat/.log/.txt: ")
        # Keep only the entries whose name ends with the requested extension.
        allfiles = [eachfile for eachfile in listdire if eachfile.endswith(ext)]
        if allfiles:
            print(f'The directory consists of {len(allfiles)} number of matching files out of {len(listdire)} and the files are {allfiles}')
        else:
            print(f'no matching files with extension {ext} in the location {rpath}')
    else:
        print("directory is empty, please provide the directory which consists of files")
|
import numpy as np
from math import pi
from gdshelpers.geometry.chip import Cell
from gdshelpers.parts.waveguide import Waveguide
from gdshelpers.parts.coupler import GratingCoupler
from gdshelpers.parts.resonator import RingResonator
from gdshelpers.layout import GridLayout
from gdshelpers.parts.marker import CrossMarker
from gdshelpers.parts.marker import SquareMarker
from gdshelpers.helpers.positive_resist import convert_to_positive_resist
from gdshelpers.parts.port import Port
from shapely.geometry import Polygon
from gdshelpers.geometry import geometric_union
from gdshelpers.helpers.under_etching import create_holes_for_under_etching
from gdshelpers.geometry.ebl_frame_generators import raith_marker_frame
from gdshelpers.parts.text import Text
# Grating-coupler parameter sets passed as **kwargs to
# GratingCoupler.make_traditional_coupler_at_port.
# The grating ff is the maximum duty cycle of the grating coupler;
# ap_max_ff is the minimum duty cycle of the apodized section.
#Visible
#minimum_duty = 0.85
#maximum_duty = [0.90, 0.91, 0.92, 0.93, 0.94]
#n_grats = 20
#grating_pitch = [0.54, 0.57, 0.60, 0.63, 0.66]
#1550nm (best performance max_duty=0.80 grating_pitch=0.76)
#maximum_duty = np.linspace(0.75, 0.82, num = 5)
#grating_pitch = np.linspace(0.75, 0.77, num= 10)
# Parameters tuned for visible-wavelength operation.
visible_coup_param = {
    'width': 0.22,
    'full_opening_angle': np.deg2rad(30),
    'grating_period': 0.60,
    'grating_ff': 0.85,
    'ap_max_ff': 0.92,
    'n_gratings': 20,
    'taper_length': 16,
    'n_ap_gratings': 20,
}
# Alternative coupler recipe (fully apodized: n_gratings = 0).
chang_min_coupler_params = {
    'width': 0.22,
    'full_opening_angle': np.deg2rad(30), #40
    'grating_period': 0.57,
    'grating_ff':0.85, #minigap = 30nm
    'ap_max_ff':0.92,
    'n_gratings': 0, #20
    'taper_length': 10, #16um
    'n_ap_gratings':20, #20
}
def make_frequency_shifter_waveguide(GC_param, input_angle):
    """Build the four-port optical waveguide network of the frequency shifter.

    Lays out four waveguides ("wg3-wg1-wg2-wg4" from left to right), each
    terminated by a grating coupler, plus the two extension waveguides that
    cross the interaction region.

    Keyword Arguments:
        GC_param -- dict of grating-coupler parameters, passed as **kwargs to
                    GratingCoupler.make_traditional_coupler_at_port
        input_angle -- incidence angle in DEGREES (converted to radians here)

    Returns:
        (Input_1, Input_2, Input_3, Input_4, wg1_extend, wg2_extend)
        -- four shapely objects (waveguide + positive-resist GC, with the
        short GC stub subtracted) and the two extension Waveguides.
    """
    # Create the Optical waveguide and GCs
    # Optical GC on the left---------------------------------------------------------------------------
    # The waveguide number from left to right "wg3-wg1-wg2-wg4"
    incident_waveguide_expand_width = 1
    incident_waveguide_width = 0.3
    output_waveguide_fin_width = incident_waveguide_width
    incident_angle = input_angle * np.pi / 180
    Initial_angle = np.pi - incident_angle
    # Widths are 3-element lists: [cladding, core, cladding] stripe pattern.
    wg1 = Waveguide.make_at_port(Port((-30, 150), angle=Initial_angle, width=[3, 5, 3]))
    wg1.add_straight_segment(length=100, final_width=[3, incident_waveguide_expand_width, 3])
    wg1.add_straight_segment(length=50, final_width=[3, incident_waveguide_width, 3])
    wg1.add_bend(-pi, radius=50)
    wg1.add_straight_segment(length=49) #Straight wg before bending up
    wg1.add_bend(pi / 2 + incident_angle, radius=50)
    # Short single-stripe stub that is subtracted later, opening the core
    # where the waveguide enters the grating coupler.
    wg1_to_GC = Waveguide.make_at_port(wg1.current_port, width=incident_waveguide_width)
    wg1_to_GC.add_straight_segment(length=5)
    wg1.add_straight_segment(length=5)
    GC2 = GratingCoupler.make_traditional_coupler_at_port(wg1.current_port, **GC_param)
    wg1_shapely = wg1.get_shapely_object()
    # Positive-resist conversion with a 5 um buffer around the coupler.
    GC2_shapely = convert_to_positive_resist(GC2.get_shapely_object(), 5)
    wg1_to_GC = wg1_to_GC.get_shapely_object()
    Input_2 = wg1_shapely.union(GC2_shapely)
    Input_2 = Input_2.difference(wg1_to_GC)
    # -----------------------------------------------------------------------------------------------
    # Optical GC on the right-----------------------------------------------------------------------
    # left_coupler = GratingCoupler.make_traditional_coupler(origin, angle=0, **coupler_params)
    wg2 = Waveguide.make_at_port(Port((-30 + 250, 150), angle=incident_angle, width=[3, 15, 3]))
    wg2.add_straight_segment(length=100, final_width=[3, incident_waveguide_expand_width, 3])
    wg2.add_straight_segment(length=50, final_width=[3, incident_waveguide_width, 3])
    wg2.add_bend(pi, radius=50)
    wg2.add_straight_segment(length=50) #Straight wg before bending up
    wg2.add_bend(-pi / 2 - incident_angle, radius=49)
    wg2_to_GC = Waveguide.make_at_port(wg2.current_port, width=incident_waveguide_width)
    wg2_to_GC.add_straight_segment(length=5)
    wg2.add_straight_segment(length=5)
    GC3 = GratingCoupler.make_traditional_coupler_at_port(wg2.current_port, **GC_param)
    #
    wg2_shapely = wg2.get_shapely_object()
    GC3_shapely = convert_to_positive_resist(GC3.get_shapely_object(), 5)
    wg2_to_GC = wg2_to_GC.get_shapely_object()
    Input_3 = wg2_shapely.union(GC3_shapely)
    Input_3 = Input_3.difference(wg2_to_GC)
    # Extended waveguide for the other side's waveguide
    wg1_extend = Waveguide.make_at_port(Port((-30, 150), angle=-incident_angle, width=[3, 1.5, 3]))
    wg1_extend.add_straight_segment(length=250, final_width=[3, 15, 3])
    wg2_extend = Waveguide.make_at_port(Port((-30 + 250, 150), angle=np.pi + incident_angle, width=[3, 3, 3]))
    wg2_extend.add_straight_segment(length=250, final_width=[3, 5, 3])
    # WG on the bottom left
    wg3 = Waveguide.make_at_port(wg2_extend.current_port)
    wg3.add_straight_segment(length=188, final_width=[3, output_waveguide_fin_width, 3])
    wg3.add_bend(-pi / 2 - incident_angle, radius=63.5)
    wg3.add_straight_segment(length=248, final_width=[3, output_waveguide_fin_width, 3]) # LAST Straight to GC
    wg3_to_GC = Waveguide.make_at_port(wg3.current_port, width=output_waveguide_fin_width)
    wg3_to_GC.add_straight_segment(length=5)
    wg3.add_straight_segment(length=5)
    GC1 = GratingCoupler.make_traditional_coupler_at_port(wg3.current_port, **GC_param)
    wg3_shapely = wg3.get_shapely_object()
    GC1_shapely = convert_to_positive_resist(GC1.get_shapely_object(), 5)
    wg3_to_GC = wg3_to_GC.get_shapely_object()
    Input_1 = wg3_shapely.union(GC1_shapely)
    Input_1 = Input_1.difference(wg3_to_GC)
    # Driect transmission without deflection
    wg4 = Waveguide.make_at_port(wg1_extend.current_port)
    wg4.add_straight_segment(length=188, final_width=[3, output_waveguide_fin_width, 3])
    wg4.add_bend(pi / 2 + incident_angle, radius=63.5)
    wg4.add_straight_segment(length=248, final_width=[3, output_waveguide_fin_width, 3])
    wg4_to_GC = Waveguide.make_at_port(wg4.current_port, width=output_waveguide_fin_width)
    wg4_to_GC.add_straight_segment(length=5)
    wg4.add_straight_segment(length=5)
    GC4 = GratingCoupler.make_traditional_coupler_at_port(wg4.current_port, **GC_param)
    wg4_shapely = wg4.get_shapely_object()
    GC4_shapely = convert_to_positive_resist(GC4.get_shapely_object(), 5)
    wg4_to_GC = wg4_to_GC.get_shapely_object()
    Input_4 = wg4_shapely.union(GC4_shapely)
    Input_4 = Input_4.difference(wg4_to_GC)
    return Input_1, Input_2, Input_3, Input_4, wg1_extend, wg2_extend
def make_IDT_Fingers(figer_widths, number_of_period, IDT_Aperature, ZnO_Top_left):
    """Create a uniform (non-chirped) interdigitated transducer (IDT).

    Builds the lower/upper finger combs as multi-stripe Waveguides plus the
    two small contact pads flanking the fingers.

    Keyword Arguments:
        figer_widths -- width of one metal finger (um)
        number_of_period -- number of finger pairs (cast to int)
        IDT_Aperature -- finger overlap length / acoustic aperture (um)
        ZnO_Top_left -- x coordinate anchoring the finger origin (um)

    Returns:
        (Finger_lower, Finger_upper, small_pad_arm1_1, small_pad_arm1_2)
        -- two gdshelpers Waveguides and two shapely pad Polygons.
    """
    # Finger characteristics: pitch and the offset that interleaves the combs.
    figer_width = figer_widths
    pitch = figer_width * 2.8
    Figer_gap_offset = (figer_width + pitch) / 2 + figer_width / 2
    # How many pairs of IDT fingers
    number_of_period = int(number_of_period)
    how_many_period = number_of_period
    # Finger coordinate (correction of different IDT aperature)
    Finger_origin_x = ZnO_Top_left + 5 + 7  # fingers offset (5um) + small pad width with extension
    Finger_origin_y = -5
    Finger_length = IDT_Aperature
    # Pad coordinate
    arm2_right_pad_Offset = Finger_length + 4.5
    pad_length = how_many_period * 2
    # One comb period encoded as a Waveguide width list: [metal, metal+gap]
    # repeated how_many_period times gives the full multi-stripe profile.
    one_period_arm2 = [figer_width, figer_width + pitch]
    Idt_finger_arm2 = []
    for _ in range(how_many_period):
        Idt_finger_arm2.extend(one_period_arm2)
    # Lower and upper finger combs, drawn pointing left and vertically offset
    # by Figer_gap_offset so the two combs interdigitate.
    Finger_lower = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x - 5 + Finger_length, Finger_origin_y), angle=np.pi, width=Idt_finger_arm2))
    Finger_lower.add_straight_segment(length=Finger_length)
    Finger_upper = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x + Finger_length, Finger_origin_y + Figer_gap_offset), angle=np.pi,
             width=Idt_finger_arm2))
    Finger_upper.add_straight_segment(length=Finger_length)
    # Left small pad: 5 um wide vertical strip just left of the fingers.
    outer_corners_arm1_1 = [
        (Finger_origin_x - 9, Finger_origin_y - pad_length * (figer_width + pitch) / 2),  # bottom-left
        (Finger_origin_x - 4, Finger_origin_y - pad_length * (figer_width + pitch) / 2),  # bottom-right
        (Finger_origin_x - 4, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (number_of_period - 26) - 16),  # top-right
        (Finger_origin_x - 9, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (number_of_period - 26) - 16),  # top-left
    ]
    # Right small pad: mirror strip just right of the fingers.
    outer_corners_arm1_2 = [
        (Finger_origin_x + arm2_right_pad_Offset - 0, Finger_origin_y - pad_length * (figer_width + pitch) / 2),  # bottom-right
        (Finger_origin_x + arm2_right_pad_Offset - 5, Finger_origin_y - pad_length * (figer_width + pitch) / 2),  # bottom-left
        (Finger_origin_x + arm2_right_pad_Offset - 5, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (number_of_period - 26) - 16),  # top-left
        (Finger_origin_x + arm2_right_pad_Offset - 0, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (number_of_period - 26) - 16),  # top-right
    ]
    small_pad_arm1_1 = Polygon(outer_corners_arm1_1)
    small_pad_arm1_2 = Polygon(outer_corners_arm1_2)
    return Finger_lower, Finger_upper, small_pad_arm1_1, small_pad_arm1_2
def make_IDT_Fingers_v2(figer_widths, number_of_pairs, IDT_Aperature, ZnO_Top_left):
    """Create a chirped IDT: finger widths sweep linearly 0.19 -> 0.17 um.

    Like make_IDT_Fingers, but each finger pair has its own width/pitch
    (chirped), and large probe pads are produced in addition to the small
    contact pads.

    Keyword Arguments:
        figer_widths -- NOTE(review): unused here; widths come from the
                        hard-coded start_width/end_width chirp below
        number_of_pairs -- number of finger pairs (cast to int)
        IDT_Aperature -- finger overlap length / acoustic aperture (um)
        ZnO_Top_left -- x coordinate anchoring the finger origin (um)

    Returns:
        (Finger_lower, Finger_upper, small_pad_arm1_1, small_pad_arm1_2,
         Big_pad1_1, Big_pad1_2)
    """
    # Creat the IDT
    # Change parameter here:
    # How many pairs of IDT fingers
    number_of_pairs = int(number_of_pairs)
    how_many_period = number_of_pairs
    # Finger coordinate (correction of different IDT aperature)
    Finger_origin_x = ZnO_Top_left + 5 + 7 # last two term = figers offset (5um) + small_pad_width_with extended(10um)
    Finger_origin_y = -5
    # Finger offset on the other side of the horn structure
    Finger_left_offset = 298
    Finger_length = IDT_Aperature
    # Pad coordinate
    arm2_right_pad_Offset = Finger_length + 4.5
    pad_length = how_many_period * 2
    Right_IDT_final_Angel = np.pi / 4 + np.pi / 25 # angel decrease, the right exposed finger is shorter
    Left_IDT_final_angel = - np.pi / 4.7 # angle decress, the left exposed finger is shorter #Demominator has to bigger than 4
    # Below DO NOT CHANGE ------------------------------------------------------------------------------
    #one_period_arm2 = [finger_width, finger_width + pitch]
    Idt_finger_arm2 = []
    # Linear chirp of finger widths, reversed so the widest finger comes first.
    start_width =0.17
    end_width = 0.19
    chirp_widths = np.linspace(start_width, end_width, num= number_of_pairs )
    chirp_widths = chirp_widths[::-1]
    # The comb offset is computed from the AVERAGE chirped width/pitch.
    average_chirp_finger_width = (start_width + end_width)/2
    average_chirp_finger_pitch = average_chirp_finger_width*2.8
    Chirped_finger_gap_offsets = (average_chirp_finger_width + average_chirp_finger_pitch)/2 + average_chirp_finger_width/2
    #for i in range(how_many_period):
    # Build the multi-stripe width list: [metal, metal+gap] per chirped pair.
    for chirp_width in chirp_widths:
        chirp_pitch = chirp_width*2.8
        one_period_chirp_arm = [chirp_width, chirp_width + chirp_pitch]
        Idt_finger_arm2.extend(one_period_chirp_arm)
    # IDT ON THE "right" GRATING COUPLER
    # Finger_lower is lower idt fingers, Finger_upper is upper IDT finger (on the horn on the right)
    Finger_lower = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x - 5 + Finger_length, Finger_origin_y), angle=np.pi, width=Idt_finger_arm2))
    Finger_lower.add_straight_segment(length=Finger_length)
    Finger_upper = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x + Finger_length, Finger_origin_y + Chirped_finger_gap_offsets), angle=np.pi,
             width=Idt_finger_arm2))
    Finger_upper.add_straight_segment(length=Finger_length)
    # Left small pad------------------------------------------------------------------------------------------------
    finger_width = average_chirp_finger_width
    pitch = average_chirp_finger_pitch
    outer_corners_arm1_1 = [(Finger_origin_x - 9, Finger_origin_y - pad_length * (finger_width + pitch) / 2),
                            # Bot-left corner
                            (Finger_origin_x - 4, Finger_origin_y - pad_length * (finger_width + pitch) / 2),
                            # Bot-Right corner
                            (Finger_origin_x - 4,
                             Finger_origin_y + pad_length * (finger_width + pitch) / 1 - (number_of_pairs - 26) - 16),
                            # Top-Right corner
                            (Finger_origin_x - 9,
                             Finger_origin_y + pad_length * (finger_width + pitch) / 1 - (number_of_pairs - 26) - 16)
                            # Top-left corner
                            ]
    bot_right_y = Finger_origin_y - pad_length * (finger_width + pitch) / 2 + 20
    # Left Big pad: 100 x 150 um probe pad attached left of the small pad.
    X_tr = Finger_origin_x - 4
    Y_tr = bot_right_y  # TOP_RIGHT
    X_tl = Finger_origin_x - 4 - 100
    Y_tl = bot_right_y  # TOP_LEFT
    X_br = Finger_origin_x - 4
    Y_br = bot_right_y - 150  # BOT_RIGHT
    X_bl = Finger_origin_x - 4 - 100
    Y_bl = bot_right_y - 150  # BOT_LEFT:
    Outer_corners_Big_pad1_1 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl, Y_bl),
                                (X_br, Y_br)
                                ]
    # Right small pad-----------------------------------------------------------------------------------------------
    outer_corners_arm1_2 = [
        (Finger_origin_x + arm2_right_pad_Offset - 0, Finger_origin_y - pad_length * (finger_width + pitch) / 2),
        # Bot-right corner
        (Finger_origin_x + arm2_right_pad_Offset - 5, Finger_origin_y - pad_length * (finger_width + pitch) / 2),
        # Bot-left corner
        (Finger_origin_x + arm2_right_pad_Offset - 5,
         Finger_origin_y + pad_length * (finger_width + pitch) / 1 - (number_of_pairs - 26) - 16),  # Top-left corner
        (Finger_origin_x + arm2_right_pad_Offset - 0,
         Finger_origin_y + pad_length * (finger_width + pitch) / 1 - (number_of_pairs - 26) - 16)]  # Top-right corner
    #Right big pad: 300 x 150 um probe pad attached right of the small pad.
    bot_right_y = Finger_origin_y - pad_length * (finger_width + pitch) / 2 + 20
    X_tl = Finger_origin_x + arm2_right_pad_Offset - 5
    Y_tl = bot_right_y  # TOP_RIGHT
    X_tr = Finger_origin_x + arm2_right_pad_Offset - 5 + 300
    Y_tr = bot_right_y  # TOP_LEFT
    X_br = Finger_origin_x + arm2_right_pad_Offset - 5 + 300
    Y_br = bot_right_y - 150  # BOT_RIGHT
    X_bl = Finger_origin_x + arm2_right_pad_Offset - 5
    Y_bl = bot_right_y - 150  # BOT_LEFT:
    Outer_corners_Big_pad1_2 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl, Y_bl),
                                (X_br, Y_br)
                                ]
    small_pad_arm1_1 = Polygon(outer_corners_arm1_1)
    Big_pad1_1 = Polygon(Outer_corners_Big_pad1_1)
    small_pad_arm1_2 = Polygon(outer_corners_arm1_2)
    Big_pad1_2 = Polygon(Outer_corners_Big_pad1_2)
    return Finger_lower, Finger_upper, small_pad_arm1_1, small_pad_arm1_2, Big_pad1_1, Big_pad1_2
def make_IDT_Fingers_pairs(figer_widths, number_of_pairs, IDT_Aperature, ZnO_Top_left, prop_length):
    """Create a facing PAIR of chirped IDTs separated by prop_length.

    Vertical fingers (angle pi/2) this time: one chirped IDT at the anchor
    position and an identical one shifted left by prop_length, with small
    contact pads for both.

    Keyword Arguments:
        figer_widths -- nominal finger width (um); used only for the pad
                        geometry, the fingers themselves are chirped
        number_of_pairs -- number of finger pairs (cast to int)
        IDT_Aperature -- finger overlap length / acoustic aperture (um)
        ZnO_Top_left -- x coordinate anchoring the right IDT (um)
        prop_length -- acoustic propagation distance between the two IDTs (um)

    Returns:
        (Finger_lower, Finger_upper, Finger_lower_other_side,
         Finger_upper_other_side, small_pad_arm1_1, small_pad_arm1_2,
         small_pad_arm2_1, small_pad_arm2_2)
    """
    # Creat the IDT
    # Change parameter here:
    # Finger characteristics
    figer_width = figer_widths
    pitch = figer_width * 2.8
    Figer_gap_offset = (figer_width + pitch) / 2 + figer_width / 2
    # How many pairs of IDT fingers
    number_of_pairs = int(number_of_pairs)
    how_many_period = number_of_pairs
    radius = how_many_period / 1.7
    # Finger coordinate (correction of different IDT aperature)
    Finger_origin_x = ZnO_Top_left + 5 + 7 # last two term = figers offset (5um) + small_pad_width_with extended(10um)
    Finger_origin_y = -5
    # Finger offset on the other side of the horn structure
    Finger_left_offset = prop_length
    Finger_length = IDT_Aperature
    # Pad coordinate
    arm2_right_pad_Offset = Finger_length + 4.5
    pad_length = how_many_period * 2
    Right_IDT_final_Angel = np.pi / 4 + np.pi / 25 # angel decrease, the right exposed finger is shorter
    Left_IDT_final_angel = - np.pi / 4.7 # angle decress, the left exposed finger is shorter #Demominator has to bigger than 4
    top_right = -1 * Right_IDT_final_Angel
    top_left = -1 * Left_IDT_final_angel
    # Below DO NOT CHANGE ------------------------------------------------------------------------------
    #one_period_arm2 = [figer_width, figer_width + pitch]
    Idt_finger_arm2 = []
    # Linear chirp of finger widths, reversed so the widest finger comes first.
    start_width = 0.17
    end_width = 0.19
    chirp_widths = np.linspace(start_width, end_width, num=number_of_pairs)
    chirp_widths = chirp_widths[::-1]
    # Comb offset derives from the AVERAGE chirped width/pitch.
    average_chirp_finger_width = (start_width + end_width) / 2
    average_chirp_finger_pitch = average_chirp_finger_width * 2.8
    Chirped_finger_gap_offsets = (average_chirp_finger_width + average_chirp_finger_pitch) / 2 + average_chirp_finger_width / 2
    # for i in range(how_many_period):
    # Multi-stripe width list: [metal, metal+gap] per chirped pair.
    for chirp_width in chirp_widths:
        chirp_pitch = chirp_width * 2.8
        one_period_chirp_arm = [chirp_width, chirp_width + chirp_pitch]
        Idt_finger_arm2.extend(one_period_chirp_arm)
    # IDT ON THE "right" GRATING COUPLER
    # Finger_lower is lower idt fingers, Finger_upper is upper IDT finger (on the horn on the right)
    Finger_lower = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x , Finger_origin_y - 5), angle=np.pi/2, width=Idt_finger_arm2))
    Finger_lower.add_straight_segment(length=Finger_length)
    Finger_upper = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x + Chirped_finger_gap_offsets, Finger_origin_y ), angle=np.pi/2,
             width=Idt_finger_arm2))
    Finger_upper.add_straight_segment(length=Finger_length)
    # SAME IDT ON THE "left" GRATING COUPLER ---------------------------------------------------------------------------------------------------
    # Finger_lower_other_side is left IDT finger, wg_2 is right IDT finger
    Finger_lower_other_side = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset , Finger_origin_y-5), angle=np.pi/2, width=Idt_finger_arm2))
    Finger_lower_other_side.add_straight_segment(length = Finger_length)
    Finger_upper_other_side = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset + Chirped_finger_gap_offsets, Finger_origin_y ), angle=np.pi / 2, width=Idt_finger_arm2))
    Finger_upper_other_side.add_straight_segment(length=Finger_length)
    # Make Small metal pad
    # Left IDT's bottom pad (arm2_1) and top pad (arm2_2).
    shift_2 = 10
    outer_corners_arm2_1 = [
        (Finger_origin_x + pad_length * (figer_width + pitch) / 2 - Finger_left_offset - (number_of_pairs - 34) + shift_2 , Finger_origin_y - 10),
        (Finger_origin_x + pad_length * (figer_width + pitch) / 2 - Finger_left_offset - (number_of_pairs - 34) + shift_2 , Finger_origin_y - 5),
        (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - Finger_left_offset + shift_2, Finger_origin_y - 5),
        (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - Finger_left_offset + shift_2, Finger_origin_y - 10)]
    outer_corners_arm2_2 = [(Finger_origin_x + pad_length * (figer_width + pitch) / 2 - Finger_left_offset - (number_of_pairs - 34) + shift_2 ,
                             Finger_origin_y + arm2_right_pad_Offset),
                            (Finger_origin_x + pad_length * (figer_width + pitch) / 2 - Finger_left_offset - (number_of_pairs - 34) + shift_2,
                             Finger_origin_y + arm2_right_pad_Offset - 5),
                            (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - Finger_left_offset+ shift_2,
                             Finger_origin_y + arm2_right_pad_Offset - 5),
                            (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - Finger_left_offset+ shift_2,
                             Finger_origin_y + arm2_right_pad_Offset)]
    # --------------------------------------------------------------------------------------------------------------
    # Right_bot small pad------------------------------------------------------------------------------------------------
    shift = -10
    outer_corners_arm1_1 = [(Finger_origin_x - pad_length * (figer_width + pitch) / 2 - shift, Finger_origin_y - 9),
                            # Bot-left corner
                            (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - shift, Finger_origin_y - 4),
                            # Bot-Right corner
                            (Finger_origin_x + pad_length * (figer_width + pitch) / 1.3 - (number_of_pairs - 26) - 16 - shift, Finger_origin_y - 4) ,
                            # Top-Right corner
                            (Finger_origin_x + pad_length * (figer_width + pitch) / 1.3 - (number_of_pairs - 26) - 16 - shift, Finger_origin_y - 9)
                            # Top-left corner
                            ]
    # Right_top small pad-----------------------------------------------------------------------------------------------
    outer_corners_arm1_2 = [
        (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - shift, Finger_origin_y + arm2_right_pad_Offset - 0),
        # Bot-right corner
        (Finger_origin_x - pad_length * (figer_width + pitch) / 2 - shift, Finger_origin_y + arm2_right_pad_Offset - 5),
        # Bot-left corner
        (Finger_origin_x + pad_length * (figer_width + pitch) / 1.3 - (number_of_pairs - 26) - 16 - shift,
         Finger_origin_y + arm2_right_pad_Offset - 5), # Top-left corner
        (Finger_origin_x + pad_length * (figer_width + pitch) / 1.3 - (number_of_pairs - 26) - 16 - shift,
         Finger_origin_y + arm2_right_pad_Offset - 0)] # Top-right corner
    # Left Big pad
    # NOTE(review): Outer_corners_Big_pad1_1 is computed but never used or
    # returned -- dead code kept verbatim (doc-only pass).
    Outer_corners_Big_pad1_1 = [(Finger_origin_x - 9, Finger_origin_y - pad_length * (figer_width + pitch) / 2),
                                # Bot-left corner
                                (Finger_origin_x - 4, Finger_origin_y - pad_length * (figer_width + pitch) / 2),
                                # Bot-Right corner
                                (Finger_origin_x - 4, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (
                                        number_of_pairs - 26) - 16), # Top-Right corner
                                (Finger_origin_x - 9, Finger_origin_y + pad_length * (figer_width + pitch) / 1.3 - (
                                        number_of_pairs - 26) - 16) # Top-left corner
                                ]
    # --------------------------------------------------------------------------------------------------------------
    small_pad_arm2_1 = Polygon(outer_corners_arm2_1)
    small_pad_arm2_2 = Polygon(outer_corners_arm2_2)
    small_pad_arm1_1 = Polygon(outer_corners_arm1_1)
    small_pad_arm1_2 = Polygon(outer_corners_arm1_2)
    return Finger_lower, Finger_upper, Finger_lower_other_side, Finger_upper_other_side, small_pad_arm1_1, small_pad_arm1_2 ,small_pad_arm2_1, small_pad_arm2_2
def make_Split_IDT_Fingers_pairs(figer_widths, number_of_pairs, IDT_Aperature, ZnO_Top_left, prop_length):
# Creat the IDT
# Change parameter here:
# Finger characteristics
figer_width = figer_widths
pitch = figer_width * 7
Figer_gap_offset = (figer_width + pitch) / 2
# How many pairs of IDT fingers
number_of_pairs = int(number_of_pairs)
how_many_period = number_of_pairs
radius = how_many_period / 1.7
# Finger coordinate (correction of different IDT aperature)
Finger_origin_x = ZnO_Top_left + 5 + 7 # last two term = figers offset (5um) + small_pad_width_with extended(10um)
Finger_origin_y = -5
# Finger offset on the other side of the horn structure
Finger_left_offset = prop_length
Finger_length = IDT_Aperature
# Pad coordinate
arm2_right_pad_Offset = Finger_length + 4.5
pad_length = how_many_period * 2
Right_IDT_final_Angel = np.pi / 4 + np.pi / 25 # angel decrease, the right exposed finger is shorter
Left_IDT_final_angel = - np.pi / 4.7 # angle decress, the left exposed finger is shorter #Demominator has to bigger than 4
top_right = -1 * Right_IDT_final_Angel
top_left = -1 * Left_IDT_final_angel
# Below DO NOT CHANGE ------------------------------------------------------------------------------
#one_period_arm2 = [figer_width, figer_width + pitch]
Idt_finger_arm2 = []
one_period_arm2 = [figer_width, pitch]
for i in range(how_many_period):
Idt_finger_arm2.extend(one_period_arm2)
# IDT ON THE "right" GRATING COUPLER
# Finger_lower1 is lower idt fingers, Finger_upper1 is upper IDT finger (on the horn on the right)
Finger_lower1 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x , Finger_origin_y - 5), angle=np.pi/2, width=Idt_finger_arm2))
Finger_lower1.add_straight_segment(length=Finger_length)
Finger_lower2 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x + 2*figer_width, Finger_origin_y - 5), angle=np.pi/2, width=Idt_finger_arm2))
Finger_lower2.add_straight_segment(length=Finger_length)
Finger_upper1 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x + Figer_gap_offset, Finger_origin_y ), angle=np.pi/2,
width=Idt_finger_arm2))
Finger_upper1.add_straight_segment(length=Finger_length)
Finger_upper2 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x + Figer_gap_offset + 2*figer_width, Finger_origin_y), angle=np.pi / 2,
width=Idt_finger_arm2))
Finger_upper2.add_straight_segment(length=Finger_length)
# SAME IDT ON THE "left" GRATING COUPLER ---------------------------------------------------------------------------------------------------
# Finger_lower_other_side is left IDT finger, wg_2 is right IDT finger
Finger_lower_other_side1 = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset , Finger_origin_y-5), angle=np.pi/2, width=Idt_finger_arm2))
Finger_lower_other_side1.add_straight_segment(length = Finger_length)
Finger_lower_other_side2 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x + 2*figer_width - Finger_left_offset, Finger_origin_y - 5), angle=np.pi / 2,
width=Idt_finger_arm2))
Finger_lower_other_side2.add_straight_segment(length=Finger_length)
Finger_upper_other_side1 = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset + Figer_gap_offset, Finger_origin_y ), angle=np.pi / 2, width=Idt_finger_arm2))
Finger_upper_other_side1.add_straight_segment(length=Finger_length)
Finger_upper_other_side2 = Waveguide.make_at_port(
Port(origin=(Finger_origin_x + 2*figer_width - Finger_left_offset + Figer_gap_offset, Finger_origin_y), angle=np.pi / 2,
width=Idt_finger_arm2))
Finger_upper_other_side2.add_straight_segment(length=Finger_length)
# Make Small metal pad
shift_2 = number_of_pairs*(pitch)/1.5
right_small_pad_right_offset = shift_2 / 10
left_small_pad_left_offset = right_small_pad_right_offset
#Left_bot
outer_corners_arm2_1 = [
(Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 10),
(Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 5),
(Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 5),
(Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 10)]
# Left_top
outer_corners_arm2_2 = [(Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset),
(Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset - 5),
(Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset - 5),
(Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset)]
# --------------------------------------------------------------------------------------------------------------
# Right_bot small pad------------------------------------------------------------------------------------------------
outer_corners_arm1_1 = [(Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y - 9),
# Bot-left corner
(Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y - 4),
# Bot-left corner
(Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y - 4) ,
# Top-Right corner
(Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y - 9)
# Top-left corner
]
# Right_top small pad-----------------------------------------------------------------------------------------------
outer_corners_arm1_2 = [
(Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 0),
# Bot-right corner
(Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 5),
# Bot-left corner
(Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 5), # Top-left corner
(Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 0)] # Top-right corner
# Left Big pad bot
bot_left_y = Finger_origin_y - 5
bot_left_x = Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset
X_tr = bot_left_x
Y_tr = bot_left_y # TOP_RIGHT
X_tl = X_tr - 300
Y_tl = Y_tr # TOP_LEFT
X_bl_ext1 = X_tl
Y_bl_ext1 = Y_tl - 50 # BOT_left extend to middle upper point
X_bl_ext2 = X_bl_ext1 + 120
Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
X_bl = X_bl_ext2
Y_bl = Y_bl_ext2 - 150 # BOT_LEFT:
X_br = X_tr
Y_br = Y_bl # BOT_RIGHT
buffer_cord_x = X_tl
buffer_cord_y = Y_tl
buffer_cord_x2 = X_bl_ext2
buffer_cord_y2 = Y_bl_ext1
Outer_corners_Big_pad1_1 = [(X_tr, Y_tr),
(X_tl, Y_tl),
(X_bl_ext1, Y_bl_ext1),
(X_bl_ext2, Y_bl_ext2),
(X_bl, Y_bl),
(X_br, Y_br)
]
# Left Big pad top
top_right_y = Finger_origin_y + arm2_right_pad_Offset
top_right_x = Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset
X_tr = top_right_x
Y_tr = top_right_y # TOP_RIGHT
X_tl = buffer_cord_x - 50
Y_tl = Y_tr # TOP_LEFT
X_bl_ext1 = X_tl
Y_bl_ext1 = buffer_cord_y2 - 150 # BOT_left extend to middle upper point
X_bl_ext2 = buffer_cord_x2 - 10
Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
X_bl_ext3 = X_bl_ext2
Y_bl_ext3 = buffer_cord_y2 - 10 # BOT_RIGHT extend to middle lower point
X_bl_ext4 = buffer_cord_x - 10
Y_bl_ext4 = Y_bl_ext3
X_bl = X_bl_ext4
Y_bl = buffer_cord_y + 10 # BOT_LEFT:
X_br = X_tr
Y_br = Y_bl # BOT_RIGHT
Outer_corners_Big_pad1_2 = [(X_tr, Y_tr),
(X_tl, Y_tl),
(X_bl_ext1, Y_bl_ext1),
(X_bl_ext2, Y_bl_ext2),
(X_bl_ext3, Y_bl_ext3),
(X_bl_ext4, Y_bl_ext4),
(X_bl, Y_bl),
(X_br, Y_br)
]
#-----Right big pad bot
bot_right_x = Finger_origin_x + shift_2 + right_small_pad_right_offset
bot_right_y = Finger_origin_y - 4
X_tr = bot_right_x
Y_tr = bot_right_y # TOP_RIGHT
X_tl = X_tr + 300
Y_tl = Y_tr # TOP_LEFT
X_bl_ext1 = X_tl
Y_bl_ext1 = Y_tl - 50 # BOT_left extend to middle upper point
X_bl_ext2 = X_bl_ext1 - 120
Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
X_bl = X_bl_ext2
Y_bl = Y_bl_ext2 - 150 # BOT_LEFT:
X_br = X_tr
Y_br = Y_bl # BOT_RIGHT
buffer_cord_x = X_tl
buffer_cord_y = Y_tl
buffer_cord_x2 = X_bl_ext2
buffer_cord_y2 = Y_bl_ext1
Outer_corners_Big_pad2_1 = [(X_tr, Y_tr),
(X_tl, Y_tl),
(X_bl_ext1, Y_bl_ext1),
(X_bl_ext2, Y_bl_ext2),
(X_bl, Y_bl),
(X_br, Y_br)
]
#Right top big pad
top_right_y = Finger_origin_y + arm2_right_pad_Offset - 0
top_right_x = bot_right_x
X_tr = top_right_x
Y_tr = top_right_y # TOP_RIGHT
X_tl = buffer_cord_x + 50
Y_tl = Y_tr # TOP_LEFT
X_bl_ext1 = X_tl
Y_bl_ext1 = buffer_cord_y2 - 150 # BOT_left extend to middle upper point
X_bl_ext2 = buffer_cord_x2 + 10
Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
X_bl_ext3 = X_bl_ext2
Y_bl_ext3 = buffer_cord_y2 - 10 # BOT_RIGHT extend to middle lower point
X_bl_ext4 = buffer_cord_x + 10
Y_bl_ext4 = Y_bl_ext3
X_bl = X_bl_ext4
Y_bl = buffer_cord_y + 10 # BOT_LEFT:
X_br = X_tr
Y_br = Y_bl # BOT_RIGHT
Outer_corners_Big_pad2_2 = [(X_tr, Y_tr),
(X_tl, Y_tl),
(X_bl_ext1, Y_bl_ext1),
(X_bl_ext2, Y_bl_ext2),
(X_bl_ext3, Y_bl_ext3),
(X_bl_ext4, Y_bl_ext4),
(X_bl, Y_bl),
(X_br, Y_br)
]
# --------------------------------------------------------------------------------------------------------------
small_pad_arm2_1 = Polygon(outer_corners_arm2_1)
small_pad_arm2_2 = Polygon(outer_corners_arm2_2)
small_pad_arm1_1 = Polygon(outer_corners_arm1_1)
small_pad_arm1_2 = Polygon(outer_corners_arm1_2)
Big_pad1_1 = Polygon(Outer_corners_Big_pad1_1)
Big_pad1_2 = Polygon(Outer_corners_Big_pad1_2)
Big_pad2_1 = Polygon(Outer_corners_Big_pad2_1)
Big_pad2_2 = Polygon(Outer_corners_Big_pad2_2)
return Finger_lower1, Finger_lower2, Finger_upper1, Finger_upper2, \
Finger_lower_other_side1, Finger_lower_other_side2, Finger_upper_other_side1, Finger_upper_other_side2, \
small_pad_arm1_1, small_pad_arm1_2 ,small_pad_arm2_1, small_pad_arm2_2, \
Big_pad1_1, Big_pad1_2, Big_pad2_1, Big_pad2_2
def make_Chirp_IDT_Fingers_pairs(figer_widths, number_of_pairs, IDT_Aperature, prop_length):
    """Build a (chirp-capable) IDT: finger waveguides, small contact pads and big probe pads.

    Two IDTs are generated in local coordinates: one at the origin ("right")
    and a mirror copy shifted left by `prop_length` ("left"/other side).

    Parameters:
        figer_widths -- NOTE(review): currently unused; the finger widths are
            taken from the hard-coded `chirp_widths` ramp below. TODO confirm
            whether this parameter should drive start/end width.
        number_of_pairs -- number of IDT finger pairs (must divide evenly by
            `number_of_widths`, otherwise generation aborts with a message).
        IDT_Aperature -- finger overlap length (also the finger segment length).
        prop_length -- separation between the two IDTs (acoustic propagation length).

    Returns:
        Tuple of (lower/upper fingers for both sides, 4 small pads, 4 big pads,
        top_right_x, y_mid_IDT, shift_2, right_top_small_pad_TL_X/Y) — the last
        five are reference coordinates consumed by the waveguide/ZnO builders.
    """
    # Create the IDT
    # Change parameter here:
    # How many pairs of IDT fingers
    number_of_pairs = int(number_of_pairs)
    how_many_period = number_of_pairs
    # Finger coordinate (correction of different IDT aperature)
    Finger_origin_x = 0 # last two term = figers offset (5um) + small_pad_width_with extended(10um)
    Finger_origin_y = 0
    # Finger offset on the other side of the horn structure
    Finger_length = IDT_Aperature
    Finger_left_offset = prop_length
    # Pad coordinate
    arm2_right_pad_Offset = Finger_length + 4.5
    pad_length = how_many_period * 2
    # Below DO NOT CHANGE ------------------------------------------------------------------------------
    # one_period_arm2 = [finger_width, finger_width + pitch]
    # Alternating [finger_width, gap] lists consumed by Waveguide as a multi-strip width.
    Idt_finger_arm1 = []
    Idt_finger_arm2 = []
    number_of_widths = 5
    # start_width == end_width, so this build is effectively un-chirped.
    start_width = 0.17
    end_width = 0.17
    litho_corrected_width = 0.005 #0.2 for 1um
    chirp_widths = np.linspace(start_width,end_width,number_of_widths)
    chirp_widths = chirp_widths[::-1]
    for index, width in enumerate(chirp_widths):
        pitch = 3 * width
        one_period_arm1 = [width - litho_corrected_width, pitch + litho_corrected_width]
        one_period_arm2 = one_period_arm1
        # Make sure the number of pairs can be equally divided into number of widths
        if (number_of_pairs / number_of_widths).is_integer() == True:
            devided_pairs = int(number_of_pairs / number_of_widths)
            for i in range(devided_pairs):
                # Change the period of the last pair in this pair group to the next pair's period
                if i == devided_pairs - 1 and index != len(chirp_widths) - 1:
                    pitch1 = chirp_widths[index + 1] * 3 # pitch for the last ending finger
                    pitch2 = width * 2 + chirp_widths[index + 1] * 1 # pitch for the second last finger
                    one_period_arm1 = [width - litho_corrected_width, pitch1 + litho_corrected_width]
                    one_period_arm2 = [width - litho_corrected_width, pitch2 + litho_corrected_width]
                    Idt_finger_arm1.extend(one_period_arm1)
                    Idt_finger_arm2.extend(one_period_arm2)
                else:
                    Idt_finger_arm1.extend(one_period_arm1)
                    Idt_finger_arm2.extend(one_period_arm2)
        else:
            print("%0.1f pairs can not be evenly devided into %0.1f different widths" % (
                number_of_pairs, number_of_widths))
            break
    # Below DO NOT CHANGE ------------------------------------------------------------------------------
    # IDT ON THE "right" GRATING COUPLER
    # Finger_lower1 is lower idt fingers, Finger_upper1 is upper IDT finger (on the horn on the right)
    # If you want to make chirp fingers, add 0.02 to the finger_upper1 line e.g. Finger_origin_x + 2*(chirp_widths[-1]) +0.02
    # Funger_upper_other_side1 = Finger_origin_x - Finger_left_offset - 2*(chirp_widths[-1]) - 0.02
    Finger_lower1 = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x , Finger_origin_y - 5 + IDT_Aperature), angle=-np.pi/2, width=Idt_finger_arm1))
    Finger_lower1.add_straight_segment(length=Finger_length)
    Finger_upper1 = Waveguide.make_at_port(
        Port(origin=(Finger_origin_x + 2*(chirp_widths[-1]) , Finger_origin_y + IDT_Aperature), angle=-np.pi/2,
             width=Idt_finger_arm2))
    Finger_upper1.add_straight_segment(length=Finger_length)
    # SAME IDT ON THE "left" side ---------------------------------------------------------------------------------------------------
    # Finger_lower_other_side is left IDT finger, wg_2 is right IDT finger
    Finger_lower_other_side1 = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset , Finger_origin_y-5), angle=np.pi/2, width=Idt_finger_arm1))
    Finger_lower_other_side1.add_straight_segment(length = Finger_length)
    Finger_upper_other_side1 = Waveguide.make_at_port(Port(origin=(Finger_origin_x - Finger_left_offset - 2*(chirp_widths[-1]), Finger_origin_y ), angle=np.pi / 2, width=Idt_finger_arm2))
    Finger_upper_other_side1.add_straight_segment(length=Finger_length) #+ - 0.4 for 1um finger
    # Make Small metal pad
    average_chirp_finger_width = (start_width + end_width) / 2
    average_chirp_finger_pitch = average_chirp_finger_width * 4
    Chirped_finger_gap_offsets = (average_chirp_finger_width + average_chirp_finger_pitch) / 2 + average_chirp_finger_width / 2
    # shift_2 sizes the small pads so they span the whole finger comb.
    shift_2 = number_of_pairs*(average_chirp_finger_pitch)/1.5
    right_small_pad_right_offset = shift_2 / 10
    left_small_pad_left_offset = right_small_pad_right_offset
    #Left_bot
    outer_corners_arm2_1 = [
        (Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 10),
        (Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 5),
        (Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 5),
        (Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset, Finger_origin_y - 10)]
    # Left_top
    outer_corners_arm2_2 = [(Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset),
                            (Finger_origin_x + shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset - 5),
                            (Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset - 5),
                            (Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset ,Finger_origin_y + arm2_right_pad_Offset)]
    # --------------------------------------------------------------------------------------------------------------
    # Right_bot small pad------------------------------------------------------------------------------------------------
    outer_corners_arm1_1 = [(Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y - 9),
                            # Bot-left corner
                            (Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y - 4),
                            # Bot-left corner
                            (Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y - 4) ,
                            # Top-Right corner
                            (Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y - 9)
                            # Top-left corner
                            ]
    # Right_top small pad-----------------------------------------------------------------------------------------------
    outer_corners_arm1_2 = [
        (Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 0),
        # Bot-right corner
        (Finger_origin_x - shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 5),
        # Bot-left corner
        (Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 5), # Top-left corner
        (Finger_origin_x + shift_2 + right_small_pad_right_offset, Finger_origin_y + arm2_right_pad_Offset - 0)] # Top-right corner
    # Reference points handed back to the caller (acoustic waveguide / ZnO pad placement).
    y_mid_IDT = Finger_origin_y + arm2_right_pad_Offset/2
    right_top_small_pad_TL_X = Finger_origin_x - shift_2 + right_small_pad_right_offset
    right_top_small_pad_TL_Y = Finger_origin_y + arm2_right_pad_Offset - 0
    #Make big metal pads
    # Left Big pad bot (polygon traced TR -> TL -> two mid extension points -> BL -> BR).
    bot_left_y = Finger_origin_y - 5
    bot_left_x = Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset + 2
    X_tr = bot_left_x + 50
    Y_tr = bot_left_y # TOP_RIGHT
    X_tl = X_tr - 300
    Y_tl = Y_tr # TOP_LEFT
    X_bl_ext1 = X_tl
    Y_bl_ext1 = Y_tl - 60 # BOT_left extend to middle upper point
    X_bl_ext2 = X_bl_ext1 + 290
    Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
    X_bl = X_bl_ext2
    Y_bl = Y_bl_ext2 - 150 # BOT_LEFT:
    X_br = X_tr + 70
    Y_br = Y_bl # BOT_RIGHT
    X_br_ext1 = X_br
    Y_br_ext1 = Y_br + 100
    # Buffered coordinates reused when tracing the matching top pad below.
    buffer_cord_x = X_tl
    buffer_cord_y = Y_tl
    buffer_cord_x2 = X_bl_ext2
    buffer_cord_y2 = Y_bl_ext1
    buffer_cord_x3 = X_br
    Outer_corners_Big_pad1_1 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl_ext1, Y_bl_ext1),
                                (X_bl_ext2, Y_bl_ext2),
                                (X_bl, Y_bl),
                                (X_br, Y_br),
                                (X_br_ext1, Y_br_ext1)
                                ]
    # Left Big pad top
    top_right_y = Finger_origin_y + arm2_right_pad_Offset
    top_right_x = Finger_origin_x - shift_2 - Finger_left_offset - left_small_pad_left_offset + 2
    X_tr = top_right_x
    Y_tr = top_right_y # TOP_RIGHT
    X_tl = buffer_cord_x - 50
    Y_tl = Y_tr # TOP_LEFT
    X_bl_ext1 = X_tl
    Y_bl_ext1 = buffer_cord_y2 - 150 # BOT_left extend to middle upper point
    X_bl_ext2 = buffer_cord_x2 - 10
    Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
    X_bl_ext3 = X_bl_ext2
    Y_bl_ext3 = buffer_cord_y2 - 10 # BOT_RIGHT extend to middle lower point
    X_bl_ext4 = buffer_cord_x - 10
    Y_bl_ext4 = Y_bl_ext3
    X_bl = X_bl_ext4
    Y_bl = buffer_cord_y + 10 # BOT_LEFT:
    X_br = X_tr
    Y_br = Y_bl # BOT_RIGHT
    Outer_corners_Big_pad1_2 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl_ext1, Y_bl_ext1),
                                (X_bl_ext2, Y_bl_ext2),
                                (X_bl_ext3, Y_bl_ext3),
                                (X_bl_ext4, Y_bl_ext4),
                                (X_bl, Y_bl),
                                (X_br, Y_br)
                                ]
    #-----Right big pad bot (mirror of the left bottom pad)
    bot_right_x = Finger_origin_x + shift_2 + right_small_pad_right_offset - 2
    bot_right_y = Finger_origin_y - 4
    X_tr = bot_right_x - 50
    Y_tr = bot_right_y # TOP_RIGHT
    X_tl = X_tr + 300
    Y_tl = Y_tr # TOP_LEFT
    X_bl_ext1 = X_tl
    Y_bl_ext1 = Y_tl - 60 # BOT_left extend to middle upper point
    X_bl_ext2 = X_bl_ext1 - 290
    Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
    X_bl = X_bl_ext2
    Y_bl = Y_bl_ext2 - 151 # BOT_LEFT:
    X_br = buffer_cord_x3 + 10
    Y_br = Y_bl # BOT_RIGHT
    X_br_ext1 = X_br
    Y_br_ext1 = Y_br + 100
    buffer_cord_x = X_tl
    buffer_cord_y = Y_tl
    buffer_cord_x2 = X_bl_ext2
    buffer_cord_y2 = Y_bl_ext1
    Outer_corners_Big_pad2_1 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl_ext1, Y_bl_ext1),
                                (X_bl_ext2, Y_bl_ext2),
                                (X_bl, Y_bl),
                                (X_br, Y_br),
                                (X_br_ext1, Y_br_ext1)
                                ]
    #Right top big pad
    top_right_y = Finger_origin_y + arm2_right_pad_Offset - 0
    top_right_x = bot_right_x
    X_tr = top_right_x
    Y_tr = top_right_y # TOP_RIGHT
    X_tl = buffer_cord_x + 50
    Y_tl = Y_tr # TOP_LEFT
    X_bl_ext1 = X_tl
    Y_bl_ext1 = buffer_cord_y2 - 151 # BOT_left extend to middle upper point
    X_bl_ext2 = buffer_cord_x2 + 10
    Y_bl_ext2 = Y_bl_ext1 # BOT_RIGHT extend to middle lower point
    X_bl_ext3 = X_bl_ext2
    Y_bl_ext3 = buffer_cord_y2 - 10 # BOT_RIGHT extend to middle lower point
    X_bl_ext4 = buffer_cord_x + 10
    Y_bl_ext4 = Y_bl_ext3
    X_bl = X_bl_ext4
    Y_bl = buffer_cord_y + 10 # BOT_LEFT:
    X_br = X_tr
    Y_br = Y_bl # BOT_RIGHT
    Outer_corners_Big_pad2_2 = [(X_tr, Y_tr),
                                (X_tl, Y_tl),
                                (X_bl_ext1, Y_bl_ext1),
                                (X_bl_ext2, Y_bl_ext2),
                                (X_bl_ext3, Y_bl_ext3),
                                (X_bl_ext4, Y_bl_ext4),
                                (X_bl, Y_bl),
                                (X_br, Y_br)
                                ]
    # --------------------------------------------------------------------------------------------------------------
    small_pad_arm2_1 = Polygon(outer_corners_arm2_1)
    small_pad_arm2_2 = Polygon(outer_corners_arm2_2)
    small_pad_arm1_1 = Polygon(outer_corners_arm1_1)
    small_pad_arm1_2 = Polygon(outer_corners_arm1_2)
    Big_pad1_1 = Polygon(Outer_corners_Big_pad1_1)
    Big_pad1_2 = Polygon(Outer_corners_Big_pad1_2)
    Big_pad2_1 = Polygon(Outer_corners_Big_pad2_1)
    Big_pad2_2 = Polygon(Outer_corners_Big_pad2_2)
    return Finger_lower1, Finger_upper1, \
           Finger_lower_other_side1, Finger_upper_other_side1, \
           small_pad_arm1_1, small_pad_arm1_2 ,small_pad_arm2_1, small_pad_arm2_2, \
           Big_pad1_1, Big_pad1_2, Big_pad2_1, Big_pad2_2, \
           top_right_x, y_mid_IDT, shift_2, \
           right_top_small_pad_TL_X, right_top_small_pad_TL_Y
def make_Acoustic_waveguides(init_width, fin_width, prop_length, top_right_x, y_mid_IDT, L_IDT_area):
    """Route the acoustic waveguide between the two IDTs.

    Profile: wide launch section -> taper down -> constant propagation
    section -> taper back up -> wide exit section.

    Parameters:
        init_width -- width of the wide launch/exit sections.
        fin_width -- width of the central propagation section.
        prop_length -- total propagation distance between the IDTs.
        top_right_x, y_mid_IDT -- launch reference point from the IDT builder.
        L_IDT_area -- IDT span; the launch section is 1.8x this value.
    """
    x_offset = 1.5
    launch_length = L_IDT_area * 1.8
    wg = Waveguide.make_at_port(
        Port((top_right_x - x_offset, y_mid_IDT - 5), angle=-np.pi, width=init_width))
    # Wide launch section in front of the IDT.
    wg.add_straight_segment(length=launch_length)
    # Linear taper from the launch width down to the propagation width.
    wg.add_straight_segment(length=20, final_width=fin_width)
    # Constant-width propagation section (remaining budget of prop_length).
    wg.add_straight_segment(length=prop_length - 0.93 * launch_length - 40)
    # Taper back up, then a matching wide exit section.
    wg.add_straight_segment(length=20, final_width=init_width)
    wg.add_straight_segment(length=launch_length)
    return wg
def make_ZnO_pad(Finger_length, right_top_small_pad_TL_X, right_top_small_pad_TL_Y):
    """Create the two rectangular ZnO pads covering the IDT finger regions.

    The right pad is anchored at the right top small-pad corner; the left
    pad is the same rectangle shifted left by a fixed offset.
    """
    ZnO_expand_x = 2    # lateral overhang beyond the fingers
    ZnO_expand_y = 1    # vertical overhang beyond the pad edge
    ZnO_pad_width = 65
    Left_ZnO_OFFSET_X = 203

    def corners(x_shift):
        # Corner order matches the original layout: BR, TR, TL, BL.
        left_x = right_top_small_pad_TL_X - ZnO_expand_x + x_shift
        right_x = right_top_small_pad_TL_X + Finger_length + ZnO_expand_x + x_shift
        top_y = right_top_small_pad_TL_Y + ZnO_expand_y
        bottom_y = right_top_small_pad_TL_Y - ZnO_pad_width - ZnO_expand_y
        return [(left_x, top_y), (right_x, top_y), (right_x, bottom_y), (left_x, bottom_y)]

    ZnO_pad_R = Polygon(corners(0))
    ZnO_pad_L = Polygon(corners(-Left_ZnO_OFFSET_X))
    return ZnO_pad_R, ZnO_pad_L
def make_EBL_markers(layout_cell):
    """Place global EBL alignment markers on a 5 x 3 grid over the layout.

    At every grid point: a cross marker on layer 1, and four square markers
    on layer 2 placed on the diagonals around the point.
    """
    # Marker dimensions
    cross_l = 20
    croww_w = 5
    paddle_l = 5
    paddle_w = 5
    square_marker_size = 10
    # Grid extents
    top_marker_y = 3400
    right_most_marker_x = 6000
    x_positions = [0, right_most_marker_x / 4, right_most_marker_x / 2,
                   right_most_marker_x * 3 / 4, right_most_marker_x]
    y_positions = [0, top_marker_y / 2, top_marker_y]
    for x in x_positions:
        for y in y_positions:
            # Layer 1: cross marker centered on the grid point.
            layout_cell.add_ebl_marker(
                layer=1,
                marker=CrossMarker(origin=(x, y), cross_length=cross_l,
                                   cross_width=croww_w,
                                   paddle_length=paddle_l, paddle_width=paddle_w))
            # Layer 2: one square on each diagonal around the grid point.
            for dx in (square_marker_size, -square_marker_size):
                for dy in (square_marker_size, -square_marker_size):
                    layout_cell.add_ebl_marker(
                        layer=2,
                        marker=SquareMarker((x + dx, y + dy), square_marker_size))
def generate_device_cell(sweep1, sweep2, cell_name):
    """Assemble one device cell: IDT fingers, ZnO pads and the acoustic waveguide.

    Parameters:
        sweep1 -- initial (launch) acoustic waveguide width being swept.
        sweep2 -- final (propagation) acoustic waveguide width being swept.
        cell_name -- label drawn into the cell.
    Layers: 1 = waveguide (positive resist), 2 = ZnO, 3 = fingers/small pads,
    4 = big probe pads.
    """
    # IDT fingers plus the reference coordinates for the other parts.
    Finger_lower1, Finger_upper1, \
    Finger_lower_other_side1, Finger_upper_other_side1, \
    small_pad_arm1_1, small_pad_arm1_2, \
    small_pad_arm2_1, small_pad_arm2_2, \
    Big_pad1_1, Big_pad1_2, Big_pad2_1, Big_pad2_2, \
    top_right_x, y_mid_IDT, shift_2, \
    right_top_small_pad_TL_X, right_top_small_pad_TL_Y = make_Chirp_IDT_Fingers_pairs(
        figer_widths=1,
        number_of_pairs=55,
        IDT_Aperature=50,
        prop_length=200)
    # ZnO pads under the IDTs.
    ZnO_pad_R, ZnO_pad_L = make_ZnO_pad(
        Finger_length=50,
        right_top_small_pad_TL_X=right_top_small_pad_TL_X,
        right_top_small_pad_TL_Y=right_top_small_pad_TL_Y)
    # Acoustic waveguide between the two IDTs (widths come from the sweep).
    Aco_wg = make_Acoustic_waveguides(init_width=sweep1,
                                      fin_width=sweep2,
                                      prop_length=200,
                                      top_right_x=top_right_x,
                                      y_mid_IDT=y_mid_IDT, L_IDT_area=shift_2)
    finger_geometry = geometric_union([Finger_lower1, Finger_upper1,
                                       Finger_lower_other_side1, Finger_upper_other_side1,
                                       small_pad_arm1_1, small_pad_arm1_2,
                                       small_pad_arm2_1, small_pad_arm2_2
                                       ])
    big_pad_geometry = geometric_union([Big_pad1_1, Big_pad1_2, Big_pad2_1, Big_pad2_2])
    # ZnO must cover the big pads as well; buffer(1) grows the outline by 1 unit.
    zno_and_pads = geometric_union([ZnO_pad_R, ZnO_pad_L, Big_pad1_1, Big_pad1_2, Big_pad2_1, Big_pad2_2])
    ZnO_under_pad_and_fingers = zno_and_pads.buffer(1)
    # Cell label.
    text = Text(origin=[-500, -300], height=50, text=str(cell_name), alignment='left-bottom')
    cell = Cell('SIMPLE_RES_DEVICE r={:.4f} g={:.4f}'.format(sweep1, sweep2))
    cell.add_to_layer(1, convert_to_positive_resist([Aco_wg], 30), text)
    cell.add_to_layer(2, ZnO_under_pad_and_fingers)
    cell.add_to_layer(3, finger_geometry)
    cell.add_to_layer(4, big_pad_geometry)
    #cell.add_to_layer(5, holes)
    #cell.add_to_layer(5, left_coupler)
    #cell.add_ebl_marker(layer=1, marker=CrossMarker(origin=(-500,-300 ), cross_length=10 , cross_width=5, paddle_length=5, paddle_width=5))
    return cell
if __name__ == "__main__":
    layout = GridLayout(title='Phononic Waveguides', frame_layer=0, text_layer=3, region_layer_type=None,
                        horizontal_spacing=100, vertical_spacing=0)
    # Parameters wanted to scan -------------------------------------------
    # 1550nm (best performance max_duty=0.80 grating_pitch=0.76)
    maximum_duty = np.linspace(0.75, 0.82, num=2)
    grating_pitch = np.linspace(0.75, 0.77, num=2)
    step = 12
    #number_of_finger_pairs = np.linspace(30, 90, num= int( (90-30) / step ) +1)
    #IDT_Aperatures = np.linspace(50, 200, num=6)
    number_of_finger_pairs = [55]
    IDT_Aperatures = [50]
    # Acoustic waveguide parameters: the two axes of the sweep grid.
    initial_acoustic_width = [10, 20, 30, 40, 50]
    final_acoustic_width = [1, 5, 10, 20, 50]
    Parameters_scan_1 = initial_acoustic_width
    Parameters_scan_2 = final_acoustic_width
    print('Scan1 =', initial_acoustic_width)
    print('Scan2 =', final_acoustic_width)
    #--------------------------------------------------------------------
    total = len(Parameters_scan_1) * len(Parameters_scan_2)
    count = 0
    #--------------------------------------------------------------------
    # Ask whether to preview a single cell ('show') or build and save the
    # full sweep ('save'). BUG FIX: the original set `show` in an `if` whose
    # value was immediately overwritten by a second if/else chain; the net
    # behavior (only 'save' saves, anything else shows) is kept, made explicit.
    answer = input('Show or Save Layout?\n Input (save/show):')
    if answer == 'save':
        show = False
    else:
        # 'show' and any unrecognized answer preview the layout.
        show = True
    #--------------------------------------------------------------------
    if show:
        # Preview: one representative cell only.
        layout.add_column_label_row(('G_P= %0.2f' % 0.7), row_label='')
        layout.add_to_row(generate_device_cell(sweep1=55, sweep2=50, cell_name='1'))
        layout_cell, mapping = layout.generate_layout()
        layout_cell.show()
    else:
        # Full sweep: one cell per (initial width, final width) pair.
        layout.add_column_label_row(('W_fin= %0.1f' % param_2 for param_2 in Parameters_scan_2), row_label='')
        for param_1 in Parameters_scan_1:
            layout.begin_new_row('W_ini=\n%0.1f' % param_1)
            for param_2 in Parameters_scan_2:
                count = count + 1
                complete = count / total
                print("Number of cell generated / Total cell = %0.1f/%0.1f (%0.2f%% complete) " % (count, total, complete * 100))
                layout.add_to_row(generate_device_cell(sweep1=param_1, sweep2=param_2, cell_name=count),
                                  alignment='center-center', realign=True)
        layout_cell, mapping = layout.generate_layout()
        layout_cell.add_ebl_frame(layer=1, size=40, frame_generator=raith_marker_frame, n=2)
        # Show and then save the layout!
        make_EBL_markers(layout_cell)
        print('saving........')
        layout_cell.show()
        layout_cell.save('Phononic_v0.gds', parallel=True)
print('saved!!!') |
from aoc import aoc
from functools import reduce
# Puzzle input: one seven-segment entry per line ("signal patterns | output digits").
lines = aoc.read_lines('data/08.txt')
def unique_patterns(line):
    """Return the ten signal patterns (before ' | '), letters sorted within each."""
    signal_part, _, _ = line.partition(' | ')
    return [''.join(sorted(pattern)) for pattern in signal_part.split()]
def find_mapping(line):
    """Deduce which sorted pattern encodes each digit 0-9 for one puzzle line.

    Digits 1, 4, 7, 8 have unique segment counts; the remaining six are
    resolved by subset relations against those known digits.
    Returns a list where index i holds the pattern for digit i.
    """
    # Inlined pattern extraction: ten signal patterns, letters sorted.
    patterns = [''.join(sorted(p)) for p in line.split(' | ')[0].split()]

    def pick(pred):
        # First pattern satisfying the predicate.
        return next(p for p in patterns if pred(p))

    one = pick(lambda p: len(p) == 2)
    seven = pick(lambda p: len(p) == 3)
    four = pick(lambda p: len(p) == 4)
    eight = pick(lambda p: len(p) == 7)
    # Six-segment digits: 9 contains all of 4's segments; 6 misses part of 1; 0 is the leftover.
    nine = pick(lambda p: len(p) == 6 and set(four) <= set(p))
    six = pick(lambda p: len(p) == 6 and not set(one) <= set(p))
    zero = pick(lambda p: len(p) == 6 and p not in (nine, six))
    # Five-segment digits: 2 has a segment outside 9; 5 fits inside 6; 3 is the leftover.
    two = pick(lambda p: len(p) == 5 and not set(p) <= set(nine))
    five = pick(lambda p: len(p) == 5 and set(p) <= set(six))
    three = pick(lambda p: len(p) == 5 and p not in (two, five))
    return [zero, one, two, three, four, five, six, seven, eight, nine]
def coded_numbers(line):
    """Return the four output digits (after ' | '), letters sorted within each."""
    _, _, output_part = line.partition(' | ')
    return [''.join(sorted(digits)) for digits in output_part.split()]
def get_number(line, mapping):
    """Decode the four-digit output of `line`; `mapping[i]` is digit i's pattern."""
    # Inlined output extraction: patterns after ' | ', letters sorted.
    outputs = [''.join(sorted(p)) for p in line.split(' | ')[1].split()]
    return int(''.join(str(mapping.index(code)) for code in outputs))
def unique_coded_numbers(line):
    """Count output digits with a unique segment count (i.e. digits 1, 7, 4, 8)."""
    outputs = line.split(' | ')[1].split()
    return len([digits for digits in outputs if len(digits) in [2, 3, 4, 7]])
# Part 1: count output digits with a unique segment count.
# BUG FIX: the original bound the result to `sum`, shadowing the builtin,
# and used reduce(lambda a, b: a + b, ...) where builtin sum() suffices.
part1 = sum(unique_coded_numbers(line) for line in lines)
print(part1)
# Part 2: decode every display and add up the four-digit outputs.
part2 = sum(get_number(line, find_mapping(line)) for line in lines)
print(part2)
|
import sys
import os
from os import path
# Make the sibling src/ directory importable when this script runs directly
# (it lives next to the scripts folder, one level up from this file).
libpath = path.normpath(path.join(path.dirname(path.realpath(__file__)), os.pardir, "src"))
sys.path.append(libpath)
import elasticsearch as es
import pytrec_eval
from datasets import Robust2004
import numpy as np
import random
import torch
from torch.utils.data import DataLoader, random_split
def msearch_preprocess(query_texts, index="robust2004", doc_type="trec"):
    """Build an Elasticsearch msearch body: alternating header/query lines.

    Each query is a query_string search over the "text" field, capped at
    MAX_DOC hits (module-level constant).
    """
    header = {"index": index, "type": doc_type}
    body = []
    for query_txt in query_texts:
        # query text needs to be a string
        query = {
            "size": MAX_DOC,
            "query": {"query_string": {"query": " ".join(query_txt), "default_field": "text"}},
        }
        body.extend((header, query))
    return body
def retrieve_doc_ids(hits):
    """Map each hit's document id to its retrieval score."""
    scores = {}
    for hit in hits:
        scores[hit["_id"]] = hit["_score"]
    return scores
MAX_DOC = 1000
index = "robust2004-0.5-1"
doc_type = "trec"
# Fix every RNG so the train/test split is reproducible.
seed = 5652
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
train_len = 200
dataset = Robust2004.torch_dataset()
dataclasses = Robust2004.dataclasses()
dataclasses = {dc._id: dc for dc in dataclasses}
trainset, testset = random_split(dataset, [train_len, len(dataset) - train_len])
# Query ids are stored as the third tensor of each sample.
train_ids = [str(r[2].long().tolist()) for r in trainset]
test_ids = [str(r[2].long().tolist()) for r in testset]
print(len(dataclasses))
train_queries = [v.query for k, v in dataclasses.items() if k in train_ids]
test_queries = [v.query for k, v in dataclasses.items() if k in test_ids]
train_qrel = {str(id_): dataclasses[id_].qrels for id_ in train_ids}
test_qrel = {str(id_): dataclasses[id_].qrels for id_ in test_ids}
train_msearch_body = msearch_preprocess(train_queries, index, doc_type)
test_msearch_body = msearch_preprocess(test_queries, index, doc_type)
print("search")
print(test_msearch_body)
engine = es.Elasticsearch()
train_res = engine.msearch(train_msearch_body, index)["responses"]
test_res = engine.msearch(test_msearch_body, index)["responses"]


def collect_doc_ids(responses):
    """Return one {doc_id: score} dict per msearch response.

    Error responses carry no "hits" payload; they yield an empty dict so
    the result stays aligned with the query id list.
    BUG FIX: was a bare `except:` (swallowed KeyboardInterrupt/SystemExit too);
    narrowed to the failures an error response actually produces.
    """
    doc_ids = []
    for resp in responses:
        try:
            doc_ids.append(retrieve_doc_ids(resp["hits"]["hits"]))
        except (KeyError, TypeError):
            print("error")
            doc_ids.append({})
    return doc_ids


def mean_map_score(qrel, run):
    """Mean Average Precision of `run` judged against `qrel` via pytrec_eval."""
    evaluator = pytrec_eval.RelevanceEvaluator(qrel, set(("map",)))
    # evaluate() is the bottleneck of the whole script.
    map_scores = [a["map"] for a in evaluator.evaluate(run).values()]
    return sum(map_scores) / len(map_scores)


print("id retrieval")
train_doc_ids = collect_doc_ids(train_res)
print("test")
test_doc_ids = collect_doc_ids(test_res)
train_res_dict = dict(zip(train_ids, train_doc_ids))
test_res_dict = dict(zip(test_ids, test_doc_ids))
print("train eval")
train_map_score = mean_map_score(train_qrel, train_res_dict)
print("test eval")
test_map_score = mean_map_score(test_qrel, test_res_dict)
print("Train MAP :", train_map_score, "\tTest MAP :", test_map_score)
|
# global.py
# 3. You may not assign to a local variable first and then declare the same
#    name `global` later in the function -- Python rejects this pattern.
v = 100
def f1():
    v = 200
    print(v)
    global v  # SyntaxError in Python 3: "name 'v' is assigned to before global declaration"
    v += 300
    print(v)
f1()
print("v=", v)  # never reached: the module fails to compile because of the error above
import tkinter as Tk
from const import (WIDTH, HEIGHT)
BACKGROUND_COLOR = 'white'
class Frame(Tk.Frame):
    """Top-level application frame holding the simulation canvas."""
    def __init__(self, master=None):
        super().__init__(master)
        # Window chrome: title and initial on-screen position.
        # NOTE(review): "Simulater" looks like a typo for "Simulator"; kept
        # as-is since it is a user-visible runtime string.
        self.master.title("FPS Simulater")
        self.master.geometry("+20+20")
        # Drawing surface sized by the shared WIDTH/HEIGHT constants;
        # fills the frame and grows with the window.
        canvas = Tk.Canvas(self, width=WIDTH, height=HEIGHT,
                           relief=Tk.SUNKEN, borderwidth=2,
                           bg=BACKGROUND_COLOR)
        canvas.pack(fill=Tk.BOTH, expand=1)
        self.cvs = canvas
|
import unittest
from math import sqrt
from ..src.objects.bar import Bar
from ..src.objects.nodes import Node2D
from ..src.objects.materials.material import ElasticMaterial
from ..src.objects.sections import CircleBar
class TestMaterials(unittest.TestCase):
    """ElasticMaterial must derive the third elastic constant from any two,
    per the isotropic relation E = 2*G*(1 + nu)."""
    # Steel: constructed from Young's modulus + Poisson's ratio.
    _steel_young_m = 210e9
    _steel_pos = 0.3
    steel = ElasticMaterial(
        young_m=_steel_young_m, poisson=_steel_pos)
    # Aluminium: constructed from Kirchhoff (shear) modulus + Poisson's ratio.
    _aluminium_kirhf_m = 25.5
    _aluminium_pos = 0.33
    aluminium = ElasticMaterial(
        kirchff_m=_aluminium_kirhf_m, poisson=_aluminium_pos)
    # Concrete: constructed from Young's + Kirchhoff moduli.
    _concrete_young_m = 27e9
    _concrete_kirhf_m = 11.25e9
    concrete = ElasticMaterial(
        young_m=_concrete_young_m, kirchff_m=_concrete_kirhf_m)
    def test_kirchoff_modulus(self):
        # G = E / (2 * (1 + nu)); exact equality is intentional -- the class
        # is expected to use this very formula.
        steel_kirch_modulus = self._steel_young_m/(2*(1+self._steel_pos))
        self.assertEqual(self.steel.kirchoffs_modulus, steel_kirch_modulus)
    def test_young_modulus(self):
        # E = 2 * G * (1 + nu)
        alum_young_modulus = 2*self._aluminium_kirhf_m*(1+self._aluminium_pos)
        self.assertEqual(self.aluminium.youngs_modulus, alum_young_modulus)
    def test_poissons_ratio(self):
        # nu = (E - 2G) / (2G)
        conc_pos_ratio = ((self._concrete_young_m-2*self._concrete_kirhf_m) /
                          (2*self._concrete_kirhf_m))
        self.assertEqual(self.concrete.poissons_ratio, conc_pos_ratio)
class TestLengthOfBarObjects(unittest.TestCase):
    """Bar.length must equal the Euclidean distance between its end nodes."""
    sample_sec = CircleBar(1)
    sample_mat = ElasticMaterial(210e9, 0.3)

    def _make_bar(self, x2, y2):
        # Bar from the origin to (x2, y2) with the shared section/material;
        # factored out of the three tests, which previously repeated it verbatim.
        return Bar(Node2D(x=0, y=0), Node2D(x=x2, y=y2),
                   self.sample_sec, self.sample_mat)

    def test_length_horizontal(self):
        self.assertEqual(self._make_bar(5, 0).length, 5)

    def test_length_vertical(self):
        self.assertEqual(self._make_bar(0, 5).length, 5)

    def test_length_complex(self):
        # 45-degree bar: length must be 5*sqrt(2).
        self.assertEqual(self._make_bar(5, 5).length, 5*sqrt(2))
class TestAnglesOfBarObjects(unittest.TestCase):
    """Bar's sin/cos of its inclination angle must match manual trigonometry
    computed from the node coordinates."""

    sample_sec = CircleBar(1)
    sample_mat = ElasticMaterial(210e9, 0.3)
    x1, x2, y1, y2 = 3, 5, 10, 30
    node_1 = Node2D(x=x1, y=y1)
    node_2 = Node2D(x=x2, y=y2)
    test_bar = Bar(node_1, node_2, sample_sec, sample_mat)
    # Reference length computed by hand from the same coordinates.
    manual_calc_len = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

    def test_sin_angle(self):
        expected_sin = (self.y2 - self.y1) / self.manual_calc_len
        self.assertEqual(self.test_bar.sin_of_angle, expected_sin)

    def test_cos_angle(self):
        expected_cos = (self.x2 - self.x1) / self.manual_calc_len
        self.assertEqual(self.test_bar.cos_of_angle, expected_cos)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Test Sep client class."""
from __future__ import print_function
import datetime
from dateutil.tz import tzutc
from mock import patch
import pytest
from fn_aws_iam.lib.aws_iam_client import *
from .mock_artifacts import *
"""
Suite of tests to test AWS IAM client class
"""
def assert_keys_in(json_obj, *keys):
    """Assert that every name in *keys* is present in json_obj."""
    missing = [key for key in keys if key not in json_obj]
    assert not missing
def get_config():
    """Return a mock app-config dict used to construct AwsIamClient.

    Credentials are fake values shaped like real AWS IDs/keys; the region
    is left as None so the client falls back to its default handling.
    """
    # A dict literal suffices; the original dict({...}) wrapper was a
    # redundant copy of an already-built dict.
    return {
        "aws_iam_access_key_id": "AKAABBCCDDEEFFGGHH12",
        "aws_iam_secret_access_key": "pplXXEEK/aAbBcCdDeEfFgGhHiH1234567+sssss",
        "aws_iam_region": None
    }
class TestAWSIAMClient:
    """ Test aws_iam_client using mocked data. """
    # The bare string literals below act as section markers for each tested
    # method. Stacked @patch decorators inject mocks bottom-up: the patch
    # closest to the function supplies the first mock argument after self.
    # NOTE(review): the local `options = {}` in every test is unused.
    """ Test sep_client._get_client"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("service, expected_result", [
        ("iam", "botocore.client.IAM object at "),
        ("sts", "botocore.client.STS object at "),
    ])
    def test_get_client(self, mock_id, service, expected_result):
        # A real botocore client object is built; assert on its repr().
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._get_client(service)
        assert(expected_result in repr(response))
    """ Test sep_client.__get_type_from_response"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("op, type_list, expected_result", [
        ("get_user", SUPPORTED_GET_TYPES, "User"),
        ("list_user_tags", SUPPORTED_GET_TYPES, "Tags"),
        ("get_login_profile", SUPPORTED_GET_TYPES, "LoginProfile"),
    ])
    def test_get_type_from_response(self, mock_id, op, type_list, expected_result):
        # Happy path: each canned response contains a supported type key.
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._get_type_from_response(get_cli_raw_responses(op), type_list)
        assert(expected_result in repr(response))
    """ Test sep_client.__get_type_from_response negative case"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("op, type_list, expected_result", [
        ("get_user", SUPPORTED_PAGINATE_TYPES, "No supported type for integration found in AWS IAM response")
    ])
    def test_get_type_from_response_err(self, mock_id, op, type_list, expected_result):
        # Negative path: a get-style response against paginate types raises.
        options = {}
        iam_cli = AwsIamClient(get_config())
        with pytest.raises(ValueError) as e:
            response = iam_cli._get_type_from_response(get_cli_raw_responses(op), type_list)
        assert str(e.value) == expected_result
    """ Test sep_client._add_user_properties"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient.get', side_effect=mocked_client_get_profile)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("result, expected_result", [
        (mocked_iam_pre_results("pre_result_add_prop"), mock_client_results("expected_result_add_prop")),
        (mocked_iam_pre_results("pre_result_default_add_prop"), mock_client_results("expected_result_default_add_prop")),
        (mocked_iam_pre_results("pre_result_with_profile_add_prop"), mock_client_results("expected_result_with_profile_add_prop")),
    ])
    def test_add_user_properties(self, mock_profile, mock_id, result, expected_result):
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._add_user_properties(result)
        assert(expected_result == response)
    """ Test sep_client._update_result"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient.get', side_effect=mocked_client_get_profile)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("result, result_type, expected_result", [
        (mocked_iam_pre_results("get_user"), "User", mock_client_results("expected_result_default_add_prop")),
        (mocked_iam_pre_results("groups"), "groups", mock_client_results("expected_result_upd_group")),
        (mocked_iam_pre_results("access_keys"), "AccessKeyMetadata", mock_client_results("expected_result_upd_keys")),
    ])
    def test_update_result(self, mock_profile, mock_id, result, result_type, expected_result):
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._update_result(result, result_type)
        assert(expected_result == response)
    """ Test sep_client._datetime_to_str"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("result_entry, expected_result", [
        (mocked_iam_pre_results("get_user")[0], ["2019-10-31 16:23:07", "2019-11-15 17:11:28"])
    ])
    def test_datetime_to_str(self, mock_id, result_entry, expected_result):
        # Datetime fields of a single result entry are stringified in place.
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._datetime_to_str(result_entry)
        assert(expected_result[0] == response["CreateDate"])
        assert (expected_result[1] == response["PasswordLastUsed"])
    """ Test sep_client.paginate"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient.get', side_effect=mocked_client_get_profile)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._filter', side_effect=None)
    @patch('botocore.client.BaseClient.get_paginator', side_effect=mocked_client_paginator)
    @pytest.mark.parametrize("op, expected_result", [
        ("list_users", mock_client_results("expected result_pagination"))
    ])
    def test_paginate(self, mock_orofile, mock_id, mock_filter, mock_paginator, op, expected_result):
        # NOTE(review): "mock_orofile" is a typo for mock_profile; the mocks
        # are bound positionally, so it is harmless.
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli.paginate(op)
        assert (expected_result == response)
    """ Test sep_client.get"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_client', side_effect=mocked_iam)
    @pytest.mark.parametrize("op, expected_result", [
        ("get_user", mock_client_results("expected_result_get"))
    ])
    def test_get(self, mock_id, mock_iam, op, expected_result):
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli.get(op)
        assert (expected_result == response)
    """ Test sep_client.post"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_client', side_effect=mocked_iam)
    @pytest.mark.parametrize("op, expected_result", [
        ( "get_user", "OK"),
        ( "delete_user", "OK")
    ])
    def test_post(self, mock_id, mock_iam, op, expected_result):
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli.post(op)
        assert (expected_result == response)
    """ Test sep_client._filter"""
    @patch('fn_aws_iam.lib.aws_iam_client.AwsIamClient._get_default_identity', side_effect=get_default_identity)
    @pytest.mark.parametrize("result, results_filter, return_filtered, expected_result", [
        (get_func_responses("list_users"), None, True, 4),
        (get_func_responses("list_users"), {'UserName': u'test_user'}, True, 0),
        (get_func_responses("list_users"), {'UserName': 'iam_list_User_1'}, True, 1),
        (get_func_responses("list_access_keys"), {'AccessKeyId': 'ABC123CDE456FGH789IJ'}, True, 1),
        (get_func_responses("list_access_keys"), {'AccessKeyId': '123'}, True, 2),
        (get_func_responses("list_groups_for_user"), {'GroupName': u'test_group'}, False, [0, 2]),
        (get_func_responses("list_groups_for_user"), {'GroupName': 'null_group'}, False, [1, 2]),
        (get_func_responses("list_policies"), {'PolicyName': u'test_policy'}, False, [0, 3]),
        (get_func_responses("list_policies"), {'PolicyName': 'deny_all'}, False, [1, 3]),
    ])
    def test__filter(self, mock_id, result, results_filter, return_filtered, expected_result):
        # expected_result is either a count (when a plain int) or a
        # [count, total] pair (when a list) -- see the branch below.
        options = {}
        iam_cli = AwsIamClient(get_config())
        response = iam_cli._filter(result, results_filter, return_filtered=return_filtered)
        if isinstance(expected_result, list):
            assert (expected_result[0] == response[0])
            assert (expected_result[1] == len(response[1]))
        else:
            assert (expected_result == response[0])
            assert (expected_result == len(response[1]))
#!/usr/bin/env python
def fields_from_list(line, columns=None, default=""):
for n in columns or xrange(len(line)):
try:
yield line[n].rstrip()
except IndexError:
yield default
def split_lines(lines, fields=None, delim=None):
    """For each raw line, split on *delim* (None = whitespace) and yield the
    chosen fields as a tuple."""
    for raw in lines:
        parts = raw.split(delim)
        yield tuple(fields_from_list(parts, fields))
def get_list_from_str(str_, cast_callable=int):
    """Split a comma-separated string and cast each stripped piece."""
    pieces = str_.split(',')
    return [cast_callable(piece.strip()) for piece in pieces]
def main():
    """Command-line entry point: a `cut`-like field selector with sorting.

    Reads lines from stdin, splits them on the input delimiter, optionally
    selects/reorders columns, optionally sorts by one or more fields, and
    writes the result to stdout.
    """
    import sys
    from optparse import OptionParser
    argp = OptionParser(version="0.2",
                        description="Python port of cut with sort by field")
    argp.add_option("-f","--fields", dest="column_ordinals",
                    help="Field Number")
    argp.add_option("-d","--delimeter", dest="idelim",
                    help="Field Delimiter",
                    default=None)
    argp.add_option("-o","--output-delim", dest="odelim",
                    help="Output Delimiter",
                    default='||')
    argp.add_option('-O', "--output-formatstr", dest="output_formatstr",
                    help="Output Formatter")
    argp.add_option("--nh","--no-header", dest="sheader",
                    action="store_true",
                    help="Drop First Line")
    argp.add_option("-s","--sort-asc", dest="sort_a",
                    help="Sort Ascending by field number")
    argp.add_option("-r","--sort-reverse", dest="sort_reverse",
                    action='store_true',
                    default=False,
                    help="Reverse the sort order")
    (options, args) = argp.parse_args()
    ilines = sys.stdin.readlines()
    if (options.sheader):
        # Drop the header line before any other processing.
        ilines.pop(0)
    if (options.column_ordinals):
        column_ordinals = get_list_from_str(options.column_ordinals)
        nl = split_lines(ilines, column_ordinals, options.idelim)
    else:
        nl = split_lines(ilines, delim=options.idelim)
    if (options.sort_a):
        columns = get_list_from_str(options.sort_a)
        nl = sorted(nl,
                    key=lambda row: list(fields_from_list(row, columns)),
                    reverse=options.sort_reverse)
    if options.output_formatstr:
        fmtstr = options.output_formatstr
        for line in nl:
            # BUG FIX: each row is already a tuple, which is exactly what
            # %-formatting expects. The original wrapped it in list(),
            # handing % a single list object -- multi-field format strings
            # raised TypeError and single-field ones printed "['x']".
            print(fmtstr % line)
    else:
        for line in nl:
            print(options.odelim.join(line).rstrip())
if __name__=="__main__":
    main()
|
# TCP server demo (Python 2 syntax: print statements, implicit-bytes send).
# A web site lives on a hosting server; the server waits and listens for
# incoming connections.
import socket
server_socket=socket.socket()
host=socket.gethostname()
port =9999
# Bind to this machine's hostname on the chosen port and start listening
# with a backlog of up to 5 pending connections.
server_socket.bind((host,port))
print "Waiting for connection..."
server_socket.listen(5)
# Accept clients forever: greet each one, then close its connection.
while True:
    conn,addr=server_socket.accept()
    print 'Got connection from', addr
    conn.send('Server Saying HI')
    conn.close()
import math
from decimal import Decimal
def main(x):
    """Print the logistic sigmoid 1/(1 + e**-x) of x (computed with Decimal
    for extra precision), then pass the result to demain() to invert it."""
    ac= Decimal(1 /(1+Decimal(math.e)**(Decimal(-x))))
    print ac
    #print 1+math.e**-x
    #print math.log(1-ac, math.e)
    # demain is defined after this function but before main() is invoked at
    # the bottom of the module, so the forward reference is safe.
    demain(ac)
def demain(y):
    """Invert the sigmoid: for y = 1/(1 + e**-x), 1/y - 1 equals e**-x, so
    |ln(1/y - 1)| recovers |x|. Printed twice: once via math.log and once
    via Decimal.ln(), to compare the two implementations."""
    a =Decimal(1/y)
    b = Decimal(a-1)
    print math.fabs(math.log(b)), "Math"
    print math.fabs(b.ln()), "Decimal"
# Demo run (Python 2 script): prints sigmoid(61) and its inversion.
main(61)
# Lists (arrays)
squares = [1, 3, 7, 9, 11]

# 1. Indexing (negative indexes count from the end)
print(squares[2])
print(squares[-1])

# 2. Slicing
# (1) basic slices
print(squares[1:])
print(squares[:-1])

# (2) replacing a range of elements (the list may grow or shrink)
squares[1:3] = [45, 23, 56]
print(squares)

# (3) deleting a range of elements
squares[1:3] = []
print(squares)

# (4) clearing a list in place
arr = [1, 2, 3, 4]
arr[:] = []  # or: arr = []
print('arr', arr)

# 3. Concatenation, similar to strings
print(squares + [10, 23])

# 4. list.append adds one element at the end
cubes = [1, 8, 27]
cubes.append(64)
print(cubes)
def fibonacci(num):
    """Return the Fibonacci numbers not exceeding num (sequence 1, 1, 2, ...)."""
    previous, current = 0, 1
    result = []
    while current <= num:
        result.append(current)
        previous, current = current, previous + current
    return result


print(fibonacci(23))
|
from rest_framework import serializers
from . import models
#Serelisation Domain
class TempsInternetSerializer(serializers.ModelSerializer):
    """DRF serializer for TempsInternet (average internet time) records."""

    class Meta:
        #year = serializers.DateField(format='%Y')
        model = models.TempsInternet
        fields = ('id', 'temps_moyens_internet', 'created_at')

    def create(self, validated_data):
        """Build and persist a new TempsInternet row from validated input."""
        temps = models.TempsInternet(
            temps_moyens_internet=validated_data['temps_moyens_internet'],
        )
        temps.save()
        return temps

    def update(self, instance, validated_data):
        """Update an existing row's average-time field and persist it."""
        # BUG FIX: the original assignment ended with a trailing comma,
        # storing a one-element *tuple* in the field instead of the value.
        instance.temps_moyens_internet = validated_data['temps_moyens_internet']
        instance.save()
        return instance
# Build script for the v4l2_camera extension: native C++ module on Linux,
# Cython wrapper elsewhere. Run with: python setup.py build_ext --inplace
import platform
from setuptools import setup, Extension
# NOTE(review): this import re-binds (shadows) the setuptools `setup`
# imported just above with the deprecated distutils one -- confirm it can
# be removed so the setuptools version is actually used.
from distutils.core import setup
s = platform.platform()
if s.startswith('Linux'):
    # Linux: compile the C++ V4L2 camera sources directly.
    setup(name='v4l2_camera',
        ext_modules=[Extension("v4l2_camera", sources=["v4l2_camera_module.cpp", "v4l2_camera.cpp"], language="c++")
        ],
        language="c++"
        )
else:
    # Non-Linux: build from the Cython wrapper instead.
    from Cython.Build import cythonize
    setup(
        name='Hello world app',
        ext_modules=cythonize(["v4l2_camera.pyx"], language="c++"),
    )
# python setup.py build_ext --inplace
# v4l2_camera_module.
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Package metadata and cffi extension wiring for the IORN project.
setup(
    name="iorn",
    version="1.0",
    description="IORN: An Effective Remote Sensing Image Scene Classification Framework, based on Oriented Response Networks",
    author="Jue Wang",
    author_email="2120170825@bit.edu.cn",
    # Require cffi.
    install_requires=["cffi>=1.0.0"],
    setup_requires=["cffi>=1.0.0"],
    # Exclude the build files.
    packages=find_packages(exclude=["build"]),
    # Package where to put the extensions. Has to be a prefix of build.py.
    ext_package="",
    # Extensions to compile.
    cffi_modules=[
        os.path.join(os.path.dirname(__file__), "build.py:ffi")
    ],
)
|
import gym
import lunarlander_theta
import torch
import numpy as np
from train_optimal_agent import QNetwork
import pickle
def gen_traj(episodes, t_delay=8, theta=None):
    """Roll out the trained DQN in LunarLanderTheta and record trajectories.

    episodes -- number of episodes to run
    t_delay  -- a new greedy action is computed every t_delay steps and
                repeated in between
    theta    -- reward-hypothesis name; selects both the model file
                ("models/dqn_<theta>.pth") and the env reset variant.
                NOTE(review): despite the None default, theta must be a
                string -- "models/dqn_" + theta raises TypeError otherwise.

    Returns a list of trajectories; each recorded step is
    [t, action, awake, state].
    """
    # load environment
    env = gym.make('LunarLanderTheta-v0')
    # load our trained q-network
    path = "models/dqn_" + theta + ".pth"
    qnetwork = QNetwork(state_size=8, action_size=4, seed=1)
    qnetwork.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    qnetwork.eval()
    softmax = torch.nn.Softmax(dim=1)
    dataset = []
    for episode in range(episodes):
        state = env.reset(theta=theta)
        xi = []
        episode_reward = 0
        for t in range(1000):
            if t%t_delay == 0:
                # Pick the greedy action; softmax only rescales the
                # Q-values, so argmax selects the same index as on the
                # raw network output.
                with torch.no_grad():
                    state_t = torch.from_numpy(state).float().unsqueeze(0)
                    action_values = qnetwork(state_t)
                    action_values = softmax(action_values).cpu().data.numpy()[0]
                    action = np.argmax(action_values)
            # env.render() # can always toggle visualization
            next_state, _, done, info = env.step(action)
            awake = info["awake"]
            reward = info["reward"]
            # Record the state *before* the transition, with the action taken.
            xi.append([t] + [action] + [awake] + [state])
            state = next_state
            episode_reward += reward
            if done:
                print("\rReward: {:.2f}\tLanded: {}\tReward: {}"\
                        .format(episode_reward, awake, theta), end="")
                dataset.append(xi)
                break
    env.close()
    return dataset
def birl_belief(beta, D, O):
    """Bayesian-IRL belief over the three reward hypotheses.

    For each hypothesis theta in (center, anywhere, crash) the score is
    exp(beta * total reward of the demos D under theta) divided by
    exp(beta * total reward of theta's own optimal rollouts O[theta]).
    The three scores are normalized into a probability vector.
    """
    thetas = ("center", "anywhere", "crash")
    scores = []
    for theta in thetas:
        rewards_demo = np.asarray([Reward(xi, theta) for xi in D], dtype=np.float32)
        rewards_opt = np.asarray([Reward(xi, theta) for xi in O[theta]], dtype=np.float32)
        numerator = np.exp(beta * sum(rewards_demo))
        denominator = np.exp(beta * sum(rewards_opt))
        scores.append(numerator / denominator)
    # Normalize to a belief distribution over the three hypotheses.
    total = scores[0] + scores[1] + scores[2]
    return [score / total for score in scores]
def main():
    """Generate confidence rollouts for each reward hypothesis and pickle them."""
    episodes = 25
    t_delay = 10
    # NOTE(review): t_delay is never passed to gen_traj, so its default (8)
    # is what actually runs -- confirm whether t_delay=t_delay was intended.
    confidence = {'center': [], 'anywhere': [], 'crash': []}
    confidence['center'] = gen_traj(episodes, theta="center")
    confidence['anywhere'] = gen_traj(episodes, theta="anywhere")
    confidence['crash'] = gen_traj(episodes, theta="crash")
    pickle.dump( confidence, open( "choices/confidence.pkl", "wb" ) )
    # NOTE(review): `demos` is loaded but never used in this function.
    demos = pickle.load( open( "choices/demos.pkl", "rb") )
if __name__ == "__main__":
    main()
|
import datetime
import uuid
import sqlalchemy
from wintellect_demo.data.modelbase import SqlAlchemyBase
class CmsPage(SqlAlchemyBase):
    """SQLAlchemy model for a CMS page keyed by its URL.

    A page either carries rendered HTML, or acts as a redirect when
    is_redirect is set (redirect_url holds the target).
    """
    __tablename__ = 'CMSPage'
    # The page URL doubles as the primary key.
    url = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
    # NOTE(review): default is datetime.datetime.now (local time), evaluated
    # per insert; confirm UTC was not intended.
    created_date = sqlalchemy.Column(sqlalchemy.DateTime,
                                     default=datetime.datetime.now)
    html = sqlalchemy.Column(sqlalchemy.String)
    is_redirect = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
    redirect_url = sqlalchemy.Column(sqlalchemy.String)
|
from django.conf import settings
import boto3
class Bucket:
    """Thin wrapper around an S3-compatible object store configured via
    Django settings (service name, credentials, endpoint, bucket name)."""

    def __init__(self, *args, **kwargs):
        # One low-level client per Bucket instance.
        session = boto3.session.Session()
        self.conn = session.client(
            service_name=settings.AWS_SERVICE_NAME,
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            endpoint_url=settings.AWS_S3_ENDPOINT_URL,
        )

    def get_objects(self):
        """Return the bucket's object listing (first page), or None if empty."""
        res = self.conn.list_objects_v2(Bucket=settings.AWS_STORAGE_BUCKET_NAME)
        if res['KeyCount']:
            return res['Contents']
        return None

    def download_object(self, key):
        """Download *key* into AWS_LOCAL_STORAGE under the same name."""
        with open(settings.AWS_LOCAL_STORAGE + key, 'wb') as f:
            self.conn.download_fileobj(settings.AWS_STORAGE_BUCKET_NAME, key, f)

    def delete_object(self, key):
        """Delete *key* from the bucket."""
        # BUG FIX: the bucket name lives in Django settings, not on self;
        # the original `self.AWS_STORAGE_BUCKET_NAME` raised AttributeError.
        self.conn.delete_object(Bucket=settings.AWS_STORAGE_BUCKET_NAME, Key=key)
bucket = Bucket() |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 17:03:26 2018
@author: Mohammad SAFEEA
Test script of iiwaPy class.
"""
from sunrisePy import sunrisePy
import time
# Robot connection parameters.
ip='172.31.1.148'
#ip='localhost'
iiwa=sunrisePy(ip)
# Toggle the blue LED to visually confirm the connection works.
iiwa.setBlueOff()
time.sleep(2)
iiwa.setBlueOn()
# read some data from the robot; short sleeps pace the request stream.
try:
    print('End effector position and orientation is:')
    print(iiwa.getEEFPos())
    time.sleep(0.1)
    print('Forces acting at end effector are')
    print(iiwa.getEEF_Force())
    time.sleep(0.1)
    print('Cartesian position (X,Y,Z) of end effector')
    print(iiwa.getEEFCartesianPosition())
    time.sleep(0.1)
    print('Moment at end effector')
    print(iiwa.getEEF_Moment())
    time.sleep(0.1)
    print('Joints positions')
    print(iiwa.getJointsPos())
    time.sleep(0.1)
    print('External torques at the joints')
    print(iiwa.getJointsExternalTorques())
    time.sleep(0.1)
    print('Measured torques at the joints')
    print(iiwa.getJointsMeasuredTorques())
    time.sleep(0.1)
    print('Measured torque at joint 5')
    print(iiwa.getMeasuredTorqueAtJoint(5))
    time.sleep(0.1)
    print('Rotation of EEF, fixed rotation angles (X,Y,Z)')
    print(iiwa.getEEFCartesianOrientation())
    time.sleep(0.1)
    print('Joints positions has been streamed external torques are:')
    print(iiwa.getEEFCartesianOrientation())
    time.sleep(0.1)
except Exception as e:
    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit and hid the cause; catch Exception and report it.
    print('an error happened')
    print(e)
iiwa.close()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import datetime
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.abstract_models import AbstractUser
# Create your models here.
class DateTimeModel(models.Model):
    """
    Abstract base model adding created/modified audit timestamps.
    `created` is set once on insert; `modified` refreshes on every save.
    """
    created = models.DateTimeField(_('Created Date'), auto_now_add=True,
                                   editable=False)
    modified = models.DateTimeField(
        _('Modified Date'), auto_now=True, editable=False)
    # The previous __init__ override only forwarded to super() and was
    # removed as a no-op.

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Payment_Plan(DateTimeModel):
    """A purchasable plan with a display name, description and integer price."""
    name = models.CharField(_("Name"), max_length=255)
    description = models.TextField(_("Description"), max_length=255)
    price = models.IntegerField(_("Price"), default=0)

    def __str__(self):
        # str(x) produces the same text as the previous '%s' % x.
        return str(self.name)

    class Meta:
        verbose_name = _('Payment Plan')
class User(AbstractUser):
    """Project user model extending Oscar's AbstractUser with a phone number
    and an optional payment plan."""
    # Optional contact phone number.
    phone = models.CharField(
        _('phone'), max_length=255, blank=True)
    # Payment plan the user subscribed to; deleting the plan deletes the user
    # rows referencing it (CASCADE) -- NOTE(review): confirm that is intended.
    plan = models.ForeignKey('Payment_Plan', related_name='user_plan_rel', on_delete=models.CASCADE, null=True, blank=True)
    # class Meta:
    #     db_table = 'auth_user'
|
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""lambda-uploader - Simple way to create and upload python lambda jobs"""
from __future__ import print_function
import sys
import logging
import traceback
import lambda_uploader
from os import getcwd, path, getenv
from lambda_uploader import package, config, uploader, subscribers
from boto3 import __version__ as boto3_version
from botocore import __version__ as botocore_version
LOG = logging.getLogger(__name__)
NAMESPACE = 'rax_jira'
CHECK = '✅'
INTERROBANG = '‽'
RED_X = '❌'
LAMBDA = 'λ'
TRACEBACK_MESSAGE = """%s Unexpected error. Please report this traceback.
Uploader: %s
Botocore: %s
Boto3: %s
"""
# Used for stdout for shell
def _print(txt):
    """Echo txt to stdout, prefixed with the lambda symbol where supported."""
    # Windows Powershell doesn't support Unicode, so skip the prefix there.
    if sys.platform in ('win32', 'cygwin'):
        print(txt)
        return
    print("%s %s" % (LAMBDA, txt))
def _execute(args):
    """Run the build/clean/upload pipeline from parsed CLI arguments.

    Steps: load lambda.json config, resolve the virtualenv strategy,
    build (or just zip) the package, optionally clean the workspace,
    upload to AWS Lambda (with optional publish/alias/subscriptions),
    and delete the zipfile.
    """
    pth = path.abspath(args.function_dir)
    cfg = config.Config(pth, args.config, role=args.role,
                        variables=args.variables)
    if args.s3_bucket:
        cfg.set_s3(args.s3_bucket, args.s3_key)
    # Three-way virtualenv selection encoded as False / path / None.
    if args.no_virtualenv:
        # specified flag to omit entirely
        venv = False
    elif args.virtualenv:
        # specified a custom virtualenv
        venv = args.virtualenv
    else:
        # build and include virtualenv, the default
        venv = None
    if args.no_build:
        pkg = package.create_package(pth)
    else:
        _print('Building Package')
        # CLI flags override the values from the config file.
        requirements = cfg.requirements
        if args.requirements:
            requirements = path.abspath(args.requirements)
        extra_files = cfg.extra_files
        if args.extra_files:
            extra_files = args.extra_files
        pkg = package.build_package(pth, requirements,
                                    venv, cfg.ignore, extra_files,
                                    pyexec=cfg.runtime)
    if not args.no_clean:
        pkg.clean_workspace()
    if not args.no_upload:
        # Set publish if flagged to do so
        if args.publish:
            cfg.set_publish()
        create_alias = False
        # Set alias if the arg is passed
        if args.alias is not None:
            cfg.set_alias(args.alias, args.alias_description)
            create_alias = True
        _print('Uploading Package')
        upldr = uploader.PackageUploader(cfg, args.profile)
        upldr.upload(pkg)
        # If the alias was set create it
        if create_alias:
            upldr.alias()
        if cfg.subscription:
            _print('Creating subscription')
            subscribers.create_subscriptions(cfg, args.profile)
        pkg.clean_zipfile()
    _print('Fin')
def main(arv=None):
    """lambda-uploader command line interface.

    NOTE(review): the `arv` parameter is never used; arguments are always
    read from sys.argv by parse_args() -- kept for interface compatibility.
    """
    # BUG FIX: the original guard (`version_info[0] < 3 and not
    # version_info[1] == 7`) rejected every 2.x except exactly 2.7, which
    # contradicts the "2.7 or later" message. Tuple comparison states the
    # intended minimum version directly.
    if sys.version_info < (2, 7):
        raise RuntimeError('lambda-uploader requires Python 2.7 or later')
    import argparse
    parser = argparse.ArgumentParser(
        description='Simple way to create and upload python lambda jobs')
    parser.add_argument('--version', '-v', action='version',
                        version=lambda_uploader.__version__)
    parser.add_argument('--no-upload', dest='no_upload',
                        action='store_const', help='dont upload the zipfile',
                        const=True)
    parser.add_argument('--no-clean', dest='no_clean',
                        action='store_const',
                        help='dont cleanup the temporary workspace',
                        const=True)
    parser.add_argument('--publish', '-p', dest='publish',
                        action='store_const',
                        help='publish an upload to an immutable version',
                        const=True)
    parser.add_argument('--virtualenv', '-e',
                        help='use specified virtualenv instead of making one',
                        default=None)
    parser.add_argument('--extra-files', '-x',
                        action='append',
                        help='include file or directory path in package',
                        default=[])
    parser.add_argument('--no-virtualenv', dest='no_virtualenv',
                        action='store_const',
                        help='do not create or include a virtualenv at all',
                        const=True)
    parser.add_argument('--role', dest='role',
                        default=getenv('LAMBDA_UPLOADER_ROLE'),
                        help=('IAM role to assign the lambda function, '
                              'can be set with $LAMBDA_UPLOADER_ROLE'))
    parser.add_argument('--variables', dest='variables',
                        help='add environment variables')
    parser.add_argument('--profile', dest='profile',
                        help='specify AWS cli profile')
    parser.add_argument('--requirements', '-r', dest='requirements',
                        help='specify a requirements.txt file')
    alias_help = 'alias for published version (WILL SET THE PUBLISH FLAG)'
    parser.add_argument('--alias', '-a', dest='alias',
                        default=None, help=alias_help)
    parser.add_argument('--alias-description', '-m', dest='alias_description',
                        default=None, help='alias description')
    parser.add_argument('--s3-bucket', '-s', dest='s3_bucket',
                        help='S3 bucket to store the lambda function in',
                        default=None)
    parser.add_argument('--s3-key', '-k', dest='s3_key',
                        help='Key name of the lambda function s3 object',
                        default=None)
    parser.add_argument('--config', '-c', help='Overrides lambda.json',
                        default='lambda.json')
    parser.add_argument('function_dir', default=getcwd(), nargs='?',
                        help='lambda function directory')
    parser.add_argument('--no-build', dest='no_build',
                        action='store_const', help='dont build the sourcecode',
                        const=True)
    verbose = parser.add_mutually_exclusive_group()
    verbose.add_argument('-V', dest='loglevel', action='store_const',
                         const=logging.INFO,
                         help="Set log-level to INFO.")
    verbose.add_argument('-VV', dest='loglevel', action='store_const',
                         const=logging.DEBUG,
                         help="Set log-level to DEBUG.")
    parser.set_defaults(loglevel=logging.WARNING)
    args = parser.parse_args()
    logging.basicConfig(level=args.loglevel)
    try:
        _execute(args)
    except Exception:
        # Report version context for bug reports, then the traceback,
        # and exit non-zero.
        print(TRACEBACK_MESSAGE
              % (INTERROBANG, lambda_uploader.__version__,
                 boto3_version, botocore_version),
              file=sys.stderr)
        traceback.print_exc()
        sys.stderr.flush()
        sys.exit(1)
|
from django.shortcuts import render
from django.http import JsonResponse
from django.views import View
from locacaoeventos.utils.forms import PhotoProvisoryForm
from locacaoeventos.utils.main import base_context
from locacaoeventos.utils.datetime import test_date
from locacaoeventos.apps.place.placecore.models import Place, PlacePhoto, PhotoProvisory
from locacaoeventos.apps.user.buyerprofile.models import BuyerProfile, FamilyMember
class UploadFile(View):
    """Ajax endpoint that stores an uploaded photo as a PhotoProvisory."""

    def post(self, request):
        """Validate the upload form and return JSON with the stored photo's
        name/url/pk, or {'is_valid': False} when validation fails."""
        # BUG FIX: the debug print of request.FILES["photo"] was removed --
        # it raised (MultiValueDictKeyError) before form validation whenever
        # no file was posted.
        form = PhotoProvisoryForm(request.POST, request.FILES)
        if form.is_valid():
            photo = form.save()
            data = {
                'is_valid': True,
                'name': photo.photo.name,
                'url': photo.photo.url,
                'pk': photo.pk
            }
        else:
            data = {'is_valid': False}
        return JsonResponse(data)
class GetPhoto(View):
    """Ajax endpoint returning url/pk for a comma-separated list of
    PhotoProvisory primary keys (GET parameter "photos_list")."""

    def get(self, request):
        photos_list = request.GET.get("photos_list").split(",")
        photos = []
        # Iterate the pks directly instead of indexing via range(len(...)).
        for photo_pk in photos_list:
            photo_provisory = PhotoProvisory.objects.get(pk=photo_pk)
            photos.append({
                "url": str(photo_provisory.photo),
                "pk": str(photo_provisory.pk)
            })
        return JsonResponse({"photos": photos})
class CreateDeleteFamilyMemberAjax(View):
    """Ajax endpoint that deletes a family member when "familymember_pk" is
    supplied, and otherwise creates one from the remaining GET parameters.

    NOTE(review): both create and delete are performed via GET, which is not
    idempotent/safe -- confirm whether POST was intended.
    """
    def get(self, request):
        data = {}
        familymember_pk = request.GET.get("familymember_pk", None)
        if familymember_pk: # Deletes
            data = {"familymember_pk":familymember_pk}
            familymember = FamilyMember.objects.get(pk=familymember_pk)
            familymember.delete()
        else: # Creates
            familymember_gender = request.GET.get("familymember_gender")
            familymember_birthday = request.GET.get("familymember_birthday")
            familymember_name = request.GET.get("familymember_name")
            familymember_relation = request.GET.get("familymember_relation")
            # Reject unparseable birthdays before touching the database.
            if not test_date(familymember_birthday):
                data["error"] = True
            else:
                # The new member is attached to the requesting user's
                # buyer profile.
                familymember = FamilyMember.objects.create(
                    name = familymember_name,
                    gender = familymember_gender,
                    birthday = familymember_birthday,
                    relation = familymember_relation,
                    related_to = BuyerProfile.objects.get(user=request.user)
                )
                data = {
                    "familymember_gender": familymember_gender,
                    "familymember_birthday": familymember_birthday,
                    "familymember_name": familymember_name,
                    "familymember_relation": familymember_relation,
                    "pk": familymember.pk
                }
        return JsonResponse(data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.