index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
7,300
1305991a9cd82ddeaffff1545a35ced992e6792f
#################################################################################### # # Kaggle Competition: https://www.kaggle.com/c/msk-redefining-cancer-treatment # Sponsor : Memorial Sloan Kettering Cancer Center (MSKCC) # Author: Amrut Shintre # #################################################################################### ##################### # Importing Libraries ##################### import numpy as np import pandas as pd import matplotlib as plt import re import nltk nltk.download('stopwords') from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import TruncatedSVD from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import StandardScaler import xgboost as xgb from sklearn.model_selection import train_test_split from sklearn import metrics import gc import random #################### # Importing datasets #################### # Training Dataset train_df = pd.read_csv('training_text', sep = '\|\|', engine = 'python', names = ['ID', 'Text'], header = None) train_df = train_df.iloc[1:,:] train_df.index = range(len(train_df)) train_var = pd.read_csv('training_variants') # Testing Dataset test_df = pd.read_csv('test_text', sep = '\|\|', engine = 'python', names = ['ID', 'Text'], header = None) test_var = pd.read_csv('test_variants') # --------------------------------------------TEXT --------------------------------------------- ############## # TextCleaning ############## def text_cleaning(text_df): corpus = [] for i in range(len(text_df)): text = re.sub('[^a-zA-Z]', ' ', text_df['Text'][i]) # Removing punctuation marks, #numbers, etc and returning only letters text = text.lower() # Converting all the uppercase letters to lowercase text = text.split() # Splitting a sentence into a list of strings containing a single word. ps = PorterStemmer() # Stemming e.g. 
lovely -> love text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))] text = ' '.join(text) # Joining the cleaned words corpus.append(text) # Appending it to the new list. return (corpus) # Training Text Data corpus_train = text_cleaning(train_df) # Testing Text Data corpus_test = text_cleaning(test_df) ############################################# # Term Frequency - Inverse Document Frequency ############################################# tfidf = TfidfVectorizer() tfidf_tr = tfidf.fit_transform(corpus_train).toarray() tfidf_test = tfidf.transform(corpus_test).toarray() ############################## # Singular Value Decomposition ############################## svd = TruncatedSVD(n_components = 1000) # considering 98% variance in the Data svd_tr = svd.fit_transform(tfidf_tr) # Fitting on cleaned training text data svd_train = svd.transform(tfidf_test) # Transforming on cleaned testing text data svd_tr = pd.DataFrame(svd_tr) svd_test = pd.DataFrame(svd_train) #explainedvar = svd.explained_variance_ratio_ #exp_var = explainedvar.cumsum() # -------------------------------------------- VARIANTS --------------------------------------------- #################### # Dependent Variable #################### y = train_var['Class'].values y = y-1 ################# # Merging Dataset ################# # Merging the dataset for data preparation and feature engineering df = pd.concat([train_var, test_var], axis = 0) df = df.drop(['ID'], axis = 1) df['ID'] = range(df.shape[0]) df.index = range(df.shape[0]) df_text = pd.concat([train_df, test_df], axis = 0) df_text = df_text.drop('ID', axis = 1) df_text['ID'] = range(df_text.shape[0]) df_text.index = range(df_text.shape[0]) df_all = pd.merge(df, df_text, how = 'left', on = 'ID') ################ # Missing Values ################ # Checking for missing values column_list = train_var.columns.values.tolist() missing_values = pd.DataFrame() missing_values['Columns'] = column_list for i in column_list: 
missing_values['No. of missing values'] = train_var[i].isnull().values.ravel().sum() # There are no missing values. ####################### # Categorical Variables ####################### # Extracting the columns having categorical Variables. column_list = df.columns categorical_columns = [] for i in column_list: if df[i].dtype == 'O': categorical_columns.append(i) # Encoding the columns with categorical variables # Label Encoding for i in categorical_columns: le = LabelEncoder() df[i + '_le'] = le.fit_transform(df[i]) df[i + '_length'] = df[i].map(lambda x: len(str(x))) # Feature Engineering df_all['Gene_Share'] = df_all.apply(lambda r: sum([1 for w in r['Gene'].split(' ') if w in r['Text'].split(' ')]), axis=1) df_all['Variation_Share'] = df_all.apply(lambda r: sum([1 for w in r['Variation'].split(' ') if w in r['Text'].split(' ')]), axis=1) ################### # Splitting Dataset ################### train = df_all.iloc[:len(train_var), :] test = df_all.iloc[len(train_var):,:] test.index = range(len(test_var)) train = train.drop(['Gene', 'Variation', 'ID', 'Text', 'Class'], axis = 1) test = test.drop(['Gene', 'Variation', 'Text', 'ID', 'Class'], axis = 1) train_final = pd.concat([train, svd_tr], axis = 1) test_final = pd.concat([test, svd_test], axis = 1) ################# # Standardization ################# sc = StandardScaler() train_final = sc.fit_transform(train_final) test_final = sc.transform(test_final) train_final = pd.DataFrame(train_final) test_final = pd.DataFrame(test_final) # -------------------------------------------- MODEL --------------------------------------------- ################## # XGBoost Matrix ################## dtrain = xgb.DMatrix(train_final, y) dtest = xgb.DMatrix(test_final) ################## # Cross-Validation ################## def docv(param, iterations, nfold): model_CV = xgb.cv( params = param, num_boost_round = iterations, nfold = nfold, dtrain = dtrain, seed = random.randint(1, 10000), early_stopping_rounds = 100, maximize = 
False, verbose_eval = 50) gc.collect() best = min(model_CV['test-mlogloss-mean']) best_iter = model_CV.shape[0] print (best) return (best_iter) ######### # Testing ######### def doTest(param, iteration): X_tr, X_val, y_tr, y_val = train_test_split(train_final, y, test_size = 0.2, random_state = random.randint(1,1000)) watchlist = [(xgb.DMatrix(X_tr, y_tr), 'train'), (xgb.DMatrix(X_val, y_val), 'validation')] model = xgb.train( params = param, dtrain = xgb.DMatrix(X_tr, y_tr), num_boost_round = iteration, evals = watchlist, verbose_eval = 50, early_stopping_rounds = 100) score = metrics.log_loss(y_val, model.predict(xgb.DMatrix(X_val)), labels = range(9)) predicted_class = model.predict(dtest) print (score) return (predicted_class) ######### # Bagging ######### def Bagging(N, params, best_iter): for i in range(N): param = params p = doTest(param, best_iter) if i == 0: preds = p.copy() else: preds = preds + p predictions = preds/N predictions = pd.DataFrame(predictions) return (predictions) ################### # Running the Model ################### params = { 'eta': 0.02, 'max_depth': 6, 'objective': 'multi:softprob', 'eval_metric': 'mlogloss', 'silent': False, 'seed': random.randint(1,100), 'num_class': 9 } cross_vali = docv(params, 10000, 5) predicted_class = Bagging(5, params, cross_vali) # -------------------------------------------- SUBMISSION --------------------------------------------- sub_file = pd.DataFrame() sub_file['ID'] = test_var['ID'].values Sub_File = pd.concat([sub_file, predicted_class], axis = 1) Sub_File.columns = ['ID', 'Class1', 'Class2', 'Class3', 'Class4', 'Class5', 'Class6', 'Class7', 'Class8', 'Class9'] Sub_File.to_csv("submission33.csv", index = False) # -------------------------------------------- Project Layout --------------------------------------------- # 1) Text Cleaning # 2) TFIDF Vectorizer and Singular Value Decomposition # 3) Feature Engineering # 4) Building a Model and trying out different models # 5) Parameter Tuning # 6) 
Bagged Boosting
7,301
aec374ffa368755350d0d75c96860f760e8524e1
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ObjectDoesNotExist

from .models import Post
# NOTE: the original imported `redirect` twice; the duplicate import was removed.


def index(request):
    """Render the home page with the ten most recent published posts (status=1)."""
    blogs = Post.objects.filter(status=1).order_by('-created_on')[:10]
    context = {'Post': blogs}
    return render(request, 'blogapp/index.html', context)


def blogs(request):
    """Render the static blog listing page."""
    return render(request, template_name='blogapp/blog.html')


def detail(request, slug):
    """Render a single post looked up by slug; on a miss, render the page without it."""
    try:
        post = Post.objects.get(slug=slug)
        context = {'post': post}
        return render(request, 'blogapp/detail.html', context)
    except ObjectDoesNotExist:
        # Post not found: render the same template with no 'post' in context.
        return render(request, template_name='blogapp/detail.html')


def about(request):
    """Render the static about page."""
    return render(request, template_name='blogapp/about.html')


def loginPage(request):
    """Authenticate the submitted credentials; redirect to 'index' on success."""
    # form = CreateUserForm()
    # context = {'form': form}
    if request.method == 'POST':
        username = request.POST.get('username')
        # NOTE(review): the form field is named 'password1' — confirm the
        # login template uses the same name.
        password = request.POST.get('password1')
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('index')
        else:
            messages.error(request, 'Username or Password Incorrect')
            return render(request, 'accounts/login.html',)
    else:
        return render(request, 'accounts/login.html',)


def logoutUser(request):
    """Log the current user out and return them to the login page."""
    logout(request)
    return redirect('login')


def contact(request):
    """Render the static contact page."""
    return render(request, template_name='blogapp/contact.html')


def products(request):
    """Render the static products page."""
    return render(request, template_name='mainapp/products.html')
7,302
e8ef3a5e41e68b4d219aa1403be392c51cc010e6
""" 对自定义的类进行排序 """ import operator class User: def __init__(self, name, id): self.name = name self.id = id def __repr__(self): return 'User({},{})'.format(self.name, self.id) def run(): users = [User('wang', 1), User('zhao', 4), User('chen', 3), User('wang', 2)] # 这种方式相对速度快,也适用于min/max等 a = sorted(users, key=operator.attrgetter('id', 'name')) print(a) b = sorted(users, key=lambda r: (r.id, r.name)) print(b) if __name__ == '__main__': run()
7,303
562888201719456ed2f3c32e81ffd7d2c39dabc3
# Generated by Django 3.1.2 on 2020-10-25 01:19 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('jobs', '0001_initial'), ] operations = [ migrations.AddField( model_name='job', name='link', field=models.URLField(null=True), ), migrations.AddField( model_name='job', name='title', field=models.CharField(default=datetime.date(2020, 10, 25), max_length=200), preserve_default=False, ), ]
7,304
01e9ceb516a323a2017c65e368da419c6570dce2
# -*- coding: utf-8 -*-
# A headless media player based on gstreamer.
from gi.repository import Gst

Gst.init(None)


class Player:
    """Thin wrapper around a GStreamer `playbin` element."""

    def __init__(self, uri=None):
        # A playbin plays media straight from a URI.
        self.player = Gst.ElementFactory.make('playbin', 'player')
        self.uri = uri

    @property
    def uri(self):
        return self._uri

    @uri.setter
    def uri(self, value):
        # Switching media: reset the pipeline first, then point it at the
        # new URI (only when one was actually given).
        self._uri = value
        self.player.set_state(Gst.State.NULL)
        if value:
            self.player.set_property('uri', value)

    def play(self):
        """Start playing"""
        self.player.set_state(Gst.State.PLAYING)

    def pause(self):
        """Pause playing"""
        self.player.set_state(Gst.State.PAUSED)

    def stop(self):
        """Stop playback by resetting the pipeline to the NULL state."""
        self.player.set_state(Gst.State.NULL)
7,305
b6ee3c980357ab22a7969c21207b34546c87092d
from .exec_generator import *
7,306
bb64da929ff2e1e04267518ec93a28bedb5a4de5
# --------------------- MODULE 1 notes --------------------
# Save the file with a .py extension first; otherwise it will not run.
print("Hello")  # Ctrl+S to save
7,307
0cf90cd7704db9f7467e458b402fadb01c701148
# Minimal Tk front-end for the project-local SearchEngine ("ghetto Google").
# Shows a one-line query entry; results open in a new scrollable window.
from search import SearchEngine
import tkinter as tk

if __name__ == "__main__":
    ghettoGoogle = SearchEngine()

    def searchButtonEvent():
        # Callback for the "Search" button: run the query and display results.
        search_query = searchQueryWidget.get()
        search_results = ghettoGoogle.search(search_query)
        resultsCanvas = tk.Tk()
        # NOTE(review): `== None` would normally be `is None`; left unchanged here.
        if search_results == None:
            tk.Label(resultsCanvas,text="No results",justify=tk.LEFT).pack(fill='both')
        else:
            # Text widget with an attached vertical scrollbar.
            searchTextBox = tk.Text(resultsCanvas,height=20,width=100)
            searchTextBox.pack(side=tk.LEFT,fill=tk.Y)
            scrollBar = tk.Scrollbar(resultsCanvas)
            scrollBar.pack(side=tk.RIGHT,fill=tk.Y)
            scrollBar.config(command=searchTextBox.yview)
            searchTextBox.config(yscrollcommand=scrollBar.set)
            # The 'Link' tag renders result titles in blue.
            searchTextBox.tag_config('Link',foreground='blue')
            # Each result appears to be a (title, detail) pair — TODO confirm
            # against SearchEngine.search.
            for i in range(len(search_results)):
                searchTextBox.insert(tk.END,search_results[i][0]+"\n",'Link')
                searchTextBox.insert(tk.END,search_results[i][1]+"\n\n")

    # Main query window.
    canvas = tk.Tk()
    tk.Label(canvas, text = "Enter search query").grid(row = 0)
    searchQueryWidget = tk.Entry(canvas)
    searchQueryWidget.grid(row=0,column=1)
    # NOTE(review): both buttons are gridded at row=1, column=0, so "Search"
    # overlaps "Quit" — one of them probably should use column=1.
    tk.Button(canvas,text="Quit",command=canvas.quit).grid(row=1,column=0,sticky=tk.W)
    tk.Button(canvas,text="Search",command=searchButtonEvent).grid(row=1,column=0,sticky=tk.W)
    canvas.mainloop()
7,308
282dbdb3a8d9ed914e8ca5c7fa74d2873920e18c
def area(a, b):
    """Return the area of a rectangle with sides *a* and *b*."""
    return a * b


def main():
    # Read the rectangle's dimensions from the user (Spanish prompts).
    base = float(input("INTRODUCE LA BASE: "))
    altura = float(input("INTRODUCE LA ALTURA: "))
    print("EL AREA DEL RECTANGULO ES: ", area(base, altura))


if __name__ == '__main__':
    main()
7,309
f311b803d8c0ee68bc43526f56e6b14f3a2836b8
#### As an example below shell script can be used to execute this every 300s. ####!/bin/bash ####while true ####do #### /usr/bin/sudo python3 /path/of/the/python/script.sh ####done #!/usr/bin/python import sys import time import paho.mqtt.client as mqtt broker_url = "<IP_Address_of_MQTT_broker>" broker_port = <MQTT_Broker_port> def on_connect(client, userdata, flags, rc): print("Connected With Result Code: {}".format(rc)) def on_message(client, userdata, message): print("Message Recieved: "+message.payload.decode()) file_name=message.payload.decode() file_path="/home/demouser/nagios/node-check/logs/"+file_name+".ok" file1 = open(file_path, 'w') file1.write(message.payload.decode()+" is up and running\n") file1.close() def on_disconnect(client, userdata, rc): print("Client Got Disconnected") client = mqtt.Client("Nagios_NodeChecker") client.on_connect = on_connect client.on_disconnect = on_disconnect client.on_message = on_message client.username_pw_set(username="<mqtt_username>",password="<mqtt_password>") client.connect(broker_url, broker_port) client.subscribe(topic="nagios/node_check", qos=2) client.message_callback_add("nagios/node_check", on_message) client.loop_start() time.sleep(300) client.loop_stop()
7,310
b5568e84e19719f0fd72197ead47bd050e09f55d
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# itertools.groupby() batches *adjacent* equal elements of an iterable.
import itertools

for key, group in itertools.groupby('ABAABBBCCAAA'):
    print(key, list(group))

# Summary: everything in itertools returns an iterator, not a list; values
# are only computed when the iterator is consumed (e.g. by a for loop).
7,311
54d6121898dc027d6ecaf9c9e7c25391778e0d21
#!/usr/bin/env python # -*- coding: utf-8 -*- # sockdemo.py # # test import struct, threading, signal a = '' if not a: print 'a' else: print 'b' import datetime, time, os print datetime.datetime.now().strftime('%m-%d %H:%M:%S') def double(x): return x*x arr = [1, 2, 3, 4, 5] print map(double, arr) print 2**16 print struct.calcsize('128s32sI8s') _pack = struct.pack('128s8sI8s','abc','huad',1,'666') print repr(_pack) a,b,c,d = struct.unpack('128s8sI8s',_pack) print a.strip('\00') now = datetime.datetime.now() isstop = False def handler(): print 'control C' isstop = True def doStress(): print 123222 while not isstop: time.sleep(1) print 'doStress', datetime.datetime.now() #signal.signal(signal.SIGINT, handler) #signal.signal(signal.SIGTERM, handler) t = threading.Thread(target=doStress, args=()) t.setDaemon(True) t.start() print 'complete', datetime.datetime.now()
7,312
5f84c8654c976bca2fa33e8f9ba5e28e3249253d
import numpy as np import faiss from util import vecs_io, vecs_util from time import time import os ''' 提取vecs, 输出numpy文件 ''' def vecs2numpy(fname, new_file_name, file_type, file_len=None): if file_type == 'bvecs': vectors, dim = vecs_io.bvecs_read_mmap(fname) elif file_type == 'ivecs': vectors, dim = vecs_io.ivecs_read_mmap(fname) elif file_type == 'fvecs': vectors, dim = vecs_io.fvecs_read_mmap(fname) if file_len is not None: vectors = vectors[:file_len] vectors = vectors.astype(np.float32) np.save(new_file_name, vectors) return vectors ''' 创建文件夹, 提取base, query, gnd ''' def get_base_query_gnd(config): os.system("mkdir %s" % (config['project_data_dir'])) print("创建文件夹") base_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['base']) base_npy_dir = '%s/%s' % (config['project_data_dir'], 'dataset.npy') base = vecs2numpy(base_dir, base_npy_dir, config['dataset_type']) print("提取base") query_dir = '%s/%s' % (config['source_data_dir'], config['source_data_fname']['query']) query_npy_dir = '%s/%s' % (config['project_data_dir'], 'queries.npy') query = vecs2numpy(query_dir, query_npy_dir, config['dataset_type']) print("提取query") gnd_npy_dir = '%s/%s' % (config['project_data_dir'], 'answers.npy') # print(base_npy_dir) # print(query_npy_dir) # print(gnd_npy_dir) gnd = vecs_util.get_gnd_numpy(base, query, config['k_gnd'], gnd_npy_dir) print("提取gnd") return base, query, gnd if __name__ == '__main__': fname = '/home/bz/learn-to-hash/data/sift/sift_dataset_unnorm.npy' new_fname = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt' get_NN_graph(fname, new_fname, 10) a = '/home/bz/KaHIP/deploy/graphchecker' b = '/home/bz/learn-to-hash/data/sift/sift_graph_10/test_graph.txt'
7,313
8340872f03c1bf7c1aee0c437258ac8e44e08bb8
# 5.2 Training a convnet from scratch on a "small dataset" (p.131) # Preprocessing (p.133) # Copying images to train, validation and test directories import os, shutil # The path to the directory where the original dataset was uncompressed original_dataset_dir = 'E:/train/' # The directory where we will store our smaller dataset base_dir = 'E:/train/smaller' os.mkdir(base_dir) # Directories for our training, validation, and test splits train_dir = os.path.join(base_dir, 'train') os.mkdir(train_dir) validation_dir = os.path.join(base_dir, 'validation') os.mkdir(validation_dir) test_dir = os.path.join(base_dir, 'test') os.mkdir(test_dir) # Directory with our training cat pictures train_cats_dir = os.path.join(train_dir, 'cats') os.mkdir(train_cats_dir) # Directory with our training dog pictures train_dogs_dir = os.path.join(train_dir, 'dogs') os.mkdir(train_dogs_dir) # Directory with our validation cat pictures validation_cats_dir = os.path.join(validation_dir, 'cats') os.mkdir(validation_cats_dir) # Directory with our validation dog pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') os.mkdir(validation_dogs_dir) # Directory with our validation cat pictures test_cats_dir = os.path.join(test_dir, 'cats') os.mkdir(test_cats_dir) # Directory with our validation dog pictures test_dogs_dir = os.path.join(test_dir, 'dogs') os.mkdir(test_dogs_dir) # Copy first 1000 cat images to train_cats_dir fnames = ['cat.{}.jpg'.format(i) for i in range(1000)] for fname in fnames: src = os.path.join(original_dataset_dir, fname) dst = os.path.join(train_cats_dir, fname) shutil.copyfile(src, dst) # Copy next 500 cat images to validation_cats_dir fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)] for fname in fnames: src = os.path.join(original_dataset_dir, fname) dst = os.path.join(validation_cats_dir, fname) shutil.copyfile(src, dst) # Copy next 500 cat images to test_cats_dir fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)] for fname in fnames: src 
= os.path.join(original_dataset_dir, fname) dst = os.path.join(test_cats_dir, fname) shutil.copyfile(src, dst) # Copy first 1000 dog images to train_dogs_dir fnames = ['dog.{}.jpg'.format(i) for i in range(1000)] for fname in fnames: src = os.path.join(original_dataset_dir, fname) dst = os.path.join(train_dogs_dir, fname) shutil.copyfile(src, dst) # Copy next 500 dog images to validation_dogs_dir fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)] for fname in fnames: src = os.path.join(original_dataset_dir, fname) dst = os.path.join(validation_dogs_dir, fname) shutil.copyfile(src, dst) # Copy next 500 dog images to test_dogs_dir fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)] for fname in fnames: src = os.path.join(original_dataset_dir, fname) dst = os.path.join(test_dogs_dir, fname) shutil.copyfile(src, dst) # 5.2.3 Building our network from keras import layers from keras import models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3))) model.add(layers.MaxPool2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPool2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPool2D((2, 2))) model.add(layers.Conv2D(128, (3, 3), activation='relu')) model.add(layers.MaxPool2D((2, 2))) model.add(layers.Flatten()) model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(1, activation='sigmoid')) from keras import optimizers model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['acc']) # 5.2.4 Data preprocessing (p.137) # 1) Read the picture files. # 2) Decode the JPEG content to RBG grids of pixels. # 3) Convert these into floating point tensors. # 4) Rescale the pixel values (between 0 and 255) to the [0, 1] interval # (as you know, neural networks prefer to deal with small input values). 
from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale=1.0 / 255) test_datagen = ImageDataGenerator(rescale=1.0 / 255) train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary') validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary') # Listing 5.16 Fitting our model using a batch generator (p.138)
7,314
800d87a879987c47f1a66b729932279fc8d4fa38
# Homework 2 PyPoll
# The total number of votes cast
# A complete list of candidates who received votes
# The percentage of votes each candidate won
# The total number of votes each candidate won
# The winner of the election based on popular vote.

# First we'll import the os module
# This will allow us to create file paths across operating systems
import os

# Module for reading CSV files
import csv

# Build the data-file path once and reuse it below. (The original computed
# csvpath but then opened a hard-coded literal instead of using it.)
csvpath = os.path.join('election_data.csv')

with open(csvpath) as csvfile:
    # CSV reader specifies delimiter and variable that holds contents
    csvreader = csv.reader(csvfile, delimiter=',')
    print(csvreader)
7,315
3ef114dd35ef3995ae73bf85bbe38db4fb7045d8
# Django wiring kept for reference; this app currently runs standalone.
# from __future__ import absolute_import
# import os
from celery import Celery
# from django.conf import settings
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'learning.settings')

app = Celery('tasks', broker="redis://localhost")
# app.config_from_object('django.conf:settings')
# app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)


@app.task
def add(x, y):
    """Demo task: return the sum of x and y."""
    return x + y
    # print('Request:{0!r}'.format(self.request))
7,316
7c80c98e32f386362003ac3cd729fa9b279b8e8e
import numpy as np import cv2 import serial import serial.tools.list_ports import time import random import math #import mcpi.minecraft as minecraft #import mcpi.block as block #from house import House #Arduino Serials ports = list(serial.tools.list_ports.comports()) print (ports) for p in ports: print (p[1]) if "Arduino" in p[1]: ser=serial.Serial(port=p[0]) else : print ("No Arduino Device was found connected to the computer") #time.sleep(2) #face detection cap =cv2.VideoCapture(1) face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml') eye_cascade = cv2.CascadeClassifier('./haarcascade_eye.xml') lastpos=0 currentpos=0 lastdis=0 currentdis=0 lastx_d=0 currentx_d=0 shoot=0 #MC #mc=minecraft.Minecraft.create() #pos=mc.player.getTilePos() #pos0=[] #pos0.append(pos.x) #pos0.append(pos.y) #pos0.append(pos.z) #des=House([pos.x+20,pos.y,pos.z],mc,block.GOLD_BLOCK.id,block.GLASS.id) #des.buildall() ct=0 while(True): ct+=1 #到达目的地了吗 #if(des.isInsideHouse()): #mc.postToChat("You win") #break #人脸识别,一方面投石机追踪,一方面控制MC里面人到Destinatioin ret,img=cap.read() center=[img.shape[0]/2,img.shape[1]/2] faces = face_cascade.detectMultiScale(img, 1.3, 5) tmp=0 for(x,y,w,h) in faces: tmp+=1 if(tmp>1): print("too many faces") else: for (x,y,w,h) in faces: img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_color = img[y:y+h, x:x+w] x_d=x+w/2-325-73 dis=(-0.88*w+220) angle=x_d#math.atan(x_d/dis)/3.1415926535897*180 currentpos=angle currentdis=dis currentx_d=x_d if(ct==1): lastpos=currentpos lastdis=currentdis lastx_d=currentx_d #pos=mc.player.getTilePos() #mc.player.setTilePos([pos.x+(currentx_d-lastx_d)/5,pos.y,pos.z+(currentdis-lastdis)/5]) #print(x_d) #print(angle) #ser.write print(str(int(angle)).encode()) #ser.write if(angle<0): ser.write(str(int(angle)).encode()) else: ser.write(("+"+str(int(angle))).encode()) time.sleep(1) if((lastpos-currentpos)<10 and abs(angle)<15): shoot+=1 if(shoot>1): time.sleep(2) #mc.player.setTilePos([0,-1000,0]) 
ser.write(str(10000).encode()) time.sleep(2) shoot=0 lastpos=currentpos lastdis=currentdis lastx_d=currentx_d cv2.imshow('img',img) if cv2.waitKey(1)& 0xFF==ord('q'): break cap.release() cv2.destroyAllWindows()
7,317
50e759ff24cdb8fbb5a98d9381afb13ebc1a74f1
import json

from bottle import request, response, route, get, run, default_app

# WSGI entry point; both conventional names are exported.
app = application = default_app()


@route('/candidate/hired', method=['POST'])
def update_delete_handler():
    """Respond to the hire POST with a JSON confirmation payload."""
    response.content_type = 'application/json'
    return json.dumps({"hired": True})


def main():
    # Development server only; under WSGI the `application` object is used.
    run(host='localhost', port=8080)
7,318
ad84a5bfcf82dff1f4a7e8f08f3c4243ad24de52
from foods.fruits import * orange.eat() apple.eat()
7,319
2867a7b24b4911b2936cb34653fa57431c14d6a3
# Displaying multiple images using matplotlib
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt


def main():
    """Load two images with OpenCV and show them side by side with matplotlib."""
    # Raw strings so backslashes in Windows paths are never treated as escapes.
    imgpath1 = r"C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"
    imgpath2 = r"C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"

    img1 = cv2.imread(imgpath1, 1)
    img2 = cv2.imread(imgpath2, 1)

    # BUGFIX: cv2.imread returns BGR, but plt.imshow expects RGB — without this
    # conversion the displayed colors are channel-swapped.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    titles = ['Pepper Gray', 'Peppers Color']
    images = [img1, img2]

    for i in range(2):
        plt.subplot(1, 2, i + 1)
        plt.imshow(images[i])
        plt.title(titles[i])
        plt.xticks([])
        plt.yticks([])

    plt.show()


if __name__ == "__main__":
    main()
7,320
8142585827590f6d951f0fcc375e8511aa75e9c8
# from https://github.com/tensorflow/models/tree/master/research/object_detection/dataset_tools # and https://gist.github.com/saghiralfasly/ee642af0616461145a9a82d7317fb1d6 import tensorflow as tf from object_detection.utils import dataset_util import os import io import hashlib import xml.etree.ElementTree as ET import random from PIL import Image def create_example(xml_file): tree = ET.parse(xml_file) root = tree.getroot() image_name = root.find('filename').text file_name = image_name.encode('utf8') size=root.find('size') width = int(size[0].text) height = int(size[1].text) xmin = [] ymin = [] xmax = [] ymax = [] classes = [] classes_text = [] truncated = [] poses = [] difficult_obj = [] for member in root.findall('object'): classes_text.append(member[0].text) def class_text_to_int(row_label): if row_label == 'car-red': return 1 if row_label == 'car-blue': return 2 if row_label == 'phone': return 3 classes.append(class_text_to_int(member[0].text)) xmin.append(float(member[4][0].text) / width) ymin.append(float(member[4][1].text) / height) xmax.append(float(member[4][2].text) / width) ymax.append(float(member[4][3].text) / height) difficult_obj.append(0) truncated.append(0) poses.append('Unspecified'.encode('utf8')) full_path = os.path.join('./data/images', '{}'.format(image_name)) with tf.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) if image.format != 'JPEG': raise ValueError('Image format not JPEG') key = hashlib.sha256(encoded_jpg).hexdigest() example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(file_name), 'image/source_id': dataset_util.bytes_feature(file_name), 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), 'image/encoded': dataset_util.bytes_feature(encoded_jpg), 'image/format': 
dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), 'image/object/difficult': dataset_util.int64_list_feature(difficult_obj), 'image/object/truncated': dataset_util.int64_list_feature(truncated), 'image/object/view': dataset_util.bytes_list_feature(poses), })) return example def main(_): writer_train = tf.python_io.TFRecordWriter('./data/train.record') writer_test = tf.python_io.TFRecordWriter('./data/test.record') filename_list=tf.train.match_filenames_once("./data/annotations/*.xml") init = (tf.global_variables_initializer(), tf.local_variables_initializer()) sess=tf.Session() sess.run(init) list=sess.run(filename_list) random.shuffle(list) i=1 tst=0 trn=0 for xml_file in list: example = create_example(xml_file) if (i%5)==0: writer_test.write(example.SerializeToString()) tst=tst+1 else: writer_train.write(example.SerializeToString()) trn=trn+1 i=i+1 print(xml_file) writer_test.close() writer_train.close() print('Successfully converted dataset to TFRecord.') print('training dataset: # ') print(trn) print('test dataset: # ') print(tst) if __name__ == '__main__': tf.app.run()
7,321
1c5cb9363c2903905f1026ede77615e8373c250b
from django.db import models


# Create your models here.
class Login(models.Model):
    """Stores a Trinity account's credentials."""

    # NOTE(review): the password looks like it is persisted as plain text —
    # consider hashing before storage; confirm with the callers.
    trinity_id = models.CharField('', max_length=200)
    trinity_password = models.CharField('', max_length=500)

    objects = models.Manager()
7,322
c91be6cc332139c5b1e7ee5a3512482d0f8620b1
def selectionSort(arr, low, high):
    """Sort arr[low:high + 1] in place, ascending, using selection sort.

    Repeatedly selects the minimum of the remaining unsorted slice and swaps
    it into position. Returns arr (which is also mutated in place).

    BUGFIX: the original accepted ``low``/``high`` but ignored them and always
    sorted the entire list; the bounds are now honoured. For the common
    full-range call selectionSort(arr, 0, len(arr) - 1) behavior is unchanged.
    """
    for i in range(low, high + 1):
        # Index of the smallest element seen so far in arr[i:high + 1].
        mini = i
        for j in range(i + 1, high + 1):
            if arr[mini] > arr[j]:
                mini = j
        arr[i], arr[mini] = arr[mini], arr[i]
    return arr
7,323
58667da8898c2277ecc3d9d738d6553dd3416436
############################################-############################################ ################################ F I L E A U T H O R S ################################ # MIKE - see contacts in _doc_PACKAGE_DESCRIPTION ####################################### A B O U T ####################################### # In this module: # I clean the out put directories ####################################### S T A R T ####################################### import _cfg_GLOBAL as CFG import os import LOG import UTILITY as UTL import datetime def some_func(): CFG.start_clock_module = datetime.datetime.now() LOG.write_me("\tSTART - CLEAN.py (" + datetime.datetime.now().strftime("%y-%m-%d | %H:%M") + ")") my_root_dir = os.getcwd() list_output_dir = list() list_of_files = list() LOG.write_me("\t\tList of the files deleted from the 'OUTPUT' folders:") for root, dirs, files in os.walk(my_root_dir): if not str(root).endswith("ABACUS"): if "OUTPUT_" in str(root): for file in files: if str(file).endswith(".txt"): rel_path_file = os.path.relpath(root, my_root_dir) + "/" + file LOG.write_me("\t\t- " + rel_path_file ) path_file = root + "\\" + file os.remove(path_file) list_of_files.append(rel_path_file) if len(list_of_files) == 0: LOG.write_me("\t\t\t- No output file to clean") elapsed_formatted = UTL.format_elapsed(CFG.start_clock_module) LOG.write_me("\tEND - CLEAN.py (" + datetime.datetime.now().strftime("%y-%m-%d | %H:%M") + " | hh.mm.ss.ms " + elapsed_formatted + ")") LOG.write_me("") LOG.write_me("") if __name__ == '__main__': some_func()
7,324
1ebf92cf40053e561b04a666eb1dd36f54999e2c
if __name__ == '__main__': import sys import os.path srcpath = sys.argv[1] if len(sys.argv) >= 1 else './' verfn = sys.argv[2] if len(sys.argv) >= 2 else None try : with open(os.path.join(srcpath,'.svn/entries'),'r') as fp: x = fp.read().splitlines()[3] if verfn : with open(verfn,'w') as fp : fp.write(x) else : sys.stdout.write(x) except IOError, e : import traceback traceback.print_exc() pass
7,325
3f1715763a066fb337b3ff3d03e3736d0fb36b3f
from PyQt5 import QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QDialog

from QT_view.PassportAdd import PassportAddDialog
from QT_view.PassportWin import Ui_Dialog
from Repository.Rep_Passport import PassportRepository


class PassportQt(QDialog):
    """Dialog that lists passports and offers add / edit / delete actions."""

    def __init__(self):
        super(PassportQt, self).__init__()
        self.passport_rep = PassportRepository()
        self.initUI()

    @staticmethod
    def _locked_item(text):
        """Build a read-only (selectable + enabled, not editable) table cell."""
        item = QTableWidgetItem(text)
        item.setFlags(
            QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        )
        return item

    def _set_row(self, row, passport):
        """Fill table row *row* with the id / serial / number of *passport*."""
        self.ui.tableWidget.setItem(row, 0, self._locked_item(str(passport['id'])))
        self.ui.tableWidget.setItem(row, 1, self._locked_item(passport['serial']))
        self.ui.tableWidget.setItem(row, 2, self._locked_item(passport['number']))

    def initUI(self):
        """Set up widgets, wire the buttons, and populate the table."""
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        self.ui.tableWidget.setColumnWidth(1, 259)
        # 1 == SelectRows behavior / SingleSelection mode.
        self.ui.tableWidget.setSelectionBehavior(1)
        self.ui.tableWidget.setSelectionMode(1)
        self.ui.pushButton.clicked.connect(self.click_add)
        self.ui.pushButton_2.clicked.connect(self.click_edit)
        self.ui.pushButton_3.clicked.connect(self.click_del)
        self.ui.pushButton_4.clicked.connect(self.click_cancel)

        passports = self.passport_rep.get_passports()
        self.ui.tableWidget.setRowCount(len(passports))
        for row, passport in enumerate(passports):
            self._set_row(row, passport)

    def click_add(self):
        """Open the add dialog; on accept, append the new passport row."""
        p_dict = {'id': -1, 'serial': "", 'number': ""}
        self.passport_rep.set_dict(p_dict)
        passport_add = PassportAddDialog(self.passport_rep)
        if passport_add.exec():
            passport_d = self.passport_rep.get_dict()
            count_row = self.ui.tableWidget.rowCount()
            self.ui.tableWidget.setRowCount(count_row + 1)
            self._set_row(count_row, passport_d)

    def click_edit(self):
        """Open the edit dialog for the selected row; refresh it on accept."""
        edit_list = self.ui.tableWidget.selectedItems()
        if len(edit_list):
            select_row = self.ui.tableWidget.currentRow()
            edit_d = {'id': int(edit_list[0].text()),
                      'serial': edit_list[1].text(),
                      'number': edit_list[2].text()}
            self.passport_rep.set_dict(edit_d)
            passport_edit = PassportAddDialog(self.passport_rep)
            if passport_edit.exec():
                self._set_row(select_row, self.passport_rep.get_dict())

    def click_del(self):
        """Delete the selected passport from the repository and the table."""
        del_list = self.ui.tableWidget.selectedItems()
        if len(del_list):
            del_p = {'id': int(del_list[0].text()),
                     'serial': del_list[1].text(),
                     'number': del_list[2].text()}
            self.passport_rep.del_passport(del_p)
            self.ui.tableWidget.removeRow(del_list[0].row())

    def click_cancel(self):
        # NOTE(review): the "cancel" button calls accept() (success result
        # code); kept as-is because callers may rely on that behavior.
        self.accept()
7,326
5af5c10c149c7b0e2a969be7895780d26a4294d0
import csv


def extract_emails(rows):
    """Return the e-mail addresses (4th column) from faculty CSV rows.

    *rows* is any iterable of CSV text lines (e.g. an open file).
    The header row (first field 'name') and rows with fewer than four
    columns are skipped.  Uses csv.reader so quoted fields containing
    commas are handled correctly (the original split on ',' and hid all
    errors behind a bare except).
    """
    emails = []
    for row in csv.reader(rows):
        if not row or row[0] == 'name':
            continue  # blank line or header
        if len(row) < 4:
            continue  # malformed row: no email column
        emails.append(row[3].rstrip())
    return emails


def main():
    """Read faculty.csv and write the email column to emails.csv."""
    with open('faculty.csv') as facultycsv:
        emails = extract_emails(facultycsv)
    # newline='' prevents csv from emitting blank lines on Windows.
    with open('emails.csv', 'w', newline='') as emailcsv:
        writer = csv.writer(emailcsv, quoting=csv.QUOTE_MINIMAL)
        for email in emails:
            writer.writerow([email])


if __name__ == '__main__':
    main()
7,327
956adc5961188458393b56564649ad0a3a787669
# Demonstration of Python object identity (id) for immutable vs mutable values.

x = 5
y = x
# Both names are bound to the same int object, so the ids match.
print(id(x))
print(id(y))
print()
y = 3
# Rebinding y points it at a different int object; x is unaffected.
print(id(x))
print(id(y))
print()
z = [1, 4, 3, 25]
w = z
# Assignment copies the *reference*, not the list: one list, two names.
print(z)
print(w)
print(id(z))
print(id(w))
print()
w[1] = 10
# Mutating through w is visible through z as well -- same object, same ids.
print(z)
print(w)
print(id(z))
print(id(w))

# So when you assign a mutable, you're actually assigning a reference to the mutable,
# and I have the side effect that when I change an element of that list in one place,
# it gets changed in both places because it's really just one object, and functions work exactly the same way.
7,328
af5ebdcd818fdf9c607240733b7b5dbb793cf55e
# put your python code here
import math


def lcm(a, b):
    """Least common multiple of two positive integers.

    Replaces the original brute-force scan from 1 upward (O(a*b) in the
    worst case) with the identity lcm(a, b) = a*b // gcd(a, b); for
    a == b this yields a, matching the original special case.
    """
    return a * b // math.gcd(a, b)


if __name__ == '__main__':
    a = int(input())
    b = int(input())
    print(lcm(a, b))
7,329
3b9193fcd69b0387222feab96c50bf3617606cdd
from typing import List, cast

import numpy as np

from ..dataset import Transition
from .base import TransitionIterator


class RandomIterator(TransitionIterator):
    """Iterator that samples transitions uniformly at random, with replacement.

    An "epoch" here is simply a fixed number of random draws
    (``n_steps_per_epoch``); individual transitions may repeat or be
    skipped within a single epoch.
    """

    # Number of random samples that constitutes one epoch.
    _n_steps_per_epoch: int

    def __init__(
        self,
        transitions: List[Transition],
        n_steps_per_epoch: int,
        batch_size: int,
        n_steps: int = 1,
        gamma: float = 0.99,
        n_frames: int = 1,
        real_ratio: float = 1.0,
        generated_maxlen: int = 100000,
    ):
        # All sampling-independent configuration is delegated to the base class.
        super().__init__(
            transitions=transitions,
            batch_size=batch_size,
            n_steps=n_steps,
            gamma=gamma,
            n_frames=n_frames,
            real_ratio=real_ratio,
            generated_maxlen=generated_maxlen,
        )
        self._n_steps_per_epoch = n_steps_per_epoch

    def _reset(self) -> None:
        # Uniform sampling keeps no cursor state, so there is nothing to reset.
        pass

    def _next(self) -> Transition:
        """Return one uniformly sampled transition (with replacement)."""
        index = cast(int, np.random.randint(len(self._transitions)))
        transition = self._transitions[index]
        return transition

    def _has_finished(self) -> bool:
        # _count is maintained by the TransitionIterator base class --
        # presumably incremented once per sample; verify against the base
        # implementation.
        return self._count >= self._n_steps_per_epoch

    def __len__(self) -> int:
        # Length of an epoch in sampling steps, not the dataset size.
        return self._n_steps_per_epoch
7,330
5c4a48de94cf5bfe67e6a74c33a317fa1da8d2fa
from django.db import models
from django.urls import reverse
from django.conf import settings
from embed_video.fields import EmbedVideoField
from django.contrib.auth.models import AbstractBaseUser

# Project user model reference, per Django convention.
User = settings.AUTH_USER_MODEL

# Create your models here.

"""class User(models.Model):
    username = models.CharField(max_length=20)
    created_at = models.DateTimeField()
    is_enabled = models.BooleanField(default=True)
    email = models.EmailField()
    password = models.CharField(max_length=20)

    def __str__(self):
        return self.username"""


class Post(models.Model):
    """A text post written by a user; may be a reply to another post."""

    is_enabled = models.BooleanField(default=True)
    # Self-referential FK: a post may reply to a parent post.
    # NOTE(review): default='' on a ForeignKey looks wrong (FKs expect a pk
    # or None); confirm against existing migrations before changing.
    parent = models.ForeignKey(
        'self',
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        default=''
    )
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    author = models.ForeignKey(
        User,
        on_delete=models.PROTECT
    )

    class Meta:
        # Group replies under their parent, oldest first.
        ordering = ['parent_id', 'created_at']

    def display_text(self):
        # Admin list shorthand: first five words, hard-truncated at 20 chars.
        short = " ".join(self.text.split()[0:5])
        if len(short) > 20:
            short = self.text[:20] + "..."
        return short
    display_text.short_description = 'Text'

    def __str__(self):
        space = " "
        return f'{space.join(self.text.split()[0:5])} ({str(self.created_at)})'

    def get_absolute_url(self):
        # Canonical URL for this post in the 'post-detail' view.
        return reverse('post-detail', args=[str(self.id)])


class Item(models.Model):
    """Wrapper model holding a single embeddable video URL."""

    video = EmbedVideoField()
7,331
86345702bcd423bc31e29b1d28aa9c438629297d
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 18 16:11:44 2021

@author: ignacio
"""

import matplotlib.pyplot as plt
from numpy.linalg import inv as invertir
from time import perf_counter
import numpy as np


def matriz_laplaciana(N, t=np.single):  # funcion obtenida de clase
    # 1-D Laplacian matrix: 2 on the diagonal, -1 on the first off-diagonals,
    # cast to dtype t (single precision by default).
    e = np.eye(N) - np.eye(N, N, 1)
    return t(e + e.T)


# Matrix sizes to benchmark.
Ns = [2, 5, 10, 12, 15, 20, 30, 40, 45, 50, 55, 60, 75, 100, 125, 160, 200,
      250, 350, 500, 600, 800, 1000, 2000, 5000, 10000]

corridas = 10

# Benchmark: time the inversion of the Laplacian for each N, repeating the
# whole sweep `corridas` times; each run is logged to single<run>.txt.
for corrida in range(corridas):
    tiempo = []
    memoria = []
    name = (f"single{corrida}.txt")
    fid = open(name, "w")
    for i in Ns:
        print(f"i = {i}")
        A = matriz_laplaciana(i)
        t1 = perf_counter()
        invertir(A)
        t2 = perf_counter()
        dt = t2 - t1
        # Estimated footprint of the 3 matrices involved in the inversion.
        # NOTE(review): 32 is the number of *bits* per float32 element, yet
        # the value is reported as "bytes" below -- confirm whether the
        # factor should be 4.
        size = 3 * (i**2) * 32
        tiempo.append(dt)
        memoria.append(size)
        fid.write(f"{i} {dt} {size}\n")
        print(f"Tiempo transcurrido = {dt} s")
        print(f"Mmoria usada = {size} bytes")
        fid.flush()
    fid.close()

# Re-read the ten result files for plotting.
dim = []
tim = []
mem = []
for n in range(10):
    dimension = []
    time = []
    memory = []
    with open(f"single{n}.txt", "r") as f:
        lineas = [linea.split() for linea in f]
        for i in lineas:
            dimension.append(int(i[0]))
            time.append(float(i[1]))
            memory.append(int(i[2]))
    dim.append(dimension)
    tim.append(time)
    mem.append(memory)

# Grafico superior: elapsed time vs N for every run (log-log).
plt.subplot(2, 1, 1)
plt.plot(dim[0], tim[0], "-o")
plt.plot(dim[0], tim[1], "-o")
plt.plot(dim[0], tim[2], "-o")
plt.plot(dim[0], tim[3], "-o")
plt.plot(dim[0], tim[4], "-o")
plt.plot(dim[0], tim[5], "-o")
plt.plot(dim[0], tim[6], "-o")
plt.plot(dim[0], tim[7], "-o")
plt.plot(dim[0], tim[8], "-o")
plt.plot(dim[0], tim[9], "-o")
plt.yscale('log')
plt.xscale('log')
xticks = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]
xtext = ["", "", "", "", "", "", "", "", "", "", ""]
yticks = [0.1/1000, 1/1000, 10/1000, 0.1, 1, 10, 60, 600]
ytext = ["0.1 ms", "1 ms", "10 ms", "0.1 s", "1 s", "10 s", "1 min", "10 min"]
plt.yticks(yticks, ytext)
plt.xticks(xticks, xtext)
plt.title("Rendimiento caso1_single")
plt.ylabel("Tiempo transcurrido (s)")
plt.grid(True)

# Grafico inferior: estimated memory use vs N (uses `memoria` from the
# last benchmark run above).
plt.subplot(2, 1, 2)
plt.plot(Ns, memoria, '-ob')
plt.yscale('log')
plt.xscale('log')
xticks = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]
xtext = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000]
yticks = [1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 100000000000]
ytext = ["1 KB ", "10 KB", "100 KB", "1 MB", "10 MB", "100 MB", "1 GB", "10 GB"]
plt.axhline(y=4000000000, linestyle="--", color="black")  # RAM 4 GB
plt.yticks(yticks, ytext)
plt.xticks(xticks, xtext, rotation=45)
plt.xlabel("Tamaño matriz N")
plt.ylabel("Uso memoria (bytes)")
plt.grid(True)

plt.savefig("Rendimiento caso1_single.png")
7,332
84a63f60a45f1f8fc1efec8f30345a43c3c30c63
def html_print(text, title=''):
    """Render *text* in an IPython notebook as small Verdana HTML,
    preceded by an <h4> heading built from *title*.

    Returns the value of the second display() call (None in IPython).
    """
    from IPython.core.display import display, HTML

    heading_markup = "<h4>" + str(title) + "</h4>"
    body_markup = "<font size=2 face=Verdana>" + text + "</font>"

    # Show the heading first, then the body; the body's display() result
    # is what the caller receives.
    display(HTML(heading_markup))
    html = display(HTML(body_markup))
    return html
7,333
aafadcbf946db8ed85e3df48f5411967ec35c318
#!/usr/bin/env python
# ----------------------------------------------------------
# RJGlass Main Program version 0.2 8/1/07
# ----------------------------------------------------------
# Copyright 2007 Michael LaBrie
#
#    This file is part of RJGlass.
#
#    RJGlass is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 3 of the License, or
#    (at your option) any later version.
#
#    RJGlass is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ---------------------------------------------------------------
# NOTE: Python 2 source (print statements); left as-is.

import sys, os, time

#Load the modules needed for RJGlass.
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
#pygame needed for sound in server_only (so load either way)
import pygame
from pygame.locals import *
from pygame import image

from guage import * #All add on guage functions colors etc.

#This is code to import config file (config.py)
try:
    import config
except ImportError:
    # We're in a py2exe, so we'll append an element to the (one element)
    # sys.path which points to Library.zip, to the directory that contains
    # Library.zip, allowing us to import config.py
    # Adds one level up from the Library.zip directory to the path, so import will go forward
    sys.path.append(os.path.split(sys.path[0])[0])
    import config


class screen_c(object): #This controls what is in each screen.
    # One half (left or right) of the 1024x768 display; cycles through a
    # list of gauges, of which exactly one is drawn at a time.

    def __init__(self, x, guage_list=[]):
        # NOTE(review): mutable default argument; harmless here because the
        # list is only iterated, never mutated.
        self.guage_list = [] #list of guages to cycle through.
        self.guage_index = 0
        self.x = x
        self.y = 0
        self.width = 512
        self.heigth = 768
        self.add_guage_list(guage_list)

    def add_guage_list(self, glist):
        # glist contains gauge *names*; resolved through the module-level
        # guage_dict defined at the bottom of this file.
        for g in glist:
            self.append_guage(guage_dict[g])

    def append_guage(self, guage):
        self.guage_list.append(guage)

    def cycle(self):
        # Advance to the next gauge, wrapping at the end of the list.
        self.guage_index += 1
        if self.guage_index >= len(self.guage_list):
            self.guage_index = 0

    def cycle_reverse(self):
        # Step back to the previous gauge, wrapping below zero.
        self.guage_index -= 1
        if self.guage_index < 0:
            self.guage_index = len(self.guage_list) - 1

    def active_guage(self):
        return self.guage_list[self.guage_index]

    #this is a static function not specificaly for the screen.
    #the eventhandlers have references to the screens so it is easier to
    #get the guage references by name through this object.
    def gauge_by_name(self, name):
        return guage_dict[name]

    def draw(self, aircraft):
        # Draw only the currently selected gauge at this screen's origin.
        self.guage_active = self.guage_list[self.guage_index]
        self.guage_active.draw(aircraft, self.x, self.y)


def InitPyGame():
    """Initialize GLUT + pygame and open the 1024x768 OpenGL display."""
    glutInit(())
    pygame.init()
    if config.full_screen:
        s = pygame.display.set_mode((1024, 768), DOUBLEBUF|OPENGL|FULLSCREEN)
    else:
        s = pygame.display.set_mode((1024, 768), DOUBLEBUF|OPENGL)
    return s


def InitView(smooth, width, heigth):
    """Set up the orthographic projection scaled from the 1024x768 design size."""
    global x_s, y_s, scissor
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glOrtho(0, width, 0.0, heigth, -1.0, 1.0)
    # Scale factors from design coordinates to the actual window size;
    # also pushed onto the shared scissor helper (from guage import *).
    x_s = width/1024.0
    y_s = heigth/768.0
    glScalef(x_s, y_s, 1.0)
    scissor.x_s = x_s
    scissor.y_s = y_s
    if smooth:
        #Enable Smoothing Antianalising
        glEnable(GL_LINE_SMOOTH)
        glEnable(GL_BLEND)
        #glBlendFunc(GL_SRC_ALPHA, GL_ZERO)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
    #glDisable(GL_DEPTH_TEST)
    #Clear Screen
    #glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)


def DisplaySplash(filename, delay, window_x, window_y):
    """Blit a centered splash image and block for *delay* seconds."""
    #Display needs to be initialized first.
    i = image.load(filename)
    splash_image = bitmap_image(i)
    #Determine the x and y coords to put in center of screen.
    splash_x = (window_x / 2) - (splash_image.w/2)
    splash_y = (window_y / 2) - (splash_image.h/2)
    glRasterPos3f(splash_x, splash_y, 0)
    glDrawPixels(splash_image.w, splash_image.h, GL_RGBA, GL_UNSIGNED_BYTE, splash_image.tostring)
    pygame.display.flip()
    time.sleep(delay)


def DrawWindow(left_screen, right_screen):
    """Draw one frame: both screens, the divider, FPS, and NO DATA warning."""
    def divider():
        #Dividing vertical white line between instruments
        glColor(white)
        glLineWidth(2.0)
        glBegin(GL_LINES)
        glVertex2f(512.0, 0.0)
        glVertex2f(512.0, 768.0)
        glEnd()

    def draw_nodata(x, y):
        #Draw no data text on screen.
        glColor(red)
        glLineWidth(5.0)
        glPushMatrix()
        glTranslatef(x, y, 0)
        glScalef(0.4, 0.4, 1.0)
        glText("NO SIM DATA", 100)
        glPopMatrix()

    global count
    divider()
    #PFD.draw(aircraft_data,250,445)
    left_screen.draw(aircraft_data)
    #ND.draw(aircraft_data,512+256, 400)
    #FMS.draw(aircraft_data,512+256, 0)
    right_screen.draw(aircraft_data)
    glDisable(GL_SCISSOR_TEST) #Disable any scissoring.
    draw_FPS(20, 740, aircraft_data.frame_time)
    #If Nodata is coming from Flight Sim, show on screen
    if aircraft_data.nodata:
        draw_nodata(50, 500)
    count = count + 1 #Used for FPS calc


def MainLoop(mode, server_only):
    """Configure screens / event handling, then enter the appropriate loop."""
    #global window
    global starttime
    global count
    global mode_func, left_screen, right_screen, eventhandler
    # Start Event Processing Engine
    starttime = time.time() # Used for FPS (Frame Per Second) Calculation
    if (server_only):
        #Set up correct function for selected mode
        mode_func = aircraft_data.get_mode_func(mode)
    else:
        left_screen = screen_c(256, config.left_screen)
        right_screen = screen_c(512+256, config.right_screen)
        # left_screen.add_guage_list(config.left_screen)
        # right_screen.add_guage_list(config.right_screen)
        #Set up correct function for selected mode
        mode_func = aircraft_data.get_mode_func(mode, left_screen, right_screen)
        #Setup Keyboard
        #keys.setup_lists(aircraft_data)
        #Inititalize View
        #left_screen = screen_c(256, [PFD, ND, FMS])
        eventhandler = event_handler.event_handler_c(aircraft_data, FMS, right_screen, left_screen)
        #Load textures, and guages that use them
        FMS.load_texture()
        EICAS1.load_texture()
        EICAS2.load_texture()
        RADIO.load_texture()
    if server_only:
        server_loop()
    else:
        graphic_loop()


def graphic_loop():
    #This is the loop for the non server mode. Gauges drawn.
    while not (aircraft_data.quit_flag):
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT) #Clear Screen
        #Update globaltime
        aircraft_data.globaltime = time.time()
        globaltime.update(time.time())
        DrawWindow(left_screen, right_screen)
        pygame.display.flip() #Update screen
        mode_func() #Run aircraft mode function, to do all teh calaculations etc.
        # Check for keypresses
        eventhandler.check_events(pygame.event.get(), globaltime.value)


def server_loop():
    #This is the loop for the server only mode. No Guages Drawn
    while not (aircraft_data.quit_flag):
        #Update globaltime
        aircraft_data.globaltime = time.time()
        globaltime.update(time.time())
        mode_func() #Run aircraft mode function, to do all teh calaculations etc.
        time.sleep(0.01) #Throw in some time delay, since no guages are being drawn.
        # Check for keypresses
        #eventhandler.check_events(pygame.event.get(), globaltime.value)


def Init_Graphics(x, y):
    InitPyGame()
    InitView(True, x, y)


def Initialize(server_only):
    #if server_only True then server will just be run, No Graphics
    #Initialize count for FPS calc
    global count
    count = 0
    if (not server_only):
        Init_Graphics(config.window_x, config.window_y)
        #Draw Splash Screen
        if config.splash:
            DisplaySplash(config.splash_filename, config.splash_delay, config.window_x, config.window_y)


def ShutDown(mode, server_only):
    #Close LogFile
    datafile.close()
    #Close pygame mixer
    pygame.mixer.quit()
    #Print average Frames per second on shutdown
    print "FPS ", count / (time.time() - starttime)
    #Try to kill the thread if it exists. Closes it down on exit
    aircraft_data.AP.quit() #only here to close debugging files if present.
    if ((mode != config.TEST) & (mode != config.CLIENT)):
        #If simconnected connected, kill the thread.
        aircraft_data.kill_SimConnect()


def CheckArg(arg, mode, server_only, addr):
    # Command-line overrides for the config defaults.
    if 'server' in arg:
        server_only = True
    elif 'guage' in arg:
        server_only = False
    if 'client' in arg:
        mode = config.CLIENT
    elif 'test' in arg:
        mode = config.TEST
    for a in arg:
        if 'addr' in a:
            addr = a.split('=')[1]
    return mode, server_only, addr


#===========================================================================
# Main program starts here
#===========================================================================
#Check arguments first, and get mode and server_only flags
mode, server_only, addr = CheckArg(sys.argv, config.mode, config.server_only, config.addr)
#config.addr = addr
#print addr
Initialize(server_only)

#Import guage files.
import aircraft #Does all of the aircraft_data
import event_handler #Handles all keyboard commands
import variable
if (not server_only):
    import PFD_mod
    import ND_mod
    import EICAS1_mod
    import EICAS2_mod
    import FMS_guage
    import radio_mod

#Create Guages
aircraft_data = aircraft.data()
variables = variable.variable_c(aircraft_data)
if (not server_only):
    PFD = PFD_mod.PFD_Guage()
    ND = ND_mod.ND_Guage()
    FMS = FMS_guage.FMS_guage_c()
    EICAS1 = EICAS1_mod.EICAS1_guage()
    EICAS2 = EICAS2_mod.EICAS2_guage()
    ND.initialize(aircraft_data)
    RADIO = radio_mod.radio_guage()
    # Name -> gauge instance map used by screen_c.add_guage_list.
    guage_dict = {
        "RADIO": RADIO, "PFD": PFD, "ND": ND, "FMS": FMS,
        "EICAS1": EICAS1, "EICAS2": EICAS2
    }

print "Main Loop"
#Run main, and get window size and operation mode from config file. config.py
MainLoop(mode, server_only)
#===================
# Shuting Down
#===================
ShutDown(mode, server_only)
7,334
4843239a41fe1ecff6c8c3a97aceef76a3785647
from collections import defaultdict


def group_by_owners(files):
    """Invert a {filename: owner} mapping into {owner: [filenames]}.

    File order within each owner's list follows the iteration order of
    *files*. (The original version only printed debug output, never built
    the grouping, and implicitly returned None.)
    """
    grouped = defaultdict(list)
    for filename, owner in files.items():
        grouped[owner].append(filename)
    return dict(grouped)


files = {
    'Input.txt': 'Randy',
    'Code.py': 'Stan',
    'Output.txt': 'Randy'
}
print(group_by_owners(files))
7,335
5a13c7e3be8a0b5f3baf7106a938fc97f078c5bc
'''
Created on May 17, 2016

@author: Shauryadeep Chaudhuri
'''

import json

import tornado

from engine import Constants as c
from engine.ResultGenerator import ResultGenerator

from ..ServerLogger import ServerLogger


class GetFromURL(tornado.web.RequestHandler):
    '''
    This class fetches the data requested like index,schema,entry,query from the url and responds with the result
    '''

    def initialize(self):
        self.logger = ServerLogger().getLogger()

    def get(self, index=None, schema=None, entry=None, query=None):
        """Build an internal GET command from the URL pieces and write the result.

        BUG FIX: the original immediately rebound the *query* URL parameter
        to a fresh dict, silently discarding whatever the router captured;
        the internal command dict now has its own name.
        """
        command = dict()
        resultGenerator = ResultGenerator()
        command[c.OPERATION] = c.GET
        if index:
            command[c.INDEX] = index
        if schema:
            command[c.SCHEMA] = schema
        if entry:
            command[c.ENTRY] = entry

        self.logger.debug("Internal Query Generated" + str(command))

        try:
            result = str(resultGenerator.processQuery(json.dumps(command)))
            self.logger.info("Result fetched:" + result)
            self.write(result)
        except Exception as e:
            # Log with traceback; the error text is echoed to the client
            # (NOTE(review): consider hiding internals in production).
            self.logger.error('Error', exc_info=True)
            self.write("Error: " + str(e))
7,336
eb1fbe2de3c8548175eb3c8720353e466e3b68c7
# Evaluate predicted dataset / attribute annotations in ontology.ttl against
# gold CSV annotations, reporting precision / recall / F1 per category.

import rdflib
import csv
from time import sleep

# Predicted annotations:
gtypes = {}  # dataset URI -> geometry type (Point/Region/Tessellation/Line)
dtypes = {}  # dataset URI -> core-concept dataset type
atypes = {}  # (dataset URI, attribute label) -> attribute type (may be "")

g = rdflib.Graph()
g.parse("http://geographicknowledge.de/vocab/CoreConceptData.rdf#")
g.parse("./ontology.ttl", format="ttl")
sleep(.5)

# Geometry type of each dataset.
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix dcat: <https://www.w3.org/TR/vocab-dcat#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?type where {
    ?dataset a dcat:Dataset , ?type .
    filter ( ?type in ( ccd:PointDataSet, ccd:RegionDataSet, ccd:VectorTessellation, ccd:LineDataSet ) )
}
""")
for result in results:
    uri, geometry_type = result
    # Keep only the local name after the '#'.
    gtypes[str(uri)] = str(geometry_type).split('#')[1]

# Core-concept dataset type of each dataset.
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix dcat: <https://www.w3.org/TR/vocab-dcat#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?type where {
    ?dataset a dcat:Dataset , ?type .
    ?type rdfs:subClassOf+ ccd:CoreConceptDataSet .
}
""")
for result in results:
    uri, dtype = result
    dtypes[str(uri)] = str(dtype).split('#')[1]

# Attribute type per (dataset, attribute label); type is optional.
results = g.query("""
prefix skos: <http://www.w3.org/2004/02/skos/core#>
prefix ccd: <http://geographicknowledge.de/vocab/CoreConceptData.rdf#>
prefix ada: <http://geographicknowledge.de/vocab/AnalysisData.rdf>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select ?dataset ?label ?type where {
    ?attribute ada:ofDataSet ?dataset ;
        skos:exactMatch ?concept ;
        rdfs:label ?label .
    optional {
        ?concept a ?type .
        ?type rdfs:subClassOf+ ccd:Attribute .
    }
}
group by ?dataset ?label ?type
""")
for result in results:
    dataset, label, atype = result
    key = (str(dataset), str(label))
    # Record "" only if no typed row for this key was seen yet; a typed row
    # always overrides the empty marker.
    if atype is None and key not in atypes:
        atypes[key] = ""
    elif atype is not None:
        atypes[key] = str(atype).split('#')[1]

# Gold annotations from CSV.
test_gtypes = {}
test_dtypes = {}
test_atypes = {}

with open("./datasets/annotations_datasets.csv", 'r') as fin:
    reader = csv.reader(fin)
    next(reader)  # skip header
    for row in reader:
        test_gtypes[row[0]] = row[1]
        test_dtypes[row[0]] = row[2]

with open("./datasets/annotations_attributes.csv", 'r') as fin:
    reader = csv.reader(fin)
    next(reader)  # skip header
    for row in reader:
        test_atypes[(row[0], row[1])] = row[2]

# --- Geometry type P/R/F ---
tp = 0
total = 0
fn = len(test_gtypes)
for k, v in gtypes.items():
    if k not in test_gtypes:
        # skip some extra test datasets
        continue
    total += 1
    if test_gtypes[k] == v:
        tp += 1
        fn -= 1
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Geometry type scores:")
print(f"P: {p} , R: {r} , F: {f}")

# --- Dataset type P/R/F ---
tp = 0
total = 0
fn = len(test_dtypes)
for k, v in dtypes.items():
    if k not in test_dtypes:
        continue
    total += 1
    if test_dtypes[k] == v:
        tp += 1
        fn -= 1
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Dataset type scores:")
print(f"P: {p} , R: {r} , F: {f}")

# --- Attribute type P/R/F (optionally ignoring untyped attributes) ---
filter_nontypes = True
if filter_nontypes:
    test_atypes = {k: v for k, v in test_atypes.items() if v != ""}
    atypes = {k: v for k, v in atypes.items() if v != ""}

tp = 0
total = 0
fn = len(list(filter(lambda x: x != "", test_atypes.values())))
for k, v in atypes.items():
    if k not in test_atypes:
        continue
    if v != "":
        total += 1
        if test_atypes[k] == v:
            tp += 1
            fn -= 1
        elif v == "BooleanA" and test_atypes[k] == "NominalA":
            # boolean is "more" correct
            tp += 1
            fn -= 1
        else:
            print(k, v, test_atypes[k])
p = tp / total
r = tp / (tp + fn)
f = 2 * ((p * r) / (p + r))
print("Attribute type scores:")
print(f"P: {p} , R: {r} , F: {f}")
7,337
47d72379b894826dad335f098649702ade195f78
# Read a number and report it together with its predecessor and successor.
numero = int(input("Digite um número"))
antecessor = numero - 1
sucessor = numero + 1
print(f"Seu número é {numero} seu antecessor é {antecessor} e seu sucessor é {sucessor}")
7,338
a72d878d246a459038640bf9c1deff562994b345
def solution(skill, skill_trees):
    """Count how many trees in *skill_trees* respect the prerequisite
    order given by *skill* (each skill may only appear after the one
    preceding it in *skill*)."""

    def follows_order(tree):
        # Scan the required skills from last to first; whenever one occurs
        # in the tree, its direct prerequisite must occur strictly earlier.
        for pos in range(len(skill) - 1, 0, -1):
            found_at = tree.find(skill[pos])
            if found_at != -1 and pos > 0:
                if tree[:found_at].find(skill[pos - 1]) == -1:
                    return False
        return True

    return sum(1 for tree in skill_trees if follows_order(tree))


if __name__ == "__main__":
    skill = "CBD"
    skill_trees = ["BACDE", "CBADF", "AECB", "BDA"]
    solution(skill=skill, skill_trees=skill_trees)
7,339
42d26ef51bb4dafc8a0201a828652e166a3905e4
def unique(lisst):
    """Return the number of distinct values in *lisst*."""
    return len(set(lisst))


print(unique({4, 5, 1, 1, 3}))
7,340
8e443d136a4e9fcdd18a106192f9c097928b8c99
from typing import List, Any, Callable, Iterable, TypeVar, Tuple

T = TypeVar('T')


def partition(pred: Callable[[T], bool], it: Iterable[T]) \
        -> Tuple[List[T], List[T]]:
    """Split *it* into two lists: (items where pred is true, the rest).

    The iterable is consumed exactly once, so single-pass iterators work.
    (The original body was an unimplemented stub that returned None
    despite the annotated tuple return type.)
    """
    trues: List[T] = []
    falses: List[T] = []
    for item in it:
        (trues if pred(item) else falses).append(item)
    return trues, falses
7,341
62dab85b7ab5fdae8117827b2f56bccf99615cb7
# Code-golf solution to the warehouse-polygon / fence-area problem
# (presumably BOJ 2304): given pillars as (position, height) pairs, compute
# the area of the roofline that only rises up to the tallest pillar and
# then only falls.
a=[[*map(int,input().split())]for _ in range(int(input()))]
a.sort()  # sort pillars by position
s=0
# Left-to-right sweep: add a rectangle each time a strictly taller pillar
# is reached (heights are non-decreasing on this side).
l0,h0=a[0]
for l,h in a:
    if h0<h:s+=(l-l0)*h0;l0,h0=l,h
# Right-to-left sweep: same accumulation for the descending side.
l1,h1=a[-1]
for l,h in a[::-1]:
    if h>h1:s+=(l1-l)*h1;l1,h1=l,h
# Flat top between the two sweeps' stopping points (inclusive width).
s+=(l1-l0+1)*h1
print(s)
7,342
8bb86cae3387a0d4ce5987f3e3c458c8298174e0
# Heroku deployment settings: extends the base settings with database,
# proxy-SSL, allauth, bootstrap3, axes and DRF configuration.
from .settings import *

# Heroku Configurations

# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {'default': dj_database_url.config()}

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# loading local_settings.py (optional machine-local overrides; silently
# skipped when absent)
try:
    from .local_settings import *
except Exception as e:
    pass

# ALLAUTH configuration
# Specify the login method to use
ACCOUNT_USERNAME_REQUIRED = False
# ACCOUNT_AUTHENTICATION_METHOD = "username", "email", "username_email"

# Determines the e-mail verification method during signup -- choose one of
# "mandatory", "optional", or "none". When set to "mandatory" the user is
# blocked from logging in until the email address is verified. Choose
# "optional" or "none" to allow logins with an unverified e-mail address.
# In case of "optional", the e-mail verification mail is still sent,
# whereas in case of "none" no e-mail verification mails are sent.
ACCOUNT_EMAIL_VERIFICATION = "none"

# Determines whether or not the user is automatically logged out by a mere
# GET request. See documentation for the LogoutView for details.
ACCOUNT_LOGOUT_ON_GET = False

# Request e-mail address from 3rd party account provider?
# E.g. using OpenID AX, or the Facebook "email" permission.
SOCIALACCOUNT_QUERY_EMAIL = True

# Dictionary containing provider specific settings.
SOCIALACCOUNT_PROVIDERS = {
    'facebook': {
        # we use facebook js_sdk instead of oauth2
        'METHOD': 'js_sdk',
        'SCOPE': ['email', 'public_profile', 'user_friends'],
        # using AUTH_PARAMS to pass along other parameters
        # to the FB.login JS SDK call
        'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
        # fields are fetched from the Graph API
        'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],
        # JS SDK returns a short-lived token suitable for client-side use.
        'EXCHANGE_TOKEN': True,
        # Chose the current active language of the request
        # NOTE(review): 'path.to.callable' is a placeholder dotted path --
        # confirm it is intentional.
        'LOCALE_FUNC': 'path.to.callable',
        'VERIFIED_EMAIL': False,
        # Facebook Graph API version
        'VERSION': 'v2.7'
    },
    'linkedin': {
        'SCOPE': ['r_emailaddress'],
        'PROFILE_FIELDS': [
            'id',
            'first-name',
            'last-name',
            'email-address',
            'public-profile-url'
        ]
    }
}

# login redirect url
LOGIN_REDIRECT_URL = "/blog/jobs"

# Default settings
BOOTSTRAP3 = {
    # The URL to the jQuery JavaScript file
    'jquery_url': '//code.jquery.com/jquery.min.js',
    # The Bootstrap base URL
    'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',
    # The complete URL to the Bootstrap CSS file (None means derive it from base_url)
    'css_url': None,
    # The complete URL to the Bootstrap CSS file (None means no theme)
    'theme_url': None,
    # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
    'javascript_url': None,
    # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
    'javascript_in_head': False,
    # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
    'include_jquery': False,
    # Label class to use in horizontal forms
    'horizontal_label_class': 'col-md-3',
    # Field class to use in horizontal forms
    'horizontal_field_class': 'col-md-9',
    # Set HTML required attribute on required fields
    'set_required': True,
    # Set HTML disabled attribute on disabled fields
    'set_disabled': False,
    # Set placeholder attributes to label if no placeholder is provided
    'set_placeholder': True,
    # Class to indicate required (better to set this in your Django form)
    'required_css_class': '',
    # Class to indicate error (better to set this in your Django form)
    'error_css_class': 'has-error',
    # Class to indicate success, meaning the field has valid input (better to set this in your Django form)
    'success_css_class': 'has-success',
    # Renderers (only set these if you have studied the source and understand the inner workings)
    'formset_renderers': {
        'default': 'bootstrap3.renderers.FormsetRenderer',
    },
    'form_renderers': {
        'default': 'bootstrap3.renderers.FormRenderer',
    },
    'field_renderers': {
        'default': 'bootstrap3.renderers.FieldRenderer',
        'inline': 'bootstrap3.renderers.InlineFieldRenderer',
    },
}

# Axes Configurations

# Number of login attempts allowed before a record is created for the failed logins.
AXES_LOGIN_FAILURE_LIMIT = 3

# After the number of allowed login attempts are exceeded, should we lock this IP (and optional user agent)?
AXES_LOCK_OUT_AT_FAILURE = True

# If True, lock out / log based on an IP address AND a user agent. This means requests from different
# user agents but from the same IP are treated differently.
AXES_USE_USER_AGENT = True

# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set to a
# python timedelta object or an integer; if you set it to be integer it will represent a number of hours
AXES_COOLOFF_TIME = 50

# Specifies a logging mechanism for axes to use
# NOTE(review): AXES_LOCKOUT_TEMPLATE is assigned twice; this first
# assignment ('axes.watch_login') looks like it was meant for a different
# setting (e.g. AXES_LOGGER) and is immediately overwritten below -- confirm.
AXES_LOCKOUT_TEMPLATE = 'axes.watch_login'

# Specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as
# context variables
AXES_LOCKOUT_TEMPLATE = None

# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template
# will be used
AXES_LOCKOUT_URL = None

# If True, you'll see slightly more logging for Axes
AXES_VERBOSE = True

# The name of the form field that contains your usernames
# AXES_USERNAME_FORM_FIELD = username

# If True, prevents login from an IP under a particular user if the attempts limit is exceeded,
# otherwise lock out based on IP. Default: False
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False

# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK
CRISPY_TEMPLATE_PACK = "bootstrap3"

# Signal Admins Configurations
ADMINS = (
    ("Petar Pilipovic", "petar@literatillc.com"),
)

# RESTframework Permission classes configuration
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": [
        "rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
    ]
}
7,343
db231ea92319414dd10ca8dfbc14e5a70ed2fe44
from QnA_processor.question_analysis.google_question_classifier import GoogleQuestionClassifier


def classify_question(query):
    """Classify *query* into an answer type, trying classifiers in order.

    Order of preference:
      1. Google AutoML via authorized POST request.
      2. Google AutoML via the google package (no auth key) — attempted when
         the API-call path fails with KeyError (missing authorization key).
      3. The project's custom question classifier as a final fallback.

    :param query: the natural-language question string.
    :return: the answer-type label produced by whichever classifier succeeded.
    """
    question_classifier = None
    try:
        question_classifier = GoogleQuestionClassifier()
        return question_classifier.classify_by_api_call(query)
    except KeyError:
        # Authorization key missing: retry through the google package,
        # but only if the classifier itself was constructed successfully
        # (the original code raised NameError here when the constructor
        # was the source of the KeyError).
        if question_classifier is not None:
            try:
                return question_classifier.classify_by_package(query)
            except Exception:
                pass  # fall through to the custom classifier
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any other failure falls through to the fallback.
        pass

    # Final fallback: custom question classifier.  Imported lazily so the
    # heavier Google path does not pay for it.
    from QnA_processor.question_analysis.custom_question_classifier import CustomQuestionClassifier
    return CustomQuestionClassifier().classify_question(query)[0]

# print (classify_question("How many seasons are there in a year"))
7,344
f704742b9e023a1c3386fed293032fd8196b875e
# -*- coding: utf-8 -*-
# Author : Seungyeon Jo
# e-mail : syjo@seculayer.co.kr
# Powered by Seculayer © 2018 AI-Core Team

from mlps.core.data.cnvrtr.ConvertAbstract import ConvertAbstract


class Substr(ConvertAbstract):
    """Converter that slices a substring out of its input string.

    ``arg_list`` supplies ``[start, end]``; ``apply`` returns the slice
    wrapped in a single-element list (the converter convention).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def apply(self, data):
        """Return ``[data[start:end]]`` per ``self.arg_list``, or ``['']``."""
        # Blank input yields an empty result.
        if self._isBlank(data):
            return ['']

        # Need both a start and an end index; otherwise give up.
        if len(self.arg_list) < 2:
            return ['']

        start = int(self.arg_list[0])
        end = int(self.arg_list[1])

        # Clamp out-of-range indices the same way the legacy code did:
        # a start past the end of the string resets to 0.
        if start > len(data):
            start = 0
        if end > len(data):
            end = len(data)

        # An end of 0 means "to the end of the string".
        return [data[start:]] if end == 0 else [data[start:end]]


if __name__ == "__main__":
    sample = "Korea"
    print(Substr(arg_list=[0, 1]).apply(sample))
7,345
12fd4e3bfb6821205a9b65b4d236b4158ec4ef1e
#!/usr/bin/env python # # Copyright 2017-2021 University Of Southern California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging from sqlalchemy.exc import * from Pegasus.db.admin.admin_loader import * from Pegasus.db.admin.versions.base_version import BaseVersion from Pegasus.db.schema import * DB_VERSION = 8 log = logging.getLogger(__name__) class Version(BaseVersion): def __init__(self, connection): super().__init__(connection) def update(self, force=False): """ :param force: :return: """ log.info("Updating to version %s" % DB_VERSION) try: log.info("Updating master_workflowstate...") self.db.execute("ALTER TABLE master_workflowstate ADD reason TEXT NULL") except (OperationalError, ProgrammingError): pass except Exception as e: self.db.rollback() log.exception(e) raise Exception(e) self.db.commit() def downgrade(self, force=False): "Downgrade is not necessary as reason accepts NULL values"
7,346
357ee02060cbfa391920b3d45dfbe16e679a6c8d
def spam(divide_by):
    """Return 42 divided by *divide_by* (raises on zero or non-numeric)."""
    return 42 / divide_by


# Demonstrate exception handling: the third call divides by zero, so the
# remaining calls are skipped and the handler prints a single message.
try:
    for divisor in (2, 12, 0, 1, "dog"):
        print(spam(divisor))
except Exception:
    print("Error: Invalid argument.")
7,347
4473971552aa48236b19dec7e7c1ea1e622d5795
from subprocess import check_output

import json
import sys
import time
import os

import numpy as np

from hutch_python.utils import safe_load
from ophyd import EpicsSignalRO
from ophyd import EpicsSignal
from bluesky import RunEngine
from bluesky.plans import scan
from bluesky.plans import list_scan
from bluesky.plan_stubs import configure
#from bluesky.plans import list_grid_scan
from ophyd import Component as Cpt
from ophyd import Device
from pcdsdevices.epics_motor import Newport, IMS, MMC100
from pcdsdevices.interface import BaseInterface
from pcdsdevices.device_types import Trigger
from pcdsdevices.areadetector import plugins
from cxi.db import daq, seq
from cxi.db import camviewer
from cxi.db import RE
from cxi.db import foil_x, foil_y
from cxi.db import cxi_pulsepicker as pp, seq
from cxi.db import bp, bpp, bps
from cxi.plans import serp_seq_scan

from time import sleep


class User():
    """Hutch-user helpers for CXI: DAQ scans, event-sequencer setup, and
    sample-rastering ("snake") routines driven by the pulse picker."""

    def __init__(self):
        # Beam-rate (Hz) -> event-sequencer sync-marker index.
        self._sync_markers = {0.5:0, 1:1, 5:2, 10:3, 30:4, 60:5, 120:6, 360:7}
        self.evr_pp = Trigger('CXI:R48:EVR:41:TRIG0', name='evr_pp')
        self.pp_delay = EpicsSignal('CXI:R48:EVR:41:TRIG0:TDES', name='pp_delay')
        with safe_load('sam_x'):
            self.sam_x = IMS('CXI:SC2:MMS:06', name='sam_x')
        with safe_load('sam_y'):
            self.sam_y = IMS('CXI:SC2:MMS:05', name='sam_y')
        with safe_load('sam_z'):
            self.sam_z = IMS('CXI:SC2:MMS:08', name='sam_z')
        #with safe_load('sam_pitch'):
        #    self.sam_pitch = MMC100('CXI:USR:MMC:01', name='sam_pitch')
        #with safe_load('post_sam_x'):
        #    self.post_sam_x = IMS('CXI:USR:MMS:27', name='post_sam_x')
        #with safe_load('post_sam_y'):
        #    self.post_sam_y = MMC100('CXI:USR:MMC:02', name='post_sam_y')
        #with safe_load('post_sam_z'):
        #    self.post_sam_z = MMC100('CXI:USR:MMC:03', name='post_sam_z')
        with safe_load('op_focus'):
            self.wfs_focus = IMS('CXI:USR:MMS:26', name='wfs_focus')
        with safe_load('op_x'):
            self.wfs_x = Newport('CXI:USR:MMN:09', name='wfs_x')
        with safe_load('op_y'):
            self.wfs_v = IMS('CXI:USR:MMS:25', name='wfs_v')

    def takeRun(self, nEvents, record=True):
        """Record a single DAQ run of *nEvents* events.

        NOTE(review): configure() is called with events=120 while begin()
        uses nEvents — looks intentional (config is a template) but confirm.
        """
        daq.configure(events=120, record=record)
        daq.begin(events=nEvents)
        daq.wait()
        daq.end_run()

    def get_ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (not run) an absolute-scan plan over *motor*."""
        daq.configure(nEvents, record=record, controls=[motor])
        return scan([daq], motor, start, end, nsteps)

    def get_dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Return (not run) a scan plan relative to the current position."""
        daq.configure(nEvents, record=record)
        currPos = motor.wm()
        return scan([daq], motor, currPos + start, currPos + end, nsteps)

    def ascan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run an absolute DAQ scan over *motor*."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(scan([daq], motor, start, end, nsteps))

    def listscan(self, motor, posList, nEvents, record=True):
        """Run a DAQ scan over the explicit positions in *posList*."""
        daq.configure(nEvents, record=record, controls=[motor])
        RE(list_scan([daq], motor, posList))

    def dscan(self, motor, start, end, nsteps, nEvents, record=True):
        """Run a DAQ scan relative to the motor's current position."""
        daq.configure(nEvents, record=record, controls=[motor])
        currPos = motor.wm()
        RE(scan([daq], motor, currPos + start, currPos + end, nsteps))

    def setupSequencer(self, flymotor, distance, deltaT_shots, pp_shot_delay=2):
        """Program the event sequencer for a fly scan over *distance*.

        One sequence iteration = beam-on delay + pulse-picker delay; the
        repeat count is derived from the fly motor's velocity.
        """
        ## Setup sequencer for requested rate
        # leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        #seq.play_mode.put(0) # Run sequence once
        seq.play_mode.put(1) # Run sequence N Times

        # Determine the different sequences needed
        beamDelay = int(120*deltaT_shots) - pp_shot_delay
        if (beamDelay + pp_shot_delay) < 4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        fly_seq = [[185, beamDelay, 0, 0],
                   [187, pp_shot_delay, 0, 0]]
        #logging.debug("Sequence: {}".format(fly_seq))

        # Calculate how often to shoot in the requested distance.
        flyspeed = flymotor.velocity.get()
        flytime = distance/flyspeed
        flyshots = int(flytime/deltaT_shots)
        seq.rep_count.put(flyshots) # Run sequence N Times
        seq.sequence.put_seq(fly_seq)

    def setPP_flipflip(self, nshots=20, deltaShots=30, pp_shot_delay=2):
        """Program a flip-flip pulse-picker sequence of *nshots* shots.

        BUG FIX: the original referenced undefined names ``delta_shots``
        and ``pp_shot_delay`` (NameError); the delay is now a keyword
        parameter defaulting to the 2 ticks used elsewhere in this class.
        """
        ## Setup sequencer for requested rate
        # leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        #seq.play_mode.put(0) # Run sequence once
        seq.play_mode.put(1) # Run sequence N Times
        seq.rep_count.put(nshots) # Run sequence N Times

        # Determine the different sequences needed
        beamDelay = int(deltaShots) - pp_shot_delay
        if (beamDelay + pp_shot_delay) < 4:
            print('PP cannot go faster than 40 Hz in flip-flip mode, quit!')
            return
        ff_seq = [[185, beamDelay, 0, 0],
                  [187, pp_shot_delay, 0, 0]]
        #logging.debug("Sequence: {}".format(fly_seq))
        seq.sequence.put_seq(ff_seq)

    def set_pp_flipflop(self):
        """Put the pulse picker into flip-flop mode (blocks until done)."""
        pp.flipflop(wait=True)

    def runflipflip(self, start, end, nsteps, nshots=20, deltaShots=30):
        """Step the EVR delay from *start* to *end* in *nsteps*, firing the
        sequencer at each step.

        BUG FIX: original iterated ``for i in nsteps`` (TypeError on int)
        and used an undefined ``delta``.  NOTE(review): step size assumed
        to be a linear interpolation start..end — confirm intent.
        """
        self.set_pp_flipflop()
        #self.setPP_flipflip(nshots=20, deltaShots=6)
        delta = (end - start) / max(nsteps - 1, 1)
        for i in range(nsteps):
            self.evr_pp.ns_delay.set(start + delta*i)
            seq.start()
            time.sleep(5)

    def run_evr_seq_scan(self, start, env, nsteps, record=None, use_l3t=None):
        """RE the plan."""
        self.set_pp_flipflop()
        # BUG FIX: evr_seq_plan is a method; original called it without self.
        RE(self.evr_seq_plan(daq, seq, self.evr_pp, start, env, nsteps,
                             record=record, use_l3t=use_l3t))

    def evr_seq_plan(self, daq, seq, evr, start, end, nsteps, record=None,
                     use_l3t=None):
        """Configure daq and do the scan, trust other code to set up the sequencer."""
        yield from configure(daq, events=None, duration=None, record=record,
                             use_l3t=use_l3t, controls=[evr])
        yield from scan([daq, seq], evr, start, end, nsteps)

    def run_serp_seq_scan(self, shiftStart, shiftStop, shiftSteps, flyStart,
                          flyStop, deltaT_shots, record=False, pp_shot_delay=2):
        """Serpentine scan: shift foil_y between fly passes of foil_x."""
        daq.disconnect() #make sure we start from fresh point.
        shiftMotor = foil_y
        flyMotor = foil_x
        self.setupSequencer(flyMotor, abs(flyStop-flyStart), deltaT_shots,
                            pp_shot_delay=pp_shot_delay)
        daq.configure(-1, record=record, controls=[foil_x, foil_y])
        #daq.begin(-1)
        # An int shiftSteps means "number of steps"; a float means step size.
        if isinstance(shiftSteps, int):
            RE(serp_seq_scan(shiftMotor, np.linspace(shiftStart, shiftStop, shiftSteps),
                             flyMotor, [flyStart, flyStop], seq))
        else:
            RE(serp_seq_scan(shiftMotor, np.arange(shiftStart, shiftStop, shiftSteps),
                             flyMotor, [flyStart, flyStop], seq))

    def PPburst_sequence(self, nShots=None, nOffShots=2):
        """Build a burst sequence: open shots followed by off shots.

        An int nShots counts shots directly; a float is seconds at 120 Hz.
        BUG FIX: callers (prepare_seq_PPburst*) pass nOffShots=None, which
        crashed the ``< 2`` comparison; None now means the default of 2.
        """
        if nOffShots is None:
            nOffShots = 2
        if nOffShots < 2:
            raise ValueError('Minimum offshots is 2')
        ff_seq = [[185, 0, 0, 0]]
        ff_seq.append([179, 1, 0, 0])
        ff_seq.append([179, 1, 0, 0])
        if nShots is not None:
            if isinstance(nShots, int):
                ff_seq.append([185, nShots-2, 0, 0])
            else:
                ff_seq.append([185, int(nShots*120)-2, 0, 0])
        ff_seq.append([179, 2, 0, 0])
        if nShots is not None:
            # NOTE(review): off-shot units keyed on type(nShots), not
            # type(nOffShots) — preserved from the original; confirm intent.
            if isinstance(nShots, int):
                for i in range(nOffShots-2):
                    ff_seq.append([179, 1, 0, 0])
            else:
                for i in range(int(nOffShots*120)-2):
                    ff_seq.append([179, 1, 0, 0])
        return ff_seq

    def prepare_seq_PPburst(self, nShots=None, nOffShots=None):
        """Load a single PP-burst sequence into the event sequencer."""
        ## Setup sequencer for requested rate
        # leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0) # Run sequence once
        #seq.play_mode.put(1) # Run sequence N Times
        #seq.rep_count.put(nshots) # Run sequence N Times
        ff_seq = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
        seq.sequence.put_seq(ff_seq)

    def PPburst_sequence_pattern(self, nShots=None, nOffShots=None, nTimes=1):
        """Concatenate *nTimes* copies of the single-burst sequence."""
        single_burst = self.PPburst_sequence(nShots=nShots, nOffShots=nOffShots)
        ff_seq = []
        for i in range(nTimes):
            ff_seq += single_burst
        return ff_seq

    def prepare_seq_PPburst_pattern(self, nShots=None, nOffShots=None, nTimes=1):
        """Load a repeated PP-burst pattern into the event sequencer."""
        ## Setup sequencer for requested rate
        # leave the sync marker: assume no dropping.
        sync_mark = int(self._sync_markers[120])
        seq.sync_marker.put(sync_mark)
        seq.play_mode.put(0) # Run sequence once
        #seq.play_mode.put(1) # Run sequence N Times
        #seq.rep_count.put(nshots) # Run sequence N Times
        ff_seq = self.PPburst_sequence_pattern(nShots=nShots,
                                               nOffShots=nOffShots,
                                               nTimes=nTimes)
        seq.sequence.put_seq(ff_seq)

    def dumbSnake(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        Simple rastering for running at 120Hz with shutter open/close
        before and after motion stop.
        Needs some testing on how to deal with intermittent motion errors.
        """
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_x.mv(xEnd)
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)#orignal was 1
                self.sam_x.mv(xStart)
                sleep(0.1)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # BUG FIX: original printed x.sam_y (NameError).
                print('ypos', self.sam_y.wm())
                sleep(1.2)#original was 1
            except Exception:
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()

    def dumbSnake_pp(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime):
        """
        Same raster as dumbSnake but shots are fired by the event
        sequencer instead of opening/closing the pulse picker directly.
        """
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_x.mv(xEnd)
                sleep(0.1)
                seq.start()
                #sleep(sweepTime)
                #pp.close()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                sleep(1.2)#orignal was 1
                self.sam_x.mv(xStart)
                sleep(0.1)
                #pp.open()
                #sleep(sweepTime)
                #pp.close()
                seq.start()
                self.sam_x.wait()
                self.sam_y.mvr(yDelta)
                # BUG FIX: original printed x.sam_y (NameError).
                print('ypos', self.sam_y.wm())
                sleep(1.2)#original was 1
            except Exception:
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()

    def dumbSnake_v(self, yStart, yEnd, xDelta, nRoundTrips, sweepTime):
        """
        Vertical variant of dumbSnake: fly sam_y, step sam_x between passes.
        """
        self.sam_y.umv(yStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for i in range(nRoundTrips):
            try:
                print('starting round trip %d' % (i+1))
                self.sam_y.mv(yEnd)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)#orignal was 1
                self.sam_y.mv(yStart)
                sleep(0.05)
                pp.open()
                sleep(sweepTime)
                pp.close()
                self.sam_y.wait()
                self.sam_x.mvr(xDelta)
                sleep(1.2)#original was 1
            except Exception:
                print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()

    def dumbSnake_burst_window(self, xStart, xEnd, yDelta, nRoundTrips,
                               sweepTime, windowList):  # for burst mode
        """
        Raster each sample window in *windowList* (a list of sam_y
        positions), running nRoundTrips sequencer-driven passes per window.

        BUG FIXES vs original: the parameter was named ``windowlist`` but
        the body used ``windowList`` (NameError); the window position was
        passed as the whole list instead of the loop element; the position
        readback referenced a nonexistent ``self.sam_w``.
        """
        #windowList = np.zeros([numYwindow,numXwindow],dtype=object)
        self.sam_x.umv(xStart)
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for window_y in windowList:
            self.sam_y.umv(window_y)
            self.sam_y.wait()
            print('Windos position %f' % (self.sam_y.wm()))
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i+1))
                    self.sam_x.mv(xEnd)
                    sleep(0.05)
                    seq.start()#start sequence Need to be set
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)#wait for turning around
                    self.sam_x.mv(xStart)
                    sleep(0.05)
                    #pp.open()
                    seq.start()#start sequence
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    sleep(1)
                except Exception:
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()

    def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,
                        windowList, startgrid):  # for burst mode
        """
        Raster windows windowList[startgrid:] with sequencer-driven bursts.
        sweepTime is the pp close time between windows.
        """
        self.sam_x.umv(xStart)
        self.sam_y.umv(windowList[startgrid])
        daq.connect()
        daq.begin()
        sleep(2)
        print('Reached horizontal start position')
        # looping through n round trips
        for j in range(len(windowList)-startgrid):
            self.sam_y.umv(windowList[startgrid+j])
            self.sam_y.wait()
            print('Windos position %f' % (self.sam_y.wm()))
            for i in range(nRoundTrips):
                try:
                    print('starting round trip %d' % (i+1))
                    self.sam_x.mv(xEnd)
                    sleep(0.1)
                    seq.start()#start sequence Need to be set
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition', self.sam_y.wm())
                    sleep(1.2)#wait for turning around
                    self.sam_x.mv(xStart)
                    sleep(0.1)
                    #pp.open()
                    seq.start()#start sequence
                    #sleep(sweepTime)
                    #pp.close()
                    self.sam_x.wait()
                    self.sam_y.mvr(yDelta)
                    print('yposition', self.sam_y.wm())
                    sleep(1.2)
                except Exception:
                    print('round trip %d didn not end happily' % i)
        daq.end_run()
        daq.disconnect()
        #daq.end()

    #def run_serp_seq_scan_expl(self, yStart, yStop, ySteps, flyStart, flyStop, deltaT_shots, record=False, pp_shot_delay=1):
    #    daq.disconnect() #make sure we start from fresh point.
    #    self.setupSequencer(foil_y, abs(flyStop-flyStart), deltaT_shots, pp_shot_delay=pp_shot_delay)
    #    daq.configure(-1, record=record, controls=[foil_x, foil_y])
    #    #daq.begin(-1)
    #    if isinstance(ySteps, int):
    #        RE(serp_seq_scan(foil_x, np.linspace(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))
    #    else:
    #        RE(serp_seq_scan(foil_x, np.arange(yStart, yStop, ySteps), foil_y, [flyStart, flyStop], seq))
    #    #daq.end()
7,348
e247ffb5b6e4319ff17d0b8ae9f67e10c282c4ff
# Multi-role authentication decorator
def auth(role):
    """Decorator factory enforcing a login for the given *role*.

    If the role's current user is not set, the role's login() view is run
    and the wrapped function is NOT called (returns None, matching the
    original behaviour).  Otherwise the wrapped function's result is
    returned.  Unknown roles silently do nothing (also original behaviour).

    :param role: one of 'admin', 'student', 'teacher'.
    """
    from core import admin_view, student_view, teacher_view
    from functools import wraps

    # role -> (view module holding session state, attribute of current user)
    views = {
        'admin': (admin_view, 'admin_user'),
        'student': (student_view, 'student_user'),
        'teacher': (teacher_view, 'teacher_user'),
    }

    def deco(func):
        @wraps(func)  # preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            view = views.get(role)
            if view is None:
                # Unknown role: fall through, returning None as before.
                return None
            module, attr = view
            if getattr(module, attr) is None:
                module.login()
            else:
                return func(*args, **kwargs)
        return wrapper
    return deco
7,349
8c96c38a67c2eb97e30b325e4917ba4888731118
import json
import boto3
import os
import datetime

# Runtime configuration supplied through Lambda environment variables.
regionName = os.environ['AWS_REGION']
BUCKET_PATH = os.environ['BUCKET_PATH']      # MediaConvert output destination
SENSITIVIT = os.environ['SENSITIVIT']        # NOTE(review): read but never used below

s3_client = boto3.client('s3', region_name=regionName)
ddb_resource = boto3.resource('dynamodb', region_name=regionName)

def lambda_handler(event, context):
    """Cut a video down to the scenes containing the requested people.

    Reads a Rekognition face-tracking JSON from S3, finds the timestamp
    ranges where any of *targetPeople* appears, converts those ranges to
    HH:MM:SS:FF timecodes, injects them as InputClippings into a
    MediaConvert job template, and submits the job.
    """
    # body = json.loads(event['body'])
    body = event
    videoPath = str(body['videoPath'])
    templatePath = str(body['templatePath'])
    facePath = str(body['facePath'])
    targetPeople = str(body['targetPeople'])

    # facePath is an s3://bucket/key URI: element [2] is the bucket,
    # everything after it is the object key.
    FACES_BUCKET = facePath.split('/')[2]
    FACES_OBJECT = '/'.join(facePath.split('/')[3:])
    s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')
    facesJson = open('/tmp/faces.json', 'r')
    facesData = json.load(facesJson)
    FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])
    PEOPLE = targetPeople.split(',')
    timeStamps = []      # every ms timestamp where a target person matched
    scenesTime = []      # (start, end) timecode pairs for MediaConvert

    # Collect timestamps where the best face match is one of the targets.
    i = 0
    while i < len(facesData['Persons']):
        try:
            for target in PEOPLE:
                if facesData['Persons'][i]['FaceMatches'] == []:
                    pass
                elif facesData['Persons'][i]['FaceMatches'][0]['Face']['ExternalImageId'] == target.strip():
                    timeStamps.append(facesData['Persons'][i]['Timestamp'])
        except IndexError:
            # Entry without a FaceMatches list — skip it.
            pass
        i = i+1

    # Group timestamps into runs separated by gaps > 1000 ms.
    # NOTE(review): raises IndexError if no target was ever matched
    # (timeStamps empty); also, the first timestamp of each later group is
    # appended twice (i is only advanced in the <=1000 branch) — harmless
    # because only the group's first/last elements are used below.
    timeCollection = [[timeStamps[0]]]
    i = 1
    j = 0
    while i < len(timeStamps):
        if timeStamps[i] - timeCollection[j][-1] <= 1000:
            timeCollection[j].append(timeStamps[i])
            i = i+1
        else:
            j = j+1
            timeCollection.append([timeStamps[i]])

    # Convert each run >= 1 s long to HH:MM:SS:FF timecodes, where FF is
    # the frame number derived from the sub-second remainder.
    for collection in timeCollection:
        if collection[-1] - collection[0] >= 1000:
            if collection[0] % 1000 == 0:
                start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime("%H:%M:%S") + ':00'
            elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:
                # Zero-pad single-digit frame numbers.
                start = datetime.datetime.utcfromtimestamp(collection[0] // 1000).strftime("%H:%M:%S") + ':0' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))
            else:
                start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime("%H:%M:%S") + ':' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))
            if collection[-1] % 1000 == 0:
                end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':00'
            elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:
                end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':0' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))
            else:
                end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))
            scenesTime.append((start,end))
        else:
            pass

    # Fetch the MediaConvert job template (also an s3:// URI).
    JOB_BUCKET = templatePath.split('/')[2]
    JOB_OBJECT = '/'.join(templatePath.split('/')[3:])
    s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')

    # Output name modifier: "-Alice-Bob" built from the target names.
    finalName = []
    for people in PEOPLE:
        finalName.append(people.strip())
    OUTPUT_NAME = '-'+'-'.join(finalName)

    # Patch the template with the computed clippings and I/O paths.
    with open('/tmp/job-template.json', 'r') as r:
        template = json.load(r)
        for scene in scenesTime:
            template['Settings']['Inputs'][0]['InputClippings'].append({'StartTimecode': scene[0], 'EndTimecode': scene[-1]})
        template['Settings']['Inputs'][0]['FileInput'] = videoPath
        template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'] = OUTPUT_NAME
        template['Settings']['OutputGroups'][0]['OutputGroupSettings']['FileGroupSettings']['Destination'] = BUCKET_PATH
        with open('/tmp/job-all.json', 'w') as w:
            json.dump(template, w, indent=2)
            w.close()
        r.close()

    # MediaConvert requires a per-account endpoint discovered at runtime.
    mediaconvert_client = boto3.client('mediaconvert', region_name=regionName)
    response = mediaconvert_client.describe_endpoints(Mode='DEFAULT')
    mediaURL = response['Endpoints'][0]['Url']
    mediaconvert_client = boto3.client('mediaconvert',endpoint_url=mediaURL)
    with open("/tmp/job-all.json", "r") as jsonfile:
        job_object = json.load(jsonfile)
    mediaconvert_client.create_job(**job_object)

    output = {'videoPath': videoPath,
              'templatePath': templatePath,
              'facePath': facePath,
              'targetPerson': targetPeople,
              'Frame Rate': FRAME_RATE
    }

    return {
        'statusCode': 200,
        'body': json.dumps(output)
    }
7,350
f9cee552dde5ecf229fda559122b4b0e780c3b88
import itertools  # BUG FIX: used below but was never imported


class Solution:
    def countLetters(self, S: str) -> int:
        """Count substrings of S made of one repeated character.

        Each maximal run of length cnt contributes cnt*(cnt+1)//2
        single-character substrings.
        """
        ans = 0
        for _, g in itertools.groupby(S):
            cnt = len(list(g))
            ans += (1 + cnt) * cnt // 2
        return ans
7,351
e1751cc6f76f56e62cd02d61db65f1c27a4ff1b9
#encoding=utf-8 import pytest from frame_project.实战2.main_page import MainPage class TestMian: def test_mian(self): MainPage().goto_marketpage().goto_search().search() if __name__ == '__main__': pytest.main(['test_case.py','-s','-v'])
7,352
1b091d139635e90fb53b3fecc09bb879514c7b38
import os
import json

from google.appengine.ext import webapp

from generic import JsonRpcService


class ViewService(JsonRpcService):
    """JSON-RPC service exposing view-related methods.

    Method names prefixed with ``json_`` are presumably dispatched by
    JsonRpcService as RPC endpoints — TODO confirm against generic.py.
    """

    def json_create(self):
        # Placeholder implementation.
        return "Hello, World!"
7,353
3cf2ffbc8163c2a447016c93ff4dd13e410fff2b
# -*- coding: utf-8 -*- class Task: def __init__(self): self.title = '' self.subtasks = [] def set_title(self, title): self.title = title def set_subtasks(self, subtasks): self.subtasks = subtasks
7,354
98dbc6c3bdc3efb4310a2dbb7b1cc1c89eb4582b
import urllib3

# Read the image to upload as raw bytes.
with open('python.jpg', 'rb') as f:
    data = f.read()

http = urllib3.PoolManager()
# POST the raw JPEG body to httpbin, which echoes the request as JSON.
r = http.request('POST', 'http://httpbin.org/post', body=data, headers={'Content-Type': 'image/jpeg'})
# Response body is bytes; decode (default utf-8) before printing.
print(r.data.decode())
7,355
a9b1cc9b928b8999450b6c95656b863c476b273b
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication
#---Import that will load the UI file---#
from PyQt5.uic import loadUi
import detechRs_rc #---THIS IMPORT WILL DISPLAY THE IMAGES STORED IN THE QRC FILE AND _rc.py FILE--#

#--CLASS CREATED THAT WILL LOAD THE UI FILE
class Login(QMainWindow):
    """Login window backed by the Qt Designer file ``login_UI.ui``.

    The .ui file is expected to define the widgets referenced below:
    ``loginButton``, ``lgUserLine`` and ``lgPassLine``.
    """

    def __init__(self):
        super(Login, self).__init__()
        # --- FROM THE IMPORT PYQT5.UIC IMPORT LOADUI---##
        loadUi("login_UI.ui",self)
        #--- once the login button is clicked, call loginFunction ---#
        self.loginButton.clicked.connect(self.loginFunction)

    #-- Slot invoked by the login button --#
    def loginFunction(self):
        # NOTE(review): no real authentication yet — credentials are only
        # read from the line edits and echoed to the terminal.
        lgUserLine=self.lgUserLine.text() #-- Getting the textbox context lgUserline --#
        lgPassLine=self.lgPassLine.text() #-- Getting the textbox context lgPassline --#
        #-- Will display at the terminal what you wrote in the textbox(QLineEdit) --#
        print("Success, ", lgUserLine, "and ", lgPassLine)

app=QApplication(sys.argv)
loginWindow=Login()
widget=QtWidgets.QStackedWidget()
widget.addWidget(loginWindow) #-- displays all design widgets of the UI Window --#
widget.setFixedWidth(1190) #-- setting the fixed window size in width --#
widget.setFixedHeight(782) #-- setting the fixed window size in height--#
widget.show()
app.exec_() #-- window execution --#
7,356
bb3c42c9f87a463b9f18601c9e3897b6d21351d5
from django.db import models
from django.utils import timezone
from django.db.models.signals import post_save
from django.urls import reverse

# Create your models here.


class Purchase(models.Model):
    """A purchase invoice from a vendor; line items live in PurchaseDetail."""

    invoice = models.SmallIntegerField(primary_key=True,blank=False)  # invoice number doubles as PK
    ch_no = models.SmallIntegerField(blank=True,null=True)            # cheque number, optional
    vendor = models.CharField(max_length=128, blank=False)
    date = models.DateTimeField(default=timezone.now, blank=False)
    description = models.TextField(max_length=4096, blank=True, null=True)

    def __str__(self):
        return self.vendor

    def get_absolute_url(self):
        """URL of this purchase's detail page (used by CreateView redirects)."""
        return reverse('entry:purchase_detail', kwargs={'pk': self.pk})


class PurchaseDetail(models.Model):
    """One line item (product, quantity, rate) of a Purchase."""

    PRODUCT_CHOICES = (
        ('WOOD', 'Wood'),
        ('GLASS', 'Glass'),
        ('PLASTIC', 'Plastic'),
        ('LEATHER', 'Leather'),
        ('FABRIC','Fabric'),
        ('STEEL', 'Steel'),
    )
    purchase= models.ForeignKey(Purchase,on_delete=models.CASCADE)
    product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES, default='WOOD')
    quantity = models.PositiveSmallIntegerField(blank=False)
    rate = models.IntegerField(blank=False)
    # NOTE(review): `total` is stored in the DB while `labor_total` below is
    # computed as quantity*rate — nothing here keeps them in sync; confirm
    # which one callers rely on.
    total = models.IntegerField(blank=False)
    remarks = models.CharField(max_length=250)

    def _get_total(self):
        # Computed line total; exposed read-only via the property below.
        return self.quantity * self.rate
    labor_total = property(_get_total)

    def __str__(self):
        return (self.product_name)
7,357
3328c2ae0816c146398ecde92a056d1e77683696
import tkinter as tk

from telnetConn import telnetConnection

# Labels for the form rows, in the positional order telnetConnection expects.
fields = 'Host Address', 'UserName', 'Password', 'Message To', 'Text'


def fetch(entries):
    """Read every entry's current text and fire off the telnet message."""
    values = [widget.get() for _name, widget in entries]
    telnetConnection(values[0], values[1], values[2], values[3], values[4])


def makeform(root, fields):
    """Build one labelled Entry row per field; return (name, widget) pairs."""
    rows = []
    for name in fields:
        frame = tk.Frame(root)
        label = tk.Label(frame, width=15, text=name, anchor='w')
        entry = tk.Entry(frame)
        frame.pack(side=tk.TOP, fill=tk.X, padx=5, pady=5)
        label.pack(side=tk.LEFT)
        entry.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.X)
        rows.append((name, entry))
    return rows


if __name__ == '__main__':
    root = tk.Tk()
    ents = makeform(root, fields)
    # Enter key submits the form, same as the Send button.
    root.bind('<Return>', (lambda event, e=ents: fetch(e)))
    btnSend = tk.Button(root, text='Send', command=(lambda e=ents: fetch(e)))
    btnSend.pack(side=tk.LEFT, padx=5, pady=5)
    btnQuit = tk.Button(root, text='Quit', command=root.quit)
    btnQuit.pack(side=tk.LEFT, padx=5, pady=5)
    root.mainloop()
7,358
ff8b6bc607dac889da05b9f7e9b3595151153614
import requests
import json
from concurrent import futures
from tqdm import trange


def main():
    """Walk the paginated bangumi index and download every cover image."""
    ex = futures.ThreadPoolExecutor(max_workers=50)
    for i in trange(1, 152):
        url = "https://api.bilibili.com/pgc/season/index/result?season_version=-1&" \
              "area=-1&is_finish=-1&copyright=-1&season_status=-1&season_month=-1&year=-1&style_id=-1&order=3&st=1&sort=0&" \
              "page={}&" \
              "season_type=1&pagesize=20&type=1".format(i)
        ex.submit(index_page, url)
    # BUG FIX: wait for all queued downloads to finish and release the
    # worker threads (the original leaked the executor).
    ex.shutdown(wait=True)


def index_page(url):
    """Fetch one index page and hand its body to the parser."""
    res = requests.get(url)
    res.encoding = res.apparent_encoding
    next_page(res.text)


def next_page(html):
    """Parse the JSON index page and download each listed cover."""
    data = json.loads(html)
    for item in data['data']['list']:
        img_url = item['cover']
        img_name = item['title']
        get_img(img_url, img_name)


def get_img(img_url, img_name):
    """Download a single cover image into img/<title>.jpg."""
    img = requests.get(img_url)
    with open('img/{}.jpg'.format(img_name), 'w+b') as f:
        f.write(img.content)


if __name__ == '__main__':
    # BUG FIX: the original called main() unconditionally, so merely
    # importing this module kicked off 151 scraping requests.
    main()
7,359
2305d0b7ec0d9e08e3f1c0cedaafa6ed60786e50
#! /usr/bin/env python3

import common, os, shutil, sys


def main():
    """Delete every build/target directory under the repo root."""
    os.chdir(common.root)
    # Same directories, same order, as the original hand-written calls.
    build_dirs = (
        'shared/target',
        'platform/build',
        'platform/target',
        'tests/target',
        'examples/lwjgl/target',
        'examples/kwinit/target',
        'examples/jwm/target',
        'examples/swt/target',
    )
    for path in build_dirs:
        shutil.rmtree(path, ignore_errors=True)
    return 0


if __name__ == '__main__':
    sys.exit(main())
7,360
cf5ab10ce743aa261867501e93f498022e5908fe
#!python3

import configparser

# Load the simulation settings file from the working directory.
parser = configparser.ConfigParser()
parser.read("sim.conf")

# Echo the three expected options from the [config] section.
for option_name in ("option1", "option2", "option3"):
    print(parser.get("config", option_name))
7,361
e47d6b5d46f2dd84569a2341178b2ea5e074603a
import cv2
import numpy as np
import matplotlib
matplotlib.use('agg')   # headless backend: figures render off-screen
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import SeparableConv2D, Conv2D, MaxPooling2D
from keras.layers import BatchNormalization, Activation, Dropout, Flatten, Dense
from keras import backend as K

# dimensions of images.
img_width, img_height = 64,64

# Dataset locations and training hyper-parameters.
# NOTE(review): these (and ImageDataGenerator) are defined but no training
# happens in this script — the model below runs with untrained weights.
train_data_dir = 'data/train'
validation_data_dir = 'data/test'
nb_train_samples = 25473
nb_validation_samples = 7000
epochs = 50
batch_size = 64

# Channel ordering depends on the Keras backend configuration.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# Sequential CNN; each layer object is kept in a variable so it can be
# handed to layer_to_visualize() below.
model = Sequential()
convout1 = Conv2D(32, kernel_size=6, strides=2, input_shape=input_shape)
model.add(convout1)
activ1 = Activation('relu')
model.add(activ1)
convout2 = Conv2D(64, kernel_size=5, strides=1)
model.add(convout2)
activ2 = Activation('relu')
model.add(activ2)
pool1 = MaxPooling2D(pool_size=(3, 3), strides=1)
model.add(pool1)
convout3 = Conv2D(128, kernel_size=4, strides=2)
model.add(convout3)
activ3 = Activation('relu')
model.add(activ3)
convout4 = Conv2D(128, kernel_size=3, strides=1)
model.add(convout4)
activ4 = Activation('relu')
model.add(activ4)
pool2 = MaxPooling2D(pool_size=2, strides=1)
model.add(pool2)
convout5 = Conv2D(256, kernel_size=3, strides=1)
model.add(convout5)
activ5 = Activation('relu')
model.add(activ5)
pool3 = MaxPooling2D(pool_size=2, strides=1)
model.add(pool3)
model.add(Flatten())
dense1 = Dense(256)
model.add(dense1)
activ6 = Activation('relu')
model.add(activ6)
batchn = BatchNormalization()
model.add(batchn)
dense2 = Dense(184)
model.add(dense2)
activ7 = Activation('softmax')
model.add(activ7)

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Load one test image and add a batch dimension: (1, 64, 64, 3).
img = cv2.imread('test.jpg')
img = cv2.resize(img, (64, 64))
img = np.expand_dims(img, axis=0)

classes = model.predict(img)   # NOTE(review): result is never used


def layer_to_visualize(layer):
    """Run `img` through the model up to *layer* and plot its activations.

    Builds a backend function from the model inputs to the layer output,
    evaluates it in inference mode, and tiles every feature map into one
    matplotlib figure.  NOTE(review): with the 'agg' backend the figure is
    never shown or saved — only the shape print is observable.
    """
    inputs = [K.learning_phase()] + model.inputs

    _convout1_f = K.function(inputs, [layer.output])
    def convout1_f(X):
        # The [0] is to disable the training phase flag
        return _convout1_f([0] + [X])

    convolutions = convout1_f(img)
    convolutions = np.squeeze(convolutions)

    print ('Shape of conv:', convolutions.shape)

    # Arrange the feature maps in an (n x n) grid.
    n = convolutions.shape[0]
    n = int(np.ceil(np.sqrt(n)))

    # Visualization of each filter of the layer
    fig = plt.figure(figsize=(12,8))
    for i in range(len(convolutions)):
        ax = fig.add_subplot(n,n,i+1)
        ax.imshow(convolutions[i], cmap='gray')

# Specify the layer to want to visualize
layer_to_visualize(convout1)
layer_to_visualize(activ1)
layer_to_visualize(convout2)
layer_to_visualize(activ2)
layer_to_visualize(pool1)
layer_to_visualize(convout3)
layer_to_visualize(activ3)
layer_to_visualize(convout4)
layer_to_visualize(activ4)
layer_to_visualize(pool2)
layer_to_visualize(convout5)
layer_to_visualize(activ5)
layer_to_visualize(pool3)
7,362
bcc2977f36ecc775f44ae4251ce230af9abf63ba
'''
This module demonstrates how to use some functionality of python built-in csv module
'''
import csv


def csv_usage():
    '''
    This function demonstrates how to use csv module to read and write csv files
    '''
    # Read the semicolon-delimited example file and echo each row.
    with open('example.csv', 'r', newline='') as csvfile:
        for record in csv.reader(csvfile, delimiter=';'):
            print(', '.join(record))

    # Write a small comma-delimited table with a header row.
    table = [
        ['Name', 'Age', 'City'],
        ['Joe', '25', 'Miami'],
        ['Nick', '21', 'Mexico'],
    ]
    with open('new-2.csv', 'w', newline='') as csvfile:
        writer_c = csv.writer(csvfile, delimiter=',')
        for record in table:
            writer_c.writerow(record)


if __name__ == '__main__':
    csv_usage()
7,363
5066c2a5219cf1b233b4985efc7a4eb494b784ca
def gen_metadata(fn):
    """Parse a whitespace-delimited parameter file into a metadata dict.

    Lines that are empty or start with '#' or '%' are skipped.  A normal
    line ``Key value... # comment`` maps ``Key`` to the text between the key
    and any trailing '#', stripped.  ``RingThresh`` lines are special-cased:
    they accumulate into a nested dict keyed by the second token, e.g.
    ``RingThresh 2 80`` -> ``metadata['RingThresh']['2'] == '80'``.

    :param fn: path of the parameter file to read
    :return: dict of str -> str (plus the nested 'RingThresh' dict)
    """
    metadata = {}
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(fn, 'r') as fh:
        lines = fh.readlines()
    for line in lines:
        line = line.rstrip()
        if not line or line.startswith('#') or line.startswith('%'):
            continue
        first_word = line.split()[0]
        if line.startswith('RingThresh'):
            ring = metadata.setdefault('RingThresh', {})
            stripped = line.split(first_word)[1].strip()
            second_word = stripped.split()[0]
            ring[second_word] = stripped.split(second_word)[1].split('#')[0].strip()
        else:
            metadata[first_word] = line.split(first_word)[1].split('#')[0].strip()
    return metadata


def SetupPayloads(inp):
    """Build the Globus-flow input payload from the flat ``inp`` dict.

    :param inp: dict of endpoint ids, paths and run parameters (see keys below)
    :return: nested dict with the single top-level key 'input', containing the
             transfer/compute parameters, a per-block 'multipletasks' list and
             a 'pilot' section whose 'metadata' is read from the parameter
             file ``inp['pfName']`` via gen_metadata().
    """
    flow_input = {
        "input": {
            # Transfer endpoints and paths for injecting the raw data and
            # extracting the analysis results.
            "inject_source_endpoint_id": inp['sourceEP'],
            "funcx_endpoint_non_compute": inp['sourceNCEP'],
            "proc_endpoint_non_compute": inp['procNCEP'],
            "inject_source_path": inp['sourcePath'],
            "inject_destination_endpoint_id": inp['remoteDataEP'],
            "extract_source_endpoint_id": inp['remoteDataEP'],
            "funcx_endpoint_compute": inp['funcx_endpoint_compute'],
            "inject_destination_path": inp['executePath'],
            "extract_source_path": inp['executeResultPath'],
            "extract_destination_endpoint_id": inp['destEP'],
            "extract_destination_path": inp['resultPath'],
            # Analysis parameters forwarded verbatim.
            "paramFileName": inp['pfName'],
            "startLayerNr": inp['startLayerNr'],
            "endLayerNr": inp['endLayerNr'],
            "nFrames": inp['nFrames'],
            "numProcs": inp['numProcs'],
            "numBlocks": inp['numBlocks'],
            "timePath": inp['timePath'],
            "StartFileNrFirstLayer": inp['startNrFirstLayer'],
            "NrFilesPerSweep": inp['nrFilesPerSweep'],
            "FileStem": inp['fileStem'],
            "SeedFolder": inp['seedFolder'],
            "RawFolder": inp['rawFolder'],
            "darkFN": inp['darkFN'],
            "StartNr": inp['startNr'],
            "EndNr": inp['endNr'],
            'extract_recursive': False,
            'inject_recursive': True,
        }
    }
    # One task description per processing block, distinguished by 'blockNr'.
    flow_input['input']['multipletasks'] = [
        {
            'startLayerNr': inp['startLayerNr'],
            'endLayerNr': inp['endLayerNr'],
            'numProcs': inp['numProcs'],
            'nFrames': inp['nFrames'],
            'numBlocks': inp['numBlocks'],
            'blockNr': idx,
            'timePath': inp['timePath'],
            'FileStem': inp['fileStem'],
            'SeedFolder': inp['seedFolder'],
            'RawFolder': inp['rawFolder'],
            'paramFileName': inp['pfName'],
        }
        for idx in range(inp['numBlocks'])
    ]
    # NOTE(review): the layer/time path segment is repeated twice in
    # 'dataset' — verify this duplication is intentional.
    layer_seg = (f'{inp["fileStem"]}_Layer_{str(inp["startLayerNr"]).zfill(4)}'
                 f'_Analysis_Time_{inp["timePath"]}')
    flow_input['input']['pilot'] = {
        'dataset': f'{inp["sourcePath"]}/{layer_seg}/{layer_seg}/',
        'index': inp['portal_id'],
        'project': 'hedm',
        'source_globus_endpoint': inp['sourceEP'],
    }
    # Metadata comes from the parameter file, then run-identifying fields are
    # layered on top (single update instead of the original's four).
    flow_input['input']['pilot']['metadata'] = gen_metadata(inp['pfName'])
    flow_input['input']['pilot']['metadata'].update({
        'exp_id': f'{inp["experimentName"]}_{inp["fileStem"]}_{inp["timePath"]}',
        'time_path': inp["timePath"],
        'startNr': inp["startNr"],
        'endNr': inp["endNr"],
    })
    return flow_input
7,364
0d8a26ef4077b40e8255d5bb2ce9217b51118780
#!/usr/bin/python3
# encoding: utf-8
"""
@author: ShuoChang
@license: (C) MIT.
@contact: changshuo@bupt.edu.cn
@software: CRNN_STN_SEQ
@file: decoder_base.py
@time: 2019/7/22 17:21
@blog: https://www.zhihu.com/people/chang-shuo-59/activities
"""
from abc import ABCMeta
from abc import abstractmethod


class DecoderBase(object, metaclass=ABCMeta):
    """
    Abstract interface every decoder implementation must satisfy.

    Fix: the original assigned Python-2's ``__metaclass__`` attribute, which
    is silently ignored on Python 3, so the @abstractmethod decorators were
    never enforced.  Declaring ``metaclass=ABCMeta`` restores enforcement:
    the base class can no longer be instantiated, and subclasses must
    implement all abstract methods.
    """

    def __init__(self):
        # Identifier of this predictor stage.
        self._predictor = 'decoder'
        # Ground-truth label, populated via set_label().
        self._label = None

    @abstractmethod
    def set_label(self, label):
        """Store the ground-truth label (default implementation provided)."""
        self._label = label

    @abstractmethod
    def predict(self, input_data):
        """Run decoding on ``input_data`` and return the prediction."""
        pass

    @abstractmethod
    def loss(self, input_data):
        """Return the training loss for ``input_data``."""
        pass

    @abstractmethod
    def sequence_dist(self, input_data):
        """Return the sequence distance metric for ``input_data``."""
        pass
7,365
c2dba981b0d628aebdf8cebfb890aad74a629b08
from enum import Enum from app.utilities.data import Prefab class Tags(Enum): FLOW_CONTROL = 'Flow Control' MUSIC_SOUND = 'Music/Sound' PORTRAIT = 'Portrait' BG_FG = 'Background/Foreground' DIALOGUE_TEXT = 'Dialogue/Text' CURSOR_CAMERA = 'Cursor/Camera' LEVEL_VARS = 'Level-wide Unlocks and Variables' GAME_VARS = 'Game-wide Unlocks and Variables' TILEMAP = 'Tilemap' REGION = 'Region' ADD_REMOVE_INTERACT_WITH_UNITS = 'Add/Remove/Interact with Units' MODIFY_UNIT_PROPERTIES = 'Modify Unit Properties' UNIT_GROUPS = 'Unit Groups' MISCELLANEOUS = 'Miscellaneous' HIDDEN = 'Hidden' class EventCommand(Prefab): nid: str = None nickname: str = None tag: Tags = Tags.HIDDEN desc: str = '' keywords: list = [] optional_keywords: list = [] flags: list = [] values: list = [] display_values: list = [] def __init__(self, values=None, disp_values=None): self.values = values or [] self.display_values = disp_values or values or [] def save(self): # Don't bother saving display values if they are identical if self.display_values == self.values: return self.nid, self.values else: return self.nid, self.values, self.display_values def to_plain_text(self): if self.display_values: return ';'.join([self.nid] + self.display_values) else: return ';'.join([self.nid] + self.values) def __repr__(self): return self.to_plain_text() class Comment(EventCommand): nid = "comment" nickname = '#' tag = Tags.FLOW_CONTROL desc = \ """ **Lines** starting with '#' will be ignored. """ def to_plain_text(self): return self.values[0] class If(EventCommand): nid = "if" tag = Tags.FLOW_CONTROL desc = \ """ If the _Condition_ returns true, the block under this command will be executed. If it returns false, the script will search for the next **elif**, **else**, or **end** command before proceeding. If it is not a valid Python expression, the result will be treated as false. Remember to end your **if** blocks with **end**. The indentation is not required, but is recommended for organization of the conditional blocks. 
Example: ``` if;game.check_dead('Eirika') lose_game elif;game.check_dead('Lyon') win_game else u;Eirika s;Eirika;Nice! r;Eirika end ``` """ keywords = ['Condition'] class Elif(EventCommand): nid = "elif" tag = Tags.FLOW_CONTROL desc = \ """ Works exactly like the **if** statement, but is called only if the previous **if** or **elif** returned false. In the following example, the **elif** will only be processed if `if;game.check_dead('Eirika')` return false. Example: ``` if;game.check_dead('Eirika') lose_game elif;game.check_dead('Lyon') win_game else u;Eirika s;Eirika;Nice! r;Eirika end ``` """ keywords = ['Condition'] class Else(EventCommand): nid = "else" tag = Tags.FLOW_CONTROL desc = \ """ Defines a block to be executed only if the previous **if** or **elif** returned false. Example: ``` if;game.check_dead('Eirika') lose_game elif;game.check_dead('Lyon') win_game else u;Eirika s;Eirika;Nice! r;Eirika end ``` """ class End(EventCommand): nid = "end" tag = Tags.FLOW_CONTROL desc = \ """ Ends a conditional block. Refer to the **if** command for more information. """ class Break(EventCommand): nid = "break" tag = Tags.FLOW_CONTROL desc = \ """ Immediately ends the current event. """ class Wait(EventCommand): nid = "wait" tag = Tags.FLOW_CONTROL desc = \ """ Pauses the execution of the script for _Time_ milliseconds. Often used after a scene transition, cursor movement, or reinforcements to give the player a chance to take in the scene. """ keywords = ['Time'] class EndSkip(EventCommand): nid = "end_skip" tag = Tags.FLOW_CONTROL desc = \ """ If the player was skipping through the event script, stop the skip here. Used to prevent a single skip from skipping through an entire event. """ class Music(EventCommand): nid = "music" nickname = "m" tag = Tags.MUSIC_SOUND desc = \ """ Fades in _Music_ over the course of _Time_ milliseconds. Fade in defaults to 400 milliseconds. 
""" keywords = ['Music'] optional_keywords = ['Time'] # How long to fade in (default 400) class MusicClear(EventCommand): nid = "music_clear" tag = Tags.MUSIC_SOUND desc = \ """ Fades out the currently playing song over the course of _Time_ milliseconds. Also clears the entire song stack. Fade out defaults to 400 milliseconds. """ optional_keywords = ['Time'] # How long to fade out class Sound(EventCommand): nid = "sound" tag = Tags.MUSIC_SOUND desc = \ """ Plays the _Sound_ once. """ keywords = ['Sound'] class ChangeMusic(EventCommand): nid = 'change_music' tag = Tags.MUSIC_SOUND desc = \ """ Changes the phase theme music. For instance, you could use this command to change the player phase theme halfway through the chapter. """ keywords = ['PhaseMusic', 'Music'] class AddPortrait(EventCommand): nid = "add_portrait" nickname = "u" tag = Tags.PORTRAIT desc = \ """ Adds a portrait to the screen. Extra flags: 1. _mirror_: Portrait will face opposite expected direction. 2. _low_priority_: Portrait will appear behind all other portraits on the screen. 3. _immediate_: Portrait will not fade in. 4. _no_block_: Portrait will fade in, but will not pause execution of event script while doing so. """ keywords = ['Portrait', 'ScreenPosition'] optional_keywords = ['Slide', 'ExpressionList'] flags = ["mirror", "low_priority", "immediate", "no_block"] class MultiAddPortrait(EventCommand): nid = "multi_add_portrait" nickname = "uu" tag = Tags.PORTRAIT desc = \ """ Adds more than one portrait to the screen at the same time. Accepts 2-4 portraits and their associated _ScreenPosition_ as input. 
""" keywords = ['Portrait', 'ScreenPosition', 'Portrait', 'ScreenPosition'] optional_keywords = ['Portrait', 'ScreenPosition', 'Portrait', 'ScreenPosition'] class RemovePortrait(EventCommand): nid = "remove_portrait" nickname = "r" tag = Tags.PORTRAIT keywords = ['Portrait'] flags = ["immediate", "no_block"] class MultiRemovePortrait(EventCommand): nid = "multi_remove_portrait" nickname = "rr" tag = Tags.PORTRAIT keywords = ['Portrait', 'Portrait'] optional_keywords = ['Portrait', 'Portrait'] class MovePortrait(EventCommand): nid = "move_portrait" tag = Tags.PORTRAIT keywords = ['Portrait', 'ScreenPosition'] flags = ["immediate", "no_block"] class BopPortrait(EventCommand): nid = "bop_portrait" nickname = "bop" tag = Tags.PORTRAIT keywords = ['Portrait'] flags = ["no_block"] class Expression(EventCommand): nid = "expression" nickname = "e" tag = Tags.PORTRAIT keywords = ['Portrait', 'ExpressionList'] class Speak(EventCommand): nid = "speak" nickname = "s" tag = Tags.DIALOGUE_TEXT keywords = ['Speaker', 'Text'] optional_keywords = ['ScreenPosition', 'Width', 'DialogVariant'] flags = ['low_priority'] class Transition(EventCommand): nid = "transition" nickname = "t" tag = Tags.BG_FG optional_keywords = ['Direction', 'Speed', 'Color3'] class Background(EventCommand): # Also does remove background nid = "change_background" nickname = "b" tag = Tags.BG_FG optional_keywords = ['Panorama'] flags = ["keep_portraits"] class DispCursor(EventCommand): nid = "disp_cursor" tag = Tags.CURSOR_CAMERA keywords = ["Bool"] class MoveCursor(EventCommand): nid = "move_cursor" nickname = "set_cursor" tag = Tags.CURSOR_CAMERA keywords = ["Position"] flags = ["immediate"] class CenterCursor(EventCommand): nid = "center_cursor" tag = Tags.CURSOR_CAMERA keywords = ["Position"] flags = ["immediate"] class FlickerCursor(EventCommand): nid = 'flicker_cursor' nickname = 'highlight' tag = Tags.CURSOR_CAMERA keywords = ["Position"] flags = ["immediate"] class GameVar(EventCommand): nid = 
'game_var' tag = Tags.GAME_VARS keywords = ["Nid", "Condition"] class IncGameVar(EventCommand): nid = 'inc_game_var' tag = Tags.GAME_VARS keywords = ["Nid"] optional_keywords = ["Condition"] class LevelVar(EventCommand): nid = 'level_var' tag = Tags.LEVEL_VARS keywords = ["Nid", "Condition"] class IncLevelVar(EventCommand): nid = 'inc_level_var' tag = Tags.LEVEL_VARS keywords = ["Nid"] optional_keywords = ["Condition"] class WinGame(EventCommand): nid = 'win_game' tag = Tags.LEVEL_VARS class LoseGame(EventCommand): nid = 'lose_game' tag = Tags.LEVEL_VARS class ActivateTurnwheel(EventCommand): nid = 'activate_turnwheel' tag = Tags.MISCELLANEOUS # Whether to force the player to move the turnwheel back # defaults to true optional_keywords = ['Bool'] class BattleSave(EventCommand): nid = 'battle_save' tag = Tags.MISCELLANEOUS class ChangeTilemap(EventCommand): nid = 'change_tilemap' tag = Tags.TILEMAP keywords = ["Tilemap"] # How much to offset placed units by # Which tilemap to load the unit positions from optional_keywords = ["PositionOffset", "Tilemap"] flags = ["reload"] # Should place units in previously recorded positions class LoadUnit(EventCommand): nid = 'load_unit' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["UniqueUnit"] optional_keywords = ["Team", "AI"] class MakeGeneric(EventCommand): nid = 'make_generic' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS # Nid, class, level, team, ai, faction, anim variant keywords = ["String", "Klass", "String", "Team"] optional_keywords = ["AI", "Faction", "String", "ItemList"] class CreateUnit(EventCommand): nid = 'create_unit' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS # Unit template and new unit nid (can be '') keywords = ["Unit", "String"] # Unit level, position, entrytype, placement optional_keywords = ["String", "Position", "EntryType", "Placement"] class AddUnit(EventCommand): nid = 'add_unit' nickname = 'add' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["Unit"] optional_keywords = ["Position", 
"EntryType", "Placement"] class MoveUnit(EventCommand): nid = 'move_unit' nickname = 'move' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["Unit"] optional_keywords = ["Position", "MovementType", "Placement"] flags = ['no_block', 'no_follow'] class RemoveUnit(EventCommand): nid = 'remove_unit' nickname = 'remove' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["Unit"] optional_keywords = ["RemoveType"] class KillUnit(EventCommand): nid = 'kill_unit' nickname = 'kill' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["Unit"] flags = ['immediate'] class RemoveAllUnits(EventCommand): nid = 'remove_all_units' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS class RemoveAllEnemies(EventCommand): nid = 'remove_all_enemies' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS class InteractUnit(EventCommand): nid = 'interact_unit' nickname = 'interact' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["Unit", "Unit"] optional_keywords = ["CombatScript", "Ability"] class SetCurrentHP(EventCommand): nid = 'set_current_hp' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["Unit", "PositiveInteger"] class SetCurrentMana(EventCommand): nid = 'set_current_mana' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["Unit", "PositiveInteger"] class Resurrect(EventCommand): nid = 'resurrect' tag = Tags.ADD_REMOVE_INTERACT_WITH_UNITS keywords = ["GlobalUnit"] class Reset(EventCommand): nid = 'reset' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["Unit"] class HasAttacked(EventCommand): nid = 'has_attacked' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["Unit"] class HasTraded(EventCommand): nid = 'has_traded' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ['Unit'] class AddGroup(EventCommand): nid = 'add_group' tag = Tags.UNIT_GROUPS keywords = ["Group"] optional_keywords = ["StartingGroup", "EntryType", "Placement"] flags = ["create"] class SpawnGroup(EventCommand): nid = 'spawn_group' tag = Tags.UNIT_GROUPS keywords = ["Group", "CardinalDirection", "StartingGroup"] optional_keywords = 
["EntryType", "Placement"] flags = ["create", "no_block", 'no_follow'] class MoveGroup(EventCommand): nid = 'move_group' nickname = 'morph_group' tag = Tags.UNIT_GROUPS keywords = ["Group", "StartingGroup"] optional_keywords = ["MovementType", "Placement"] flags = ['no_block', 'no_follow'] class RemoveGroup(EventCommand): nid = 'remove_group' tag = Tags.UNIT_GROUPS keywords = ["Group"] optional_keywords = ["RemoveType"] class GiveItem(EventCommand): nid = 'give_item' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Item"] flags = ['no_banner', 'no_choice', 'droppable'] class RemoveItem(EventCommand): nid = 'remove_item' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Item"] flags = ['no_banner'] class GiveMoney(EventCommand): nid = 'give_money' tag = Tags.GAME_VARS keywords = ["Integer"] optional_keywords = ["Party"] flags = ['no_banner'] class GiveBexp(EventCommand): nid = 'give_bexp' tag = Tags.GAME_VARS keywords = ["Condition"] optional_keywords = ["Party", "String"] flags = ['no_banner'] class GiveExp(EventCommand): nid = 'give_exp' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "PositiveInteger"] class SetExp(EventCommand): nid = 'set_exp' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "PositiveInteger"] class GiveWexp(EventCommand): nid = 'give_wexp' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "WeaponType", "Integer"] flags = ['no_banner'] class GiveSkill(EventCommand): nid = 'give_skill' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Skill"] flags = ['no_banner'] class RemoveSkill(EventCommand): nid = 'remove_skill' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Skill"] flags = ['no_banner'] class ChangeAI(EventCommand): nid = 'change_ai' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "AI"] class ChangeTeam(EventCommand): nid = 'change_team' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Team"] class ChangePortrait(EventCommand): nid = 
'change_portrait' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "PortraitNid"] class ChangeStats(EventCommand): nid = 'change_stats' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "StatList"] flags = ['immediate'] class SetStats(EventCommand): nid = 'set_stats' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "StatList"] flags = ['immediate'] class AutolevelTo(EventCommand): nid = 'autolevel_to' tag = Tags.MODIFY_UNIT_PROPERTIES # Second argument is level that is eval'd keywords = ["GlobalUnit", "String"] # Whether to actually change the unit's level flags = ["hidden"] class SetModeAutolevels(EventCommand): nid = 'set_mode_autolevels' tag = Tags.GAME_VARS keywords = ["String"] # Whether to actually change the unit's level flags = ["hidden"] class Promote(EventCommand): nid = 'promote' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit"] optional_keywords = ["Klass"] class ChangeClass(EventCommand): nid = 'change_class' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit"] optional_keywords = ["Klass"] class AddTag(EventCommand): nid = 'add_tag' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Tag"] class RemoveTag(EventCommand): nid = 'remove_tag' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ["GlobalUnit", "Tag"] class AddTalk(EventCommand): nid = 'add_talk' tag = Tags.LEVEL_VARS keywords = ["Unit", "Unit"] class RemoveTalk(EventCommand): nid = 'remove_talk' tag = Tags.LEVEL_VARS keywords = ["Unit", "Unit"] class AddLore(EventCommand): nid = 'add_lore' nickname = 'unlock_lore' tag = Tags.GAME_VARS keywords = ["Lore"] class RemoveLore(EventCommand): nid = 'remove_lore' tag = Tags.GAME_VARS keywords = ["Lore"] class AddBaseConvo(EventCommand): nid = 'add_base_convo' tag = Tags.LEVEL_VARS keywords = ["String"] class IgnoreBaseConvo(EventCommand): nid = 'ignore_base_convo' tag = Tags.LEVEL_VARS keywords = ["String"] class RemoveBaseConvo(EventCommand): nid = 'remove_base_convo' tag = Tags.LEVEL_VARS 
keywords = ["String"] class IncrementSupportPoints(EventCommand): nid = 'increment_support_points' tag = Tags.MODIFY_UNIT_PROPERTIES keywords = ['GlobalUnit', 'GlobalUnit', 'PositiveInteger'] class AddMarketItem(EventCommand): nid = 'add_market_item' tag = Tags.GAME_VARS keywords = ["Item"] class RemoveMarketItem(EventCommand): nid = 'remove_market_item' tag = Tags.GAME_VARS keywords = ["Item"] class AddRegion(EventCommand): nid = 'add_region' tag = Tags.REGION keywords = ["Nid", "Position", "Size", "RegionType"] optional_keywords = ["String"] flags = ["only_once"] class RegionCondition(EventCommand): nid = 'region_condition' tag = Tags.REGION keywords = ["Nid", "Condition"] class RemoveRegion(EventCommand): nid = 'remove_region' tag = Tags.REGION keywords = ["Nid"] class ShowLayer(EventCommand): nid = 'show_layer' tag = Tags.TILEMAP keywords = ["Layer"] optional_keywords = ["LayerTransition"] class HideLayer(EventCommand): nid = 'hide_layer' tag = Tags.TILEMAP keywords = ["Layer"] optional_keywords = ["LayerTransition"] class AddWeather(EventCommand): nid = 'add_weather' tag = Tags.TILEMAP keywords = ["Weather"] class RemoveWeather(EventCommand): nid = 'remove_weather' tag = Tags.TILEMAP keywords = ["Weather"] class ChangeObjectiveSimple(EventCommand): nid = 'change_objective_simple' tag = Tags.LEVEL_VARS keywords = ["String"] class ChangeObjectiveWin(EventCommand): nid = 'change_objective_win' tag = Tags.LEVEL_VARS keywords = ["String"] class ChangeObjectiveLoss(EventCommand): nid = 'change_objective_loss' tag = Tags.LEVEL_VARS keywords = ["String"] class SetPosition(EventCommand): nid = 'set_position' tag = Tags.MISCELLANEOUS keywords = ["String"] class MapAnim(EventCommand): nid = 'map_anim' tag = Tags.TILEMAP keywords = ["MapAnim", "Position"] flags = ["no_block"] class ArrangeFormation(EventCommand): nid = 'arrange_formation' tag = Tags.MISCELLANEOUS # Puts units on formation tiles automatically class Prep(EventCommand): nid = 'prep' tag = Tags.MISCELLANEOUS 
optional_keywords = ["Bool", "Music"] # Pick units class Base(EventCommand): nid = 'base' tag = Tags.MISCELLANEOUS keywords = ["Panorama"] optional_keywords = ["Music"] class Shop(EventCommand): nid = 'shop' tag = Tags.MISCELLANEOUS keywords = ["Unit", "ItemList"] optional_keywords = ["ShopFlavor"] class Choice(EventCommand): nid = 'choice' tag = Tags.MISCELLANEOUS keywords = ['Nid', 'String', 'StringList'] optional_keywords = ['Orientation'] class ChapterTitle(EventCommand): nid = 'chapter_title' tag = Tags.MISCELLANEOUS optional_keywords = ["Music", "String"] class Alert(EventCommand): nid = 'alert' tag = Tags.DIALOGUE_TEXT keywords = ["String"] class VictoryScreen(EventCommand): nid = 'victory_screen' tag = Tags.MISCELLANEOUS class RecordsScreen(EventCommand): nid = 'records_screen' tag = Tags.MISCELLANEOUS class LocationCard(EventCommand): nid = 'location_card' tag = Tags.DIALOGUE_TEXT keywords = ["String"] class Credits(EventCommand): nid = 'credits' tag = Tags.DIALOGUE_TEXT keywords = ["String", "String"] flags = ['wait', 'center', 'no_split'] class Ending(EventCommand): nid = 'ending' tag = Tags.DIALOGUE_TEXT keywords = ["Portrait", "String", "String"] class PopDialog(EventCommand): nid = 'pop_dialog' tag = Tags.DIALOGUE_TEXT desc = \ """ Removes the most recent dialog text box from the screen. Generally only used in conjunction with the `ending` command to remove the Ending box during a transition. Example: ``` ending;Coyote;Coyote, Man of Mystery;Too mysterious for words. 
transition;Close pop_dialog transition;Open ``` """ class Unlock(EventCommand): nid = 'unlock' tag = Tags.REGION keywords = ["Unit"] class FindUnlock(EventCommand): nid = 'find_unlock' tag = Tags.HIDDEN keywords = ["Unit"] class SpendUnlock(EventCommand): nid = 'spend_unlock' tag = Tags.HIDDEN keywords = ["Unit"] class TriggerScript(EventCommand): nid = 'trigger_script' tag = Tags.MISCELLANEOUS keywords = ["Event"] optional_keywords = ["GlobalUnit", "GlobalUnit"] class ChangeRoaming(EventCommand): nid = 'change_roaming' tag = Tags.MISCELLANEOUS desc = "Turn free roam mode on or off" keywords = ["Bool"] class ChangeRoamingUnit(EventCommand): nid = 'change_roaming_unit' tag = Tags.MISCELLANEOUS desc = "Changes the level's current roaming unit." keywords = ["Unit"] class CleanUpRoaming(EventCommand): nid = 'clean_up_roaming' tag = Tags.MISCELLANEOUS desc = "Removes all units other than the roaming unit" keywords = [] class AddToInitiative(EventCommand): nid = 'add_to_initiative' tag = Tags.MISCELLANEOUS desc = "Adds the specified unit to the specified point in the initiative order. 0 is the current initiative position." keywords = ["Unit", "Integer"] class MoveInInitiative(EventCommand): nid = 'move_in_initiative' tag = Tags.MISCELLANEOUS desc = "Moves the initiative of the specified unit." 
keywords = ["Unit", "Integer"] def get_commands(): return EventCommand.__subclasses__() def restore_command(dat): if len(dat) == 2: nid, values = dat display_values = None elif len(dat) == 3: nid, values, display_values = dat subclasses = EventCommand.__subclasses__() for command in subclasses: if command.nid == nid: copy = command(values, display_values) return copy print("Couldn't restore event command!") print(nid, values, display_values) return None def parse_text(text): if text.startswith('#'): return Comment([text]) arguments = text.split(';') command_nid = arguments[0] subclasses = EventCommand.__subclasses__() for command in subclasses: if command.nid == command_nid or command.nickname == command_nid: cmd_args = arguments[1:] true_cmd_args = [] command_info = command() for idx, arg in enumerate(cmd_args): if idx < len(command_info.keywords): cmd_keyword = command_info.keywords[idx] elif idx - len(command_info.keywords) < len(command_info.optional_keywords): cmd_keyword = command_info.optional_keywords[idx - len(command_info.keywords)] else: cmd_keyword = "N/A" # if parentheses exists, then they contain the "true" arg, with everything outside parens essentially as comments if '(' in arg and ')' in arg and not cmd_keyword == 'Condition': true_arg = arg[arg.find("(")+1:arg.find(")")] true_cmd_args.append(true_arg) else: true_cmd_args.append(arg) copy = command(true_cmd_args, cmd_args) return copy return None def parse(command): values = command.values num_keywords = len(command.keywords) true_values = values[:num_keywords] flags = {v for v in values[num_keywords:] if v in command.flags} optional_keywords = [v for v in values[num_keywords:] if v not in flags] true_values += optional_keywords return true_values, flags
7,366
4745d81558130440d35d277b586572f5d3f85c06
import unittest

import userinput


class Testing(unittest.TestCase):
    """Unit tests for userinput.UserInput's character-set handling."""

    def test_creation(self):
        x = userinput.UserInput()
        self.assertIsNotNone(x)

    def test_charset_initialization(self):
        x = userinput.UserInput()
        self.assertEqual(x.character_set, userinput.CHARACTERS)

    def test_charset_display(self):
        x = userinput.UserInput()
        self.assertEqual(str(x.character_set), str(x.display_characters()))

    def test_charset_remove(self):
        x = userinput.UserInput()
        # my favourite character :)
        x.remove_character('پ')
        self.assertNotIn('پ', x.character_set)

    def test_charset_remove_missing(self):
        # Removing a character that was never in the set must raise KeyError.
        # Fix: the original try/except asserted constants on both paths
        # (assertFalse(False) / assertTrue(True)), so it could never fail.
        x = userinput.UserInput()
        with self.assertRaises(KeyError):
            x.remove_character('+')


if __name__ == '__main__':
    unittest.main()
7,367
e14b8d0f85042ceda955022bee08b3b3b4c2361d
# Generated by Django 3.0.8 on 2021-03-25 13:47 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('Asha', '0005_baby'), ] operations = [ migrations.AlterField( model_name='baby', name='Auth_Id', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Asha.BasicDetails'), ), ]
7,368
a0460b100a750b685f3e831a19379b0e26da4b35
# x = 10
#
# def increment():
#     x += 1
#
# ^^ Non-working code: a function body cannot rebind an outer name
# without declaring it.

x = 10


def increment(number):
    """Return *number* plus one; the caller's variable is left untouched."""
    return number + 1


# To change a module-level variable, pass it in and
# rebind it with the returned value.
x = increment(x)
7,369
72b5e76f63e347d7275b0b711fa02b7f327785f6
#!/usr/bin/python import os import sys fdatadir = "/fdata/hepx/store/user/taohuang/NANOAOD/" datasets = []; NumSample = []; sampleN_short = [] Nanodatasets = []; localdirs = {} MCxsections = [] #doTT=True; doDY=True; doVV=True; doSingleT=True; doWjets=True; dottV=True ##DoubleEG datasets.append('/DoubleEG/Run2016B-05Feb2018_ver1-v1/NANOAOD') NumSample.append('-1'); sampleN_short.append('DoubleEGRun2016Bver1') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016B-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-2'); sampleN_short.append('DoubleEGRun2016Bver2') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016C-05Feb2018-v1/NANOAOD') NumSample.append('-3'); sampleN_short.append('DoubleEGRun2016C') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016D-05Feb2018-v1/NANOAOD') NumSample.append('-4'); sampleN_short.append('DoubleEGRun2016D') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016E-05Feb2018-v1/NANOAOD') NumSample.append('-5'); sampleN_short.append('DoubleEGRun2016E') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016F-05Feb2018-v1/NANOAOD') NumSample.append('-6'); sampleN_short.append('DoubleEGRun2016F') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016G-05Feb2018-v1/NANOAOD') NumSample.append('-7'); sampleN_short.append('DoubleEGRun2016G') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016H-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-8'); sampleN_short.append('DoubleEGRun2016Hver2') MCxsections.append(-1.0) datasets.append('/DoubleEG/Run2016H-05Feb2018_ver3-v1/NANOAOD') NumSample.append('-9'); sampleN_short.append('DoubleEGRun2016Hver3') MCxsections.append(-1.0) ##DoubleMuon datasets.append('/DoubleMuon/Run2016B-05Feb2018_ver1-v1/NANOAOD') NumSample.append('-10'); sampleN_short.append('DoubleMuonRun2016Bver1') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016B-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-11'); sampleN_short.append('DoubleMuonRun2016Bver2') MCxsections.append(-1.0) 
datasets.append('/DoubleMuon/Run2016C-05Feb2018-v1/NANOAOD') NumSample.append('-12'); sampleN_short.append('DoubleMuonRun2016C') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016D-05Feb2018-v1/NANOAOD') NumSample.append('-13'); sampleN_short.append('DoubleMuonRun2016D') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016E-05Feb2018-v1/NANOAOD') NumSample.append('-14'); sampleN_short.append('DoubleMuonRun2016E') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016F-05Feb2018-v1/NANOAOD') NumSample.append('-15'); sampleN_short.append('DoubleMuonRun2016F') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016G-05Feb2018-v1/NANOAOD') NumSample.append('-16'); sampleN_short.append('DoubleMuonRun2016G') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016H-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-17'); sampleN_short.append('DoubleMuonRun2016Hver2') MCxsections.append(-1.0) datasets.append('/DoubleMuon/Run2016H-05Feb2018_ver3-v1/NANOAOD') NumSample.append('-18'); sampleN_short.append('DoubleMuonRun2016Hver3') MCxsections.append(-1.0) #MuonEG datasets.append('/MuonEG/Run2016B-05Feb2018_ver1-v1/NANOAOD') NumSample.append('-19'); sampleN_short.append('MuonEGRun2016Bver2') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016B-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-20'); sampleN_short.append('MuonEGRun2016Bver2') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016C-05Feb2018-v1/NANOAOD') NumSample.append('-21'); sampleN_short.append('MuonEGRun2016C') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016D-05Feb2018-v1/NANOAOD') NumSample.append('-22'); sampleN_short.append('MuonEGRun2016D') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016E-05Feb2018-v1/NANOAOD') NumSample.append('-23'); sampleN_short.append('MuonEGRun2016E') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016F-05Feb2018-v1/NANOAOD') NumSample.append('-24'); sampleN_short.append('MuonEGRun2016F') MCxsections.append(-1.0) 
datasets.append('/MuonEG/Run2016G-05Feb2018-v1/NANOAOD') NumSample.append('-25'); sampleN_short.append('MuonEGRun2016G') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016H-05Feb2018_ver2-v1/NANOAOD') NumSample.append('-26'); sampleN_short.append('MuonEGRun2016Hver2') MCxsections.append(-1.0) datasets.append('/MuonEG/Run2016H-05Feb2018_ver3-v1/NANOAOD') NumSample.append('-27'); sampleN_short.append('MuonEGRun2016Hver3') MCxsections.append(-1.0) masspoints = [260, 270, 300, 350, 400, 450, 500, 550, 600, 650, 750, 800, 900] for mass in masspoints: datasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-%d_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM"%mass) NumSample.append(masspoints.index(mass)); sampleN_short.append('RadionM%d'%mass) MCxsections.append(5.0)#by default, assume the cross section for signal is 5pb #datasets.append("/GluGluToBulkGravitonToHHTo2B2VTo2L2Nu_M-*_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") #NumSample.append('2'); sampleN_short.append('Graviton') # TT## FIXME, use official one later #datasets.append('/TTTo2L2Nu_13TeV-powheg/RunIISpring16MiniAODv2-PUSpring16_80X_mcRun2_asymptotic_2016_miniAODv2_v0_ext1-v1/MINIAODSIM') datasets.append('/TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') #datasets.append('/TTTo2L2Nu_TuneCP5_13TeV-powheg-pythia8/arizzi-RunIIFall17MiniAOD-94X-Nano01Fall17-e273b12d9f89d622a34e4bc98b05ee29/USER') NumSample.append('13'); sampleN_short.append('TT') #MCxsections.append(72.1) #MCxsections.append(76.7) MCxsections.append(87.31) # DY #datasets.append('/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') 
datasets.append('/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('14'); sampleN_short.append('DY') MCxsections.append(18610.0) datasets.append('/DYToLL_0J_13TeV-amcatnloFXFX-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('15'); sampleN_short.append('DY') MCxsections.append(4758.9) datasets.append('/DYToLL_1J_13TeV-amcatnloFXFX-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('16'); sampleN_short.append('DY') MCxsections.append(929.1) datasets.append('/DYToLL_2J_13TeV-amcatnloFXFX-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('17'); sampleN_short.append('DY') MCxsections.append(337.1) # VV datasets.append('/ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('18'); sampleN_short.append('VV') MCxsections.append(3.22) datasets.append('/ZZTo2L2Nu_13TeV_powheg_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('19'); sampleN_short.append('VV') MCxsections.append(0.564) datasets.append('/ZZTo4L_13TeV_powheg_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('20'); sampleN_short.append('VV') MCxsections.append(1.256) #datasets.append('/WWToLNuQQ_aTGC_13TeV-madgraph-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') #NumSample.append('21'); sampleN_short.append('VV') #MCxsections.append(49.997)# ## not available now because of pdf uncertainty #FIXME 
#datasets.append('/WWTo2L2Nu_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') #datasets.append('/WWTo2L2Nu_13TeV-powheg/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') #NumSample.append('22'); sampleN_short.append('VV') ### not available now #MCxsections.append(12.178) datasets.append('/WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('23'); sampleN_short.append('VV') MCxsections.append(5.595) #FIXME #datasets.append('/WZTo1L3Nu_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') #NumSample.append('24'); sampleN_short.append('VV') ### not available now #MCxsections.append(3.033) datasets.append('/WZTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v3/MINIAODSIM') NumSample.append('25'); sampleN_short.append('VV') MCxsections.append(10.71) datasets.append('/WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('26'); sampleN_short.append('VV') MCxsections.append(4.42965) ##sT datasets.append('/ST_t-channel_top_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('27'); sampleN_short.append('sT') MCxsections.append(136.02) datasets.append('/ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('28'); sampleN_short.append('sT') MCxsections.append(80.95) 
datasets.append('/ST_s-channel_4f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('29'); sampleN_short.append('sT') MCxsections.append(3.36) datasets.append('/ST_tW_antitop_5f_NoFullyHadronicDecays_13TeV-powheg_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('30'); sampleN_short.append('sT') MCxsections.append(19.5545) datasets.append('/ST_tW_top_5f_NoFullyHadronicDecays_13TeV-powheg_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('31'); sampleN_short.append('sT') MCxsections.append(19.5545) # W + Jets datasets.append('/WJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM') NumSample.append('32'); sampleN_short.append('Wjet') MCxsections.append(61526.7) datasets.append('/WJetsToLNu_HT-100To200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1/MINIAODSIM') NumSample.append('33'); sampleN_short.append('Wjet') MCxsections.append(1627.45) datasets.append('/WJetsToLNu_HT-200To400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1/MINIAODSIM') NumSample.append('34'); sampleN_short.append('Wjet') MCxsections.append(435.237) datasets.append('/WJetsToLNu_HT-400To600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') NumSample.append('35'); sampleN_short.append('Wjet') MCxsections.append(59.181) #FIXME #datasets.append('/WJetsToLNu_HT-600To800_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM') #NumSample.append('36'); 
# BUGFIX: the WJets HT-600To800 dataset above is commented out in both
# `datasets` and `NumSample`, so its paired `sampleN_short`/`MCxsections`
# entries must be disabled too.  Leaving them active shifted every later entry
# of those two parallel lists by one, mis-assigning short names and cross
# sections to all subsequent samples.
#sampleN_short.append('Wjet')### not available now
#MCxsections.append(14.58)
datasets.append('/WJetsToLNu_HT-800To1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM')
NumSample.append('37'); sampleN_short.append('Wjet')
MCxsections.append(6.656)
datasets.append('/WJetsToLNu_HT-1200To2500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM')
NumSample.append('38'); sampleN_short.append('Wjet')
MCxsections.append(1.608)
datasets.append('/WJetsToLNu_HT-2500ToInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM')
NumSample.append('39'); sampleN_short.append('Wjet')
MCxsections.append(0.0389)

# tt + V
datasets.append('/TTWJetsToQQ_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM')
NumSample.append('40'); sampleN_short.append('ttV')
MCxsections.append(0.4062)
datasets.append('/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext2-v1/MINIAODSIM')
NumSample.append('41'); sampleN_short.append('ttV')
MCxsections.append(0.2043)
datasets.append('/TTZToQQ_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM')
NumSample.append('42'); sampleN_short.append('ttV')
MCxsections.append(0.5297)
datasets.append('/TTZToLLNuNu_M-10_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext3-v1/MINIAODSIM')
NumSample.append('43'); sampleN_short.append('ttV')
MCxsections.append(0.2529)

# One local-directory list per short sample name.
alljobtypes = set(sampleN_short)
for job in alljobtypes:
    localdirs[job] = []

# Resolve the local storage directory of every MiniAOD dataset listed above.
for ijob, job in enumerate(datasets):
    nsample = int(NumSample[ijob])
    jobtype = sampleN_short[ijob]
    if nsample < 0:
        # Real data: the short sample name doubles as the directory name.
        datadir = sampleN_short[ijob]
    else:
        # MC: use the primary dataset name.
        # BUGFIX: was `elif nsample > 0`, which silently skipped sample
        # number 0 (RadionM260) and left its datadir as " ".
        datadir = job.split('/')[1]
    # (A dasgoclient query for the NanoAOD counterpart used to live here;
    # the hard-coded Nanodatasets list below replaces it.)
    localdirs[jobtype].append(os.path.join(fdatadir, datadir))

Nanodatasets.append("/DoubleEG/Run2016B-05Feb2018_ver1-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016B-05Feb2018_ver2-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016C-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016D-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016E-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016F-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016G-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016H-05Feb2018_ver2-v1/NANOAOD")
Nanodatasets.append("/DoubleEG/Run2016H-05Feb2018_ver3-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016B-05Feb2018_ver1-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016B-05Feb2018_ver2-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016C-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016D-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016E-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016F-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016G-05Feb2018-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016H-05Feb2018_ver2-v1/NANOAOD")
Nanodatasets.append("/DoubleMuon/Run2016H-05Feb2018_ver3-v1/NANOAOD")
Nanodatasets.append("/MuonEG/Run2016B-05Feb2018_ver1-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016B-05Feb2018_ver2-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016C-05Feb2018-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016D-05Feb2018-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016E-05Feb2018-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016F-05Feb2018-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016G-05Feb2018-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016H-05Feb2018_ver2-v1/NANOAOD") Nanodatasets.append("/MuonEG/Run2016H-05Feb2018_ver3-v1/NANOAOD") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-260_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-270_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-300_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-350_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-400_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-450_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-500_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-550_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") 
Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-600_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-650_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-750_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-800_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/GluGluToRadionToHHTo2B2VTo2L2Nu_M-900_narrow_13TeV-madgraph-v2/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") #TTbar #Nanodatasets.append("/TTTo2L2Nu_TuneCP5_13TeV-powheg-pythia8/arizzi-RunIIFall17MiniAOD-94X-Nano01Fall17-e273b12d9f89d622a34e4bc98b05ee29/USER") Nanodatasets.append('/TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM') # DY Nanodatasets.append("/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/DYToLL_0J_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM") Nanodatasets.append("/DYToLL_1J_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM") Nanodatasets.append("/DYToLL_2J_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM") # VV Nanodatasets.append("/ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") 
Nanodatasets.append("/ZZTo2L2Nu_13TeV_powheg_pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/ZZTo4L_13TeV_powheg_pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") #Nanodatasets.append("/WWToLNuQQ_aTGC_13TeV-madgraph-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/WZTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM") #sT Nanodatasets.append("/ST_t-channel_top_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/ST_s-channel_4f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/ST_tW_antitop_5f_NoFullyHadronicDecays_13TeV-powheg_TuneCUETP8M1/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") Nanodatasets.append("/ST_tW_top_5f_NoFullyHadronicDecays_13TeV-powheg_TuneCUETP8M1/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM") #W+jets Nanodatasets.append("/WJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext2-v1/NANOAODSIM") 
Nanodatasets.append("/WJetsToLNu_HT-100To200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext2-v1/NANOAODSIM")
Nanodatasets.append("/WJetsToLNu_HT-200To400_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext2-v1/NANOAODSIM")
Nanodatasets.append("/WJetsToLNu_HT-400To600_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM")
Nanodatasets.append("/WJetsToLNu_HT-800To1200_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM")
Nanodatasets.append("/WJetsToLNu_HT-1200To2500_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM")
Nanodatasets.append("/WJetsToLNu_HT-2500ToInf_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext1-v1/NANOAODSIM")

# tt + V
Nanodatasets.append("/TTWJetsToQQ_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM")
Nanodatasets.append("/TTWJetsToLNu_TuneCUETP8M1_13TeV-amcatnloFXFX-madspin-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext2-v1/NANOAODSIM")
Nanodatasets.append("/TTZToQQ_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2-v1/NANOAODSIM")
Nanodatasets.append("/TTZToLLNuNu_M-10_TuneCUETP8M1_13TeV-amcatnlo-pythia8/RunIISummer16NanoAOD-PUMoriond17_05Feb2018_94X_mcRun2_asymptotic_v2_ext3-v1/NANOAODSIM")

# Group the expected analysis-output directory of every NanoAOD dataset by its
# short sample name (data samples are lumped together under "Data").
outAnalist = {}
outAnadir = "/fdata/hepx/store/user/taohuang/HHNtuple_20180328_fixedleptonDZeff/"
for i, datasetname in enumerate(Nanodatasets):
    sampleName = sampleN_short[i]
    # BUGFIX: NumSample holds strings ('-25', '13', ...); comparing a str to
    # the int 0 never evaluates True under Python 2 ordering, so the data
    # samples were never relabelled "Data".  Convert to int first.
    if int(NumSample[i]) < 0:
        sampleName = "Data"
    outAnafile = os.path.join(outAnadir, datasetname.split('/')[1])
    # BUGFIX: hasattr() checks object *attributes*, not dict keys, so the old
    # code took the else-branch on every iteration, resetting the list and
    # keeping only the last file of each sample.
    if sampleName in outAnalist:
        outAnalist[sampleName].append(outAnafile)
    else:
        outAnalist[sampleName] = [outAnafile]

# Dump the MiniAOD sample list for bookkeeping; `with` guarantees the file is
# closed even if a write fails.
with open("2016MCSamplelist.txt", "w+") as dataintxt:
    for dataset in datasets:
        dataintxt.write(dataset + "\n")
7,370
975b2f3443e19f910c71f872484350aef9f09dd2
class Solution:
    def minimumDeviation(self, nums: List[int]) -> int:
        """Return the minimum achievable (max - min) of *nums* after
        repeatedly doubling odd elements or halving even ones.

        Every odd value is first doubled (its only possible move up), after
        which only the current maximum ever needs to shrink.  A max-heap of
        negated values tracks that maximum while ``lo`` tracks the minimum.
        """
        heap = []
        lo = inf
        for value in nums:
            if value % 2 == 1:
                # Odd numbers can only grow once; start them at their ceiling.
                value *= 2
            heapq.heappush(heap, -value)
            lo = min(lo, value)
        best = inf
        while True:
            hi = -heapq.heappop(heap)
            best = min(best, hi - lo)
            if hi % 2:
                # An odd maximum cannot be reduced further -- we are done.
                break
            half = hi // 2
            heapq.heappush(heap, -half)
            lo = min(lo, half)
        return best
7,371
452d5d98b6c0b82a1f4ec18f29d9710a8c0f4dc9
""" This handy script will download all wallpapears from simpledesktops.com Requirements ============ BeautifulSoup - http://www.crummy.com/software/BeautifulSoup/ Python-Requests - http://docs.python-requests.org/en/latest/index.html Usage ===== cd /path/to/the/script/ python simpledesktops.py """ from StringIO import StringIO from bs4 import BeautifulSoup import requests import os try: os.mkdir('walls') except OSError: pass page = 1 while True: page_request = requests.get('http://simpledesktops.com/browse/%s/' % page) if page_request.status_code != 200: print 'page %s does not exist' % page break html = BeautifulSoup(page_request.text) images = html.findAll('img') for image in images: img_src = image['src'] if 'static.simpledesktops.com/desktops/' in img_src: full_size_img = img_src.replace('.295x184_q100.png', '') img_name = full_size_img.split('/')[-1] img_request = requests.get(full_size_img) img_buffer = StringIO(img_request.content) img_file = open('walls/%s' % img_name, 'wb') img_file.write(img_buffer.getvalue()) img_file.close() print '%s downloaded' % img_name print '\n================' print 'page %s finished' % page print '================\n' page += 1
7,372
7c3569c43d27ba605c0dba420690e18d7f849965
from django import forms
from .models import User,Profile
from django.contrib.auth.forms import UserCreationForm


class ProfileForm(forms.ModelForm):
    '''
    Model form for creating/editing a Profile.

    The ``user`` field is excluded from the form; per the inline note it is
    attached via signals (presumably when the User is created -- confirm in
    the signal handlers).
    '''
    class Meta:
        model = Profile
        exclude = ('user',)  ## we will create the user with the signals


class SignUpForm(UserCreationForm):
    '''
    Sign-up form built on Django's UserCreationForm.

    Only ``email`` and the two password fields are exposed -- no username,
    which suggests this project authenticates by email (verify against the
    custom User model).
    '''
    class Meta:
        model = User
        fields = ('email','password1','password2')
7,373
4dfdbc692858a627248cbe47d19b43c2a27ec70e
#!/usr/bin/env python

# Core Library modules
import os

# Third party modules
import nose

# First party modules
import lumixmaptool.copy as copy


# Tests
def get_parser_test():
    """Check if the evaluation model returns a parser object."""
    copy.get_parser()


def _region_files(back, name=None, poi=None):
    """Build the expected DFT file list for one map region.

    A region consists of BACK/NAME/POI tiles whose file names differ only in
    a zero-padded tile number (e.g. ``BACK/B0000035.DFT``).  ``name`` and
    ``poi`` default to the same tile numbers as ``back``; pass them
    explicitly for regions where the POI/NAME coverage differs.
    """
    if name is None:
        name = back
    if poi is None:
        poi = back
    files = []
    for folder, letter, numbers in (
        ("BACK", "B", back),
        ("NAME", "N", name),
        ("POI", "P", poi),
    ):
        files.extend("%s/%s%07d.DFT" % (folder, letter, n) for n in numbers)
    return files


def parse_mapdata_test():
    """parse_mapdata must decode the bundled MapList.dat fixture exactly.

    The expected fixture used to be ~300 hand-written file-name literals; it
    is now generated from the per-region tile numbers, which is both easier
    to audit and harder to typo.
    """
    current_folder = os.path.dirname(os.path.realpath(__file__))
    misc_folder = os.path.join(current_folder, "misc")
    maplistdata_path = os.path.join(misc_folder, "MapList.dat")
    result = copy.parse_mapdata(maplistdata_path)
    expected = {
        "num1": "00010001",
        "num2": "00010001",
        "regions": {
            1: _region_files([35, 36, 44, 45, 53, 54]),
            2: _region_files([24, 25, 26, 27, 33, 34, 35, 36, 42, 43, 44, 45]),
            3: _region_files(
                [1, 8, 9, 10, 17, 18, 19, 26, 27],
                poi=[17, 18, 19, 26, 27],
            ),
            4: _region_files(
                [19, 20, 21, 22, 27, 28, 29, 30, 31,
                 36, 37, 38, 39, 40, 45, 46, 47, 48, 49, 54]
            ),
            5: _region_files(
                [2, 3, 4, 11, 12, 13, 20, 21, 22, 29, 30, 31],
                poi=[3, 11, 12, 13, 20, 21, 22, 29, 30, 31],
            ),
            6: _region_files([40, 41, 42, 49, 50, 51]),
            7: _region_files([32, 33, 34, 41, 42, 43, 50, 51, 52]),
            8: _region_files([31, 32, 33, 40, 41, 42, 49, 50, 51]),
            9: _region_files(
                [5, 6, 7, 14, 15, 16, 23, 24, 25, 32, 33, 34, 41, 42, 43],
                poi=[14, 15, 23, 24, 25, 32, 33, 34, 41, 42, 43],
            ),
            10: _region_files([37, 41, 42, 43, 44, 45, 46, 50, 51, 52, 53, 54]),
        },
    }
    nose.tools.assert_equal(result, expected)
7,374
f15bb4ab93ecb2689bf74687852e60dfa98caea9
"""Project-agnostic helper functions that could be migrated to an external lib."""
7,375
0259fddbe3ce030030a508ce7118a6a03930aa51
from flask import Flask
import os

app = Flask(__name__)


@app.route("/healthz")
def healthz():
    """Health-check endpoint; always reports "ok" (probe-style path -- likely
    used by an orchestrator such as Kubernetes; confirm in the deployment)."""
    return "ok"


@app.route("/alive")
def alive():
    """Liveness endpoint; always reports "ok"."""
    return "ok"


@app.route("/hello")
# def healthz():  # introduces application crash bug
# NOTE(review): the commented name above is a deliberate demo toggle --
# re-using `healthz` would register a duplicate view function name and make
# Flask raise at import time.
def hello():
    """Greet with the serving host name (handy to see which replica answered)."""
    myhost = os.uname()[1]  # nodename of the serving machine (POSIX-only call)
    body = ("V1 - Hello World! - %s" % myhost)
    # body = ("V2 - Hello World! - %s" % myhost)  # version toggle for rollout demos
    return body


if __name__ == "__main__":
    # Serve via waitress on all interfaces, port 80.
    from waitress import serve
    serve(app, host="0.0.0.0", port=80)
7,376
5c643dfce9cf7a9f774957ff4819d3be8ac4f1da
# List basics: replace an element by assigning to its index; add one with append().
a = ['somesh', 'aakash', 'sarika', 'datta', 'rudra', '4mridula']

# Replace the third entry in place.
a[2] = 'nandini'
# Append a new entry at the end (duplicates are allowed).
a.append('sarika')

print(a[2])
print(a)
7,377
7a920b3609bb29cd26b159b48290fa6978839416
def _latex_list(environment, items):
    """Render *items* as a quoted LaTeX list in the given *environment*.

    Produces the user-facing message followed by ``\\begin{...}``, one
    ``\\item`` per entry and the closing ``\\end{...}``; every LaTeX line is
    prefixed with ``> `` (quote style).  Shared by bullets() and numbers().
    """
    text = "Your list in latex can be created with the following command: \n"
    text += "> \\begin{%s} \n" % environment
    for item in items:
        text += "> \\item %s \n" % item
    text += "> \\end{%s}" % environment
    return text


def bullets(chunks):
    """Return the LaTeX ``itemize`` snippet for *chunks*.

    BUGFIX: debug prints inside the loop (leaking partially built strings to
    stdout on every iteration) were removed; the returned string is unchanged.
    """
    return _latex_list("itemize", chunks)


def numbers(chunks):
    """Return the LaTeX ``enumerate`` snippet for *chunks*."""
    return _latex_list("enumerate", chunks)


def simple_headline(chunks):
    """Return a LaTeX ``description`` snippet using each chunk as a headline.

    Each entry renders as ``\\item[<chunk>]\\hfill \\\\`` so the headline sits
    on its own line.  Escapes are now written explicitly instead of relying on
    Python keeping invalid sequences like ``\\h`` literal (a SyntaxWarning on
    modern Python).
    """
    text = "Your list in latex can be created with the following command: \n"
    text += "> \\begin{description} \n"
    for entry in chunks:
        text += "> \\item[%s]\\hfill \\\\ \n" % entry
    text += "> \\end{description}"
    return text
7,378
f8e287abc7e1a2af005aa93c25d95ce770e29bf9
from odoo import models, fields, api from datetime import datetime, timedelta from odoo import exceptions import logging import math _logger = logging.getLogger(__name__) class BillOfLading(models.Model): _name = 'freight.bol' _description = 'Bill Of Lading' _order = 'date_of_issue desc, write_date desc' _rec_name = 'bol_no' _inherit = ['mail.thread', 'mail.activity.mixin'] # Header bol_status = fields.Selection([('01', 'Draft'), ('02', 'Original'), ('03', 'Surrender'), ('04', 'Telex Release')], string="B/L Status", default="01", copy=False, track_visibility='onchange', store=True) service_type = fields.Selection([('ocean', 'Ocean'), ('air', 'Air'), ('land', 'Land')], string="Shipment Mode", default="ocean", track_visibility='onchange') direction = fields.Selection([('import', 'Import'), ('export', 'Export')], string="Direction", default="export", track_visibility='onchange') cargo_type = fields.Selection([('fcl', 'FCL'), ('lcl', 'LCL')], string='Cargo Type', default="fcl", track_visibility='onchange') type_of_movement = fields.Selection([('cy-cy', 'CY/CY'), ('cy-cfs', 'CY/CFS'), ('cfs-cfs', 'CFS/CFS'), ('cfs-cy', 'CFS/CY')], string='Type Of Movement', track_visibility='onchange') booking_ref = fields.Many2one('freight.booking', string='Booking Job Ref', track_visibility='onchange', copy=False, index=True) no_of_original_bl = fields.Selection([('0', '0'), ('1', '1'), ('3', '3')], string="No Of original B/L", default="0", track_visibility='onchange') doc_form_no = fields.Char(string='Doc. 
Form No.', track_visibility='onchange') service_contract_no = fields.Char(string='Service Contract No', track_visibility='onchange') bol_no = fields.Char(string='HBL No', copy=False, readonly=True, index=True) carrier_booking_no = fields.Char(string='Carrier Booking No', copy=False, readonly=True) date_of_issue = fields.Date(string='Shipment Date', copy=False, default=datetime.now().date(), track_visibility='onchange', index=True) date_laden_on_board = fields.Date(string='Shipped on Board Date') place_of_issue = fields.Char(string='Place of Issue', track_visibility='onchange') export_reference = fields.Char(string='Export Reference', track_visibility='onchange') fa_reference = fields.Char(string='Forwarding Agent and References', track_visibility='onchange') point_country_origin = fields.Text(string='Point and Country of Origin', track_visibility='onchange') term = fields.Char(string='Term', track_visibility='onchange', help='eg, CY-CY') commodity = fields.Many2one('product.product', string='Commodity', track_visibility='onchange') commodity1 = fields.Many2one('freight.commodity1', string='Commodity', track_visibility='onchange') shipper_load = fields.Boolean('Shipper Load, Seal and Count') analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", track_visibility='always', copy=False) @api.multi def _get_default_commodity_category(self): commodity_lines = self.env['freight.product.category'].search([('type', '=ilike', 'commodity')]) for commodity_line in commodity_lines: _logger.warning('_get_default_commodity_category=' + str(commodity_line.product_category)) return commodity_line.product_category commodity_category_id = fields.Many2one('product.category', string="Commodity Product Id", default=_get_default_commodity_category) # Invoice Status invoice_status = fields.Selection([('01', 'New'), ('02', 'Partially Invoiced'), ('03', 'Fully Invoiced')], string="Invoice Status", default="01", copy=False, track_visibility='onchange') 
invoice_paid_status = fields.Selection([('01', 'New'), ('02', 'Partially Paid'), ('03', 'Fully Paid')], string="Invoice Paid Status", default="01", copy=False, track_visibility='onchange') # Party Info customer_name = fields.Many2one('res.partner', string='Customer Name', track_visibility='onchange') contact_name = fields.Many2one('res.partner', string='Contact Name', track_visibility='onchange') shipper = fields.Text(string='Shipper', track_visibility='onchange', help="The Party who shipped the freight, eg Exporter") notify_party = fields.Text(string='Notify Party', help="The Party who will be notified by Liner when the freight arrived", track_visibility='onchange') carrier_c = fields.Many2one('res.partner', string="Carrier") consignee = fields.Text(string='Consignee', help="The Party who received the freight", track_visibility='onchange') routing_instruction = fields.Text(string='For Delivery Of Goods Please Apply To', track_visibility='onchange') delivery_contact = fields.Text(string='Contact for Delivery', help="Contact information for delivery of goods", track_visibility='onchange') unstuff_at = fields.Char(string='Unstuff At', track_visibility='onchange') # Shipment Info voyage_no = fields.Char(string='Voyage No', track_visibility='onchange') vessel = fields.Char(string='Vessel Name', track_visibility='onchange') manifest_no = fields.Char(string='Manifest No', track_visibility='onchange') port_of_loading_input = fields.Char(string='Port of Loading', track_visibility='onchange') port_of_discharge_input = fields.Char(string='Port of Discharge', track_visibility='onchange') port_of_discharge_eta = fields.Date(string='Loading ETA', track_visibility='onchange') place_of_delivery = fields.Char(string='Final Destination', track_visibility='onchange') place_of_receipt = fields.Char(string='Place of Receipt', track_visibility='onchange') pre_carriage_by = fields.Char(string='Pre-Carriage By', track_visibility='onchange') # Remark note = fields.Text(string='Remarks', 
track_visibility='onchange') # System Info sales_person = fields.Many2one('res.users', string="Salesperson", track_visibility='onchange') company_id = fields.Many2one('res.company', 'Company', required=True, index=True, readonly=1, default=lambda self: self.env.user.company_id.id) # analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", # track_visibility='always', copy=False) # Line Item cargo_line_ids = fields.One2many('freight.bol.cargo', 'cargo_line', string="Cargo Line", copy=True, auto_join=True, track_visibility='always') charge_line_ids = fields.One2many('freight.bol.charge', 'charge_line', string="Charge Line", copy=True, auto_join=True, track_visibility='always') cost_profit_ids = fields.One2many('freight.bol.cost.profit', 'bol_id', string="Cost & Profit", copy=True, auto_join=True, track_visibility='always') # Not Used invoice_count = fields.Integer(string='Invoice Count', compute='_get_invoiced_count', copy=False) vendor_bill_count = fields.Integer(string='Vendor Bill Count', compute='_get_bill_count', copy=False) si_count = fields.Integer(string='SI Count', compute='_get_si_count', copy=False) shipper_c = fields.Many2one('res.partner', string='Shipper') consignee_c = fields.Many2one('res.partner', string='Consignee Name') notify_party_c = fields.Many2one('res.partner', string='Notify Party') total_no_of_packages_words = fields.Char(string='Total Packages', track_visibility='onchange', help='Total no of packages or container in Words') lines_description = fields.Integer() line_description1 = fields.Text() line_description2 = fields.Text() @api.model def create(self, vals): vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl') res = super(BillOfLading, self).create(vals) return res @api.multi def name_get(self): result = [] for bol in self: name = str(bol.bol_no) result.append((bol.id, name)) return result @api.multi def action_send_bl(self): self.ensure_one() ir_model_data = self.env['ir.model.data'] try: 
template_id = \ ir_model_data.get_object_reference('sci_goexcel_freight', 'email_template_bol')[1] except ValueError: template_id = False try: compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1] except ValueError: compose_form_id = False ctx = { 'default_model': 'freight.bol', 'default_res_id': self.ids[0], 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment', 'mark_so_as_sent': True, 'custom_layout': "mail.mail_notification_light", # 'proforma': self.env.context.get('proforma', False), 'force_email': True } # base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url') # ctx['action_url'] = "{}/web?db={}".format(base_url, self.env.cr.dbname) return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'views': [(compose_form_id, 'form')], 'view_id': compose_form_id, 'target': 'new', 'context': ctx, } @api.multi def action_invoice(self): self.ensure_one() view = self.env.ref('sci_goexcel_freight.invoice_view_form') return { 'name': 'Create Invoice', 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'invoice.wizard', 'views': [(view.id, 'form')], 'view_id': view.id, 'target': 'new', 'context': dict(bl_id=self.id), } @api.multi def action_create_vendor_bill(self): # only lines with vendor vendor_po = self.cost_profit_ids.filtered(lambda c: c.vendor_id) po_lines = vendor_po.sorted(key=lambda p: p.vendor_id.id) vendor_count = False vendor_id = False if not self.analytic_account_id: values = { 'name': '%s' % self.booking_ref.booking_no, 'partner_id': self.booking_ref.customer_name.id, 'code': self.bol_no, 'company_id': self.booking_ref.company_id.id, } analytic_account = self.env['account.analytic.account'].sudo().create(values) self.booking_ref.write({'analytic_account_id': analytic_account.id}) self.write({'analytic_account_id': 
analytic_account.id}) for line in po_lines: if line.vendor_id != vendor_id: vb = self.env['account.invoice'] vendor_count = True vendor_id = line.vendor_id value = [] vendor_bill_created = [] filtered_vb_lines = po_lines.filtered(lambda r: r.vendor_id == vendor_id) for vb_line in filtered_vb_lines: if not vb_line.billed: account_id = False price_after_converted = vb_line.cost_price * vb_line.cost_currency_rate if vb_line.product_id.property_account_expense_id: account_id = vb_line.product_id.property_account_expense_id elif vb_line.product_id.categ_id.property_account_expense_categ_id: account_id = vb_line.product_id.categ_id.property_account_expense_categ_id value.append([0, 0, { # 'invoice_id': vendor_bill.id or False, 'account_id': account_id.id or False, 'name': vb_line.product_id.name or '', 'product_id': vb_line.product_id.id or False, 'quantity': vb_line.cost_qty or 0.0, 'uom_id': vb_line.uom_id.id or False, 'price_unit': price_after_converted or 0.0, 'account_analytic_id': self.analytic_account_id.id, 'bl_line_id': vb_line.id, }]) vendor_bill_created.append(vb_line) vb_line.billed = True # print('vendor_id=' + vendor_id.name) if value: vendor_bill_id = vb.create({ 'type': 'in_invoice', 'invoice_line_ids': value, 'default_currency_id': self.env.user.company_id.currency_id.id, 'company_id': self.company_id.id, 'date_invoice': fields.Date.context_today(self), 'origin': self.bol_no, 'partner_id': vendor_id.id, 'account_id': vb_line.vendor_id.property_account_payable_id.id or False, 'freight_booking': self.booking_ref.id, }) for new_vendor_bill in vendor_bill_created: new_vendor_bill.vendor_bill_id = vendor_bill_id.id if vendor_count is False: raise exceptions.ValidationError('No Vendor in Cost & Profit!!!') def action_copy_to_booking(self): booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id),]) booking_val = { 'cargo_type': self.cargo_type or False, 'shipper_address_input': self.shipper, 'consignee_address_input': self.consignee, 
'notify_party_address_input': self.notify_party, 'carrier_booking_no' : self.carrier_booking_no or False, 'voyage_no': self.voyage_no, 'port_of_loading_input': self.port_of_loading_input, 'port_of_discharge_input': self.port_of_discharge_input, 'place_of_delivery': self.place_of_delivery, 'note': self.note, 'bol_status': self.bol_status, 'no_of_original_bl': self.no_of_original_bl, 'carrier': self.carrier_c.id, } booking.sudo().write(booking_val) for booking_line in booking.operation_line_ids: booking_line.sudo().unlink() for booking_line in booking.operation_line_ids2: booking_line.sudo().unlink() for line in self.cargo_line_ids: if self.cargo_type == 'fcl': if line.container_product_name: operation_line_obj = self.env['freight.operations.line'] op_line = operation_line_obj.create({ 'operation_id': booking.id, 'container_no': line.container_no or '', 'container_product_id': line.container_product_id.id or False, 'seal_no': line.seal_no or '', 'container_product_name': line.container_product_name or '', 'packages_no': line.packages_no_value or '', 'packages_no_uom': line.packages_no_uom.id, 'exp_net_weight': line.exp_net_weight or '', 'exp_gross_weight': line.exp_gross_weight or '', 'dim_length': line.dim_length or '', 'dim_width': line.dim_width or '', 'dim_height': line.dim_height or '', 'exp_vol': line.exp_vol or '', 'remark': line.marks or '', }) booking.operation_line_ids = op_line else: if line.container_product_name: operation_line_obj = self.env['freight.operations.line2'] op_line = operation_line_obj.create({ 'operation_id2': booking.id, 'container_no': line.container_no or '', 'container_product_id': line.container_product_id.id or False, 'seal_no': line.seal_no or '', 'container_product_name': line.container_product_name or '', 'packages_no': line.packages_no_value or '', 'packages_no_uom': line.packages_no_uom.id, 'exp_net_weight': line.exp_net_weight or '', 'exp_gross_weight': line.exp_gross_weight or '', 'dim_length': line.dim_length or '', 
'dim_width': line.dim_width or '', 'dim_height': line.dim_height or '', 'exp_vol': line.exp_vol or '', 'shipping_mark': line.marks or '', }) booking.operation_line_ids2 = op_line def action_copy_from_booking(self): booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id)]) for line in booking.cost_profit_ids: operation_line_obj = self.env['freight.bol.cost.profit'] op_line = operation_line_obj.create({ 'bol_id': self.id, 'product_id': line.product_id.id or False, 'product_name': line.product_name or '', 'profit_qty': line.profit_qty or 0, 'list_price': line.list_price or 0, 'profit_amount': line.profit_amount or 0, 'profit_currency': line.profit_currency.id or False, 'profit_currency_rate': line.profit_currency_rate or 0, 'cost_qty': line.cost_qty or 0, 'cost_price': line.cost_price or 0, 'cost_amount': line.cost_amount or 0, 'vendor_id': line.vendor_id.id or False, 'cost_currency': line.cost_currency.id or False, 'cost_currency_rate': line.cost_currency_rate or 0, }) def action_create_si(self): si_obj = self.env['freight.website.si'] si_val = { 'si_status': '01', 'carrier': self.carrier_c.id or False, 'direction': self.direction or False, 'cargo_type': self.cargo_type or False, 'service_type': self.service_type or False, 'customer_name': self.customer_name.id or False, 'shipper': self.shipper, 'consignee': self.consignee, 'notify_party': self.notify_party, 'carrier_booking_ref': self.carrier_booking_no, 'voyage_no': self.voyage_no, 'port_of_loading_input': self.port_of_loading_input, 'port_of_discharge_input': self.port_of_discharge_input, 'place_of_delivery': self.place_of_delivery, 'bl_ref': self.id, } si = si_obj.create(si_val) if self.cargo_type == 'fcl': container_line = self.cargo_line_ids si_line_obj = self.env['freight.website.si.fcl'] for line in container_line: if line.container_product_id or line.container_no: si_line = si_line_obj.create({ 'container_product_id': line.container_product_id.id or False, 'container_product_name': 
line.container_product_name or False, 'fcl_line': si.id or '', 'container_no': line.container_no or '', 'packages_no': line.packages_no_value or 0.0, 'packages_no_uom': line.packages_no_uom.id, 'exp_gross_weight': line.exp_gross_weight or 0.0, 'exp_vol': line.exp_vol or 0.0, }) si.write({'fcl_line_ids': si_line or False}) else: container_line = self.cargo_line_ids si_line_obj = self.env['freight.website.si.lcl'] for line in container_line: if line.container_product_id or line.container_no: si_line = si_line_obj.create({ 'container_product_name': line.container_product_name or False, #'container_product_id': line.container_commodity_id.id or False, 'lcl_line': si.id or '', 'container_no': line.container_no or '', 'packages_no': line.packages_no_value or 0.0, 'packages_no_uom': line.packages_no_uom.id, 'exp_gross_weight': line.exp_gross_weight or 0.0, 'exp_net_weight': line.exp_net_weight or 0.0, 'exp_vol': line.exp_vol or 0.0, # 'remark_line': line.remark or '', }) si.write({'lcl_line_ids': si_line or False}) @api.multi def operation_invoices(self): """Show Invoice for specific Freight Operation smart Button.""" for operation in self: invoices = self.env['account.invoice'].search([ ('freight_hbl', '=', operation.id), ('type', 'in', ['out_invoice', 'out_refund']), ('state', '!=', 'cancel'), ]) action = self.env.ref('account.action_invoice_tree1').read()[0] if len(invoices) > 1: action['domain'] = [('id', 'in', invoices.ids)] elif len(invoices) == 1: action['views'] = [(self.env.ref('account.invoice_form').id, 'form')] action['res_id'] = invoices.ids[0] else: action = {'type': 'ir.actions.act_window_close'} return action @api.multi def operation_bill(self): for operation in self: # Get from the vendor bill list vendor_bill_list = [] for cost_profit_line in operation.cost_profit_ids: for vendor_bill_line in cost_profit_line.vendor_bill_ids: if vendor_bill_line.type in ['in_invoice', 'in_refund']: vendor_bill_list.append(vendor_bill_line.id) invoices = 
self.env['account.invoice'].search([ ('freight_hbl', '=', operation.id), ('type', 'in', ['in_invoice', 'in_refund']), ('state', '!=', 'cancel'), ]) invoice_name_list = [] for x in invoices: invoice_name_list.append(x.id) unique_list = [] for y in vendor_bill_list: if invoice_name_list and len(invoice_name_list) > 0: if y not in invoice_name_list: unique_list.append(y) else: unique_list.append(y) for z in invoice_name_list: # if z not in vendor_bill_list: unique_list.append(z) if len(unique_list) > 1: views = [(self.env.ref('account.invoice_supplier_tree').id, 'tree'), (self.env.ref('account.invoice_supplier_form').id, 'form')] return { 'name': 'Vendor bills', 'view_type': 'form', 'view_mode': 'tree,form', # 'view_id': self.env.ref('account.invoice_supplier_tree').id, 'view_id': False, 'res_model': 'account.invoice', 'views': views, # 'context': "{'type':'in_invoice'}", 'domain': [('id', 'in', unique_list)], 'type': 'ir.actions.act_window', # 'target': 'new', } elif len(unique_list) == 1: # print('in vendor bill length =1') return { # 'name': self.booking_no, 'view_type': 'form', 'view_mode': 'form', 'res_model': 'account.invoice', 'res_id': unique_list[0] or False, # readonly mode # 'domain': [('id', 'in', purchase_order.ids)], 'type': 'ir.actions.act_window', 'target': 'popup', # readonly mode } @api.multi def operation_si(self): for operation in self: si = self.env['freight.website.si'].search([('bl_ref', '=', operation.id), ]) if len(si) > 1: views = [(self.env.ref('sci_goexcel_freight.view_tree_si').id, 'tree'), (self.env.ref('sci_goexcel_freight.view_form_si').id, 'form')] return { 'name': 'Shipping Instruction', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'freight.website.si', 'views': views, 'domain': [('id', 'in', si.ids)], 'type': 'ir.actions.act_window', } elif len(si) == 1: return { 'view_type': 'form', 'view_mode': 'form', 'res_model': 'freight.website.si', 'res_id': si.id or False, 'type': 'ir.actions.act_window', 
'target': 'popup', # readonly mode } else: action = {'type': 'ir.actions.act_window_close'} return action def _get_invoiced_count(self): for operation in self: invoices = self.env['account.invoice'].search([ ('freight_hbl', '=', operation.id), ('type', 'in', ['out_invoice','out_refund']), ('state', '!=', 'cancel'), ]) self.update({ 'invoice_count': len(invoices), #'invoice_ids': invoices, }) def _get_bill_count(self): # vendor bill is created from booking job, vendor bill header will have the booking job id for operation in self: # Get from the vendor bill list vendor_bill_list = [] # vendor_bill_list_temp = [] for cost_profit_line in operation.cost_profit_ids: for vendor_bill_line in cost_profit_line.vendor_bill_ids: if vendor_bill_line.type in ['in_invoice', 'in_refund']: vendor_bill_list.append(vendor_bill_line.id) # vendor_bill_list_temp.append(vendor_bill_line.id) # print('vendor_bill_list: ', len(vendor_bill_list)) # remove the duplicates in the vendor bill list unique_vendor_bill_list = [] for i in vendor_bill_list: if i not in unique_vendor_bill_list: unique_vendor_bill_list.append(i) # print('unique_vendor_bill_list: ', len(unique_vendor_bill_list)) # Get the vendor list (Create the vendor from the job) invoices = self.env['account.invoice'].search([ ('freight_hbl', '=', operation.id), ('type', 'in', ['in_invoice', 'in_refund']), ('state', '!=', 'cancel'), ]) # print('vendor bills:', len(invoices)) invoice_name_list = [] for x in invoices: invoice_name_list.append(x.id) unique_list = [] # for x in invoices: # invoice_name_list.append(x.vendor_bill_id.id) # unique_list = [] for y in unique_vendor_bill_list: if invoice_name_list and len(invoice_name_list) > 0: if y not in invoice_name_list: unique_list.append(y) else: unique_list.append(y) for z in invoice_name_list: # if z not in vendor_bill_list: unique_list.append(z) if len(unique_list) > 0: self.update({ 'vendor_bill_count': len(unique_list), }) def _get_si_count(self): for operation in self: si = 
self.env['freight.website.si'].search([ ('bl_ref', '=', operation.id), ]) self.update({ 'si_count': len(si), }) # TS - add for Purchase Receipt purchase_receipt_count = fields.Integer(string='Purchase Receipt Count', compute='_get_pr_count', copy=False) def _get_pr_count(self): # get purchase receipt (Account Voucher) on the lines for operation in self: # Get PR list pr_lines = self.env['account.voucher.line'].search([ ('freight_hbl', '=', operation.id), ]) pr_list = [] for pr_line in pr_lines: if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase': pr_list.append(pr_line.voucher_id.id) # pr_name_list = [] # for x in pr_list: # pr_name_list.append(x.id) unique_list = [] for i in pr_list: if i not in unique_list: unique_list.append(i) if len(unique_list) > 0: self.update({ 'purchase_receipt_count': len(unique_list), }) @api.multi def operation_pr(self): for operation in self: for operation in self: # Get PR list pr_lines = self.env['account.voucher.line'].search([ ('freight_hbl', '=', operation.id), ]) pr_list = [] for pr_line in pr_lines: if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase': pr_list.append(pr_line.voucher_id.id) # pr_name_list = [] # for x in pr_list: # pr_name_list.append(x.id) unique_list = [] for i in pr_list: if i not in unique_list: unique_list.append(i) if len(unique_list) > 1: views = [(self.env.ref('account_voucher.view_voucher_tree').id, 'tree'), (self.env.ref('account_voucher.view_purchase_receipt_form').id, 'form')] return { 'name': 'Purchase Receipt', 'view_type': 'form', 'view_mode': 'tree,form', # 'view_id': self.env.ref('account.invoice_supplier_tree').id, 'view_id': False, 'res_model': 'account.voucher', 'views': views, # 'context': "{'type':'in_invoice'}", 'domain': [('id', 'in', unique_list)], 'type': 'ir.actions.act_window', # 'target': 'new', } elif len(unique_list) == 1: # print('in vendor bill length =1') return { # 'name': self.booking_no, 'view_type': 
'form', 'view_mode': 'form', 'res_model': 'account.voucher', 'res_id': unique_list[0] or False, # readonly mode # 'domain': [('id', 'in', purchase_order.ids)], 'type': 'ir.actions.act_window', 'target': 'popup', # readonly mode } class CargoLine(models.Model): _name = 'freight.bol.cargo' _description = 'Cargo Line' cargo_line = fields.Many2one('freight.bol', string='Cargo Line', required=True, ondelete='cascade', index=True, copy=False) sequence = fields.Integer(string="sequence") marks = fields.Text(string='Marks and Numbers') container_no = fields.Char(string="Container No.") container_product_id = fields.Many2one('product.product', string='Container', track_visibility='onchange') seal_no = fields.Char(string="Seal No.") container_product_name = fields.Text(string='Description of Goods') packages_no_value = fields.Integer(string="No. of Packages") packages_no_uom = fields.Many2one('uom.uom', string="UoM") exp_net_weight = fields.Float(string="Net Weight(KG)", help="Expected Weight in kg.", track_visibility='onchange') exp_gross_weight = fields.Float(string="Gross Weight(KG)", digits=(12, 4), help="Expected Weight in kg.") dim_length = fields.Float(string='Length', help="Length in cm", default="0.00", track_visibility='onchange') dim_width = fields.Float(string='Width', default="0.00", help="Width in cm", track_visibility='onchange') dim_height = fields.Float(string='Height', default="0.00", help="Height in cm", track_visibility='onchange') exp_vol = fields.Float(string="Measurement (M3)", digits=(12, 4), help="Expected Volume in m3 Measure") packages_no = fields.Char(string="No. 
of Packages") @api.multi def _get_default_container_category(self): container_lines = self.env['freight.product.category'].search([('type', '=ilike', 'container')]) for container_line in container_lines: # _logger.warning('_get_default_container_category=' + str(container_line.product_category)) return container_line.product_category container_category_id = fields.Many2one('product.category', string="Container Product Id", default=_get_default_container_category) @api.onchange('container_product_name') def _onchange_description(self): bl = self.env['freight.bol'].search([('bol_no', '=', self.cargo_line.bol_no)]) if self.container_product_name: lines_description = self.container_product_name.count('\n')/20 lines_description = math.ceil(lines_description) x = self.container_product_name.split('\n') count = 0 line_description1 = '' line_description2 = '' for line in x: if count < 20: line_description1 = line_description1 + line + '\n' count = count + 1 else: line_description2 = line_description2 + line + '\n' count = count + 1 bl.write({'lines_description': lines_description, 'line_description1': line_description1, 'line_description2': line_description2, }) @api.model def create(self, vals): # _logger.warning("in create") res = super(CargoLine, self).create(vals) content = "" if vals.get("marks"): content = content + " \u2022 Marks and Numbers: " + str(vals.get("marks")) + "<br/>" if vals.get("container_product_name"): content = content + " \u2022 Description of Goods: " + str(vals.get("container_product_name")) + "<br/>" if vals.get("packages_no"): content = content + " \u2022 No. 
of Packages: " + str(vals.get("packages_no")) + "<br/>" if vals.get("seal_no"): content = content + " \u2022 Seal no: " + str(vals.get("seal_no")) + "<br/>" if vals.get("container_no"): content = content + " \u2022 Container No.: " + str(vals.get("container_no")) + "<br/>" if vals.get("exp_gross_weight"): content = content + " \u2022 Gross Weight(KG): " + str(vals.get("exp_gross_weight")) + "<br/>" if vals.get("exp_vol"): content = content + " \u2022 Measurement (M3): " + str(vals.get("exp_vol")) + "<br/>" res.cargo_line.message_post(body=content) return res @api.multi def write(self, vals): # _logger.warning("in write") res = super(CargoLine, self).write(vals) # _logger.warning("after super write") content = "" if vals.get("marks"): content = content + " \u2022 Marks and Numbers: " + str(vals.get("marks")) + "<br/>" if vals.get("container_product_name"): content = content + " \u2022 Description of Goods: " + str(vals.get("container_product_name")) + "<br/>" if vals.get("packages_no"): content = content + " \u2022 No. 
of Packages: " + str(vals.get("packages_no")) + "<br/>" if vals.get("seal_no"): content = content + " \u2022 Seal no: " + str(vals.get("seal_no")) + "<br/>" if vals.get("container_no"): content = content + " \u2022 Container No.: " + str(vals.get("container_no")) + "<br/>" if vals.get("exp_gross_weight"): content = content + " \u2022 Gross Weight(KG): " + str(vals.get("exp_gross_weight")) + "<br/>" if vals.get("exp_vol"): content = content + " \u2022 Measurement (M3): " + str(vals.get("exp_vol")) + "<br/>" self.cargo_line.message_post(body=content) return res class ChargeLine(models.Model): _name = 'freight.bol.charge' _description = 'Charge Line' charge_line = fields.Many2one('freight.bol', string='Charge Line', required=True, ondelete='cascade', index=True, copy=False) sequence = fields.Integer(string="sequence") freight_charges = fields.Text(string='Freight & Charges') rate = fields.Char(string='Rate') per = fields.Char(string="Per") amount = fields.Char(string="Amount") prepaid = fields.Char(string="Prepaid") collect = fields.Char(string="Collect") payable_at_by = fields.Char(string="Payable at/by") # fcl_container_qty = fields.Float(string="Qty", digits=(8, 0), track_visibility='onchange') revenue_tons = fields.Char(string='Revenue Tons') @api.model def create(self, vals): # _logger.warning("in create") res = super(ChargeLine, self).create(vals) content = "" if vals.get("freight_charges"): content = content + " \u2022 Freight & Charges: " + str(vals.get("freight_charges")) + "<br/>" if vals.get("revenue_tons"): content = content + " \u2022 Revenue Tons: " + str(vals.get("revenue_tons")) + "<br/>" if vals.get("rate"): content = content + " \u2022 Rate: " + str(vals.get("rate")) + "<br/>" if vals.get("per"): content = content + " \u2022 Per: " + str(vals.get("per")) + "<br/>" if vals.get("amount"): content = content + " \u2022 Amount: " + str(vals.get("amount")) + "<br/>" if vals.get("prepaid"): content = content + " \u2022 Prepaid: " + str(vals.get("prepaid")) 
+ "<br/>" if vals.get("collect"): content = content + " \u2022 Collect: " + str(vals.get("collect")) + "<br/>" if vals.get("payable_at_by"): content = content + " \u2022 Payable at/by: " + str(vals.get("payable_at_by")) + "<br/>" res.charge_line.message_post(body=content) return res @api.multi def write(self, vals): # _logger.warning("in write") res = super(ChargeLine, self).write(vals) # _logger.warning("after super write") content = "" if vals.get("freight_charges"): content = content + " \u2022 Freight & Charges: " + str(vals.get("freight_charges")) + "<br/>" if vals.get("revenue_tons"): content = content + " \u2022 Revenue Tons: " + str(vals.get("revenue_tons")) + "<br/>" if vals.get("rate"): content = content + " \u2022 Rate: " + str(vals.get("rate")) + "<br/>" if vals.get("per"): content = content + " \u2022 Per: " + str(vals.get("per")) + "<br/>" if vals.get("amount"): content = content + " \u2022 Amount: " + str(vals.get("amount")) + "<br/>" if vals.get("prepaid"): content = content + " \u2022 Prepaid: " + str(vals.get("prepaid")) + "<br/>" if vals.get("collect"): content = content + " \u2022 Collect: " + str(vals.get("collect")) + "<br/>" if vals.get("payable_at_by"): content = content + " \u2022 Payable at/by: " + str(vals.get("payable_at_by")) + "<br/>" self.charge_line.message_post(body=content) return res class CostProfit(models.Model): _name = 'freight.bol.cost.profit' _description = "BOL Cost & Profit" sequence = fields.Integer(string="sequence") bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True, ondelete='cascade', index=True, copy=False) product_id = fields.Many2one('product.product', string="Product") product_name = fields.Text(string="Description") #Profit #profit_qty = fields.Integer(string='Qty', default="1") #profit_qty = fields.Float(string='Qty', default="1", digits=(12, 2)) list_price = fields.Float(string="Unit Price") uom_id = fields.Many2one('uom.uom', string="UoM") profit_gst = fields.Selection([('zer', 'ZER')], 
string="GST", default="zer", track_visibility='onchange') tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)]) profit_currency = fields.Many2one('res.currency', 'Currency', default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange') profit_currency_rate = fields.Float(string='Rate', default="1.00", track_visibility='onchange') profit_amount = fields.Float(string="Amt", compute="_compute_profit_amount", store=True, track_visibility='onchange') sale_total = fields.Float(string="Total Sales", compute="_compute_sale_total", store=True, track_visibility='onchange') #Cost #cost_qty = fields.Integer(string='Qty', default="1", track_visibility='onchange') profit_qty = fields.Float(string='Qty', default="1.000", digit=(12, 3)) cost_qty = fields.Float(string='Qty', default="1.000", digit=(12, 3)) cost_price = fields.Float(string="Unit Price", track_visibility='onchange') cost_gst = fields.Selection([('zer', 'ZER')], string="Tax", default="zer", track_visibility='onchange') vendor_id = fields.Many2one('res.partner', string="Vendor", track_visibility='onchange') vendor_bill_id = fields.Many2one('account.invoice', string="Vendor Bill") cost_currency = fields.Many2one('res.currency', string="Curr", required=True, default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange') cost_currency_rate = fields.Float(string='Rate', default="1.00", track_visibility='onchange') cost_amount = fields.Float(string="Amt", compute="_compute_cost_amount", store=True, track_visibility='onchange') cost_total = fields.Float(string="Total Cost", compute="_compute_cost_total", store=True, track_visibility='onchange') # Invoice & Bill billed = fields.Boolean(string='Billed', copy=False) is_billed = fields.Char('Is Biiled?', compute='_compute_is_billed', store=True) added_to_invoice = fields.Boolean(string='Invoiced', copy=False) invoice_paid = fields.Boolean(string='Invoice 
Paid', copy=False) paid = fields.Boolean(string='Paid', copy=False) is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True) invoice_id = fields.Many2one('account.invoice', string="Invoice") inv_line_id = fields.Many2one('account.invoice.line', string="Invoice Line") bill_id = fields.Many2one('account.invoice', string="Bill") bill_line_id = fields.Many2one('account.invoice.line', string="Bill Line") route_service = fields.Boolean(string='Is Route Service', default=False) profit_total = fields.Float(string="Total Profit", compute="_compute_profit_total", store=True) margin_total = fields.Float(string="Margin %", compute="_compute_margin_total", digits=(8,2), store=True, group_operator="avg") vendor_id_ids = fields.Many2many('res.partner', string="Vendor List", copy=False) vendor_bill_ids = fields.Many2many('account.invoice', string="Vendor Bill List", copy=False) @api.one def _set_access_for_invoiced(self): if self.env['res.users'].has_group('account.group_account_manager'): self.invoiced_readonly = False else: self.invoiced_readonly = True invoiced_readonly = fields.Boolean(compute="_set_access_for_invoiced", string='Is user able to modify invoiced?') @api.depends('profit_qty', 'list_price') def _compute_profit_amount(self): for service in self: if service.product_id: service.profit_amount = service.profit_qty * service.list_price or 0.0 @api.depends('cost_qty', 'cost_price') def _compute_cost_amount(self): for service in self: if service.product_id: service.cost_amount = service.cost_qty * service.cost_price or 0.0 @api.depends('profit_amount', 'profit_currency_rate') def _compute_sale_total(self): for service in self: if service.product_id: service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0 @api.onchange('profit_currency_rate') def _onchange_profit_currency_rate(self): for service in self: if service.product_id: service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0 
@api.onchange('profit_amount') def _onchange_profit_amount(self): for service in self: if service.product_id: service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0 service.profit_total = service.sale_total - service.cost_total or 0.0 @api.depends('cost_amount', 'cost_currency_rate') def _compute_cost_total(self): for service in self: if service.product_id: service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0 service.profit_total = service.sale_total - service.cost_total or 0.0 @api.depends('cost_total', 'sale_total') def _compute_profit_total(self): for service in self: if service.product_id: service.profit_total = service.sale_total - service.cost_total or 0.0 @api.depends('profit_total', 'sale_total') def _compute_margin_total(self): for service in self: if service.product_id: if service.sale_total > 0: service.margin_total = service.profit_total / service.sale_total * 100 @api.onchange('cost_amount') def _onchange_cost_amount(self): for service in self: if service.product_id: service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0 service.profit_total = service.sale_total - service.cost_total or 0.0 @api.onchange('cost_currency_rate') def _onchange_cost_currency_rate(self): for service in self: if service.product_id: service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0 service.profit_total = service.sale_total - service.cost_total or 0.0 @api.onchange('product_id') def _onchange_product_id(self): if not self.product_id: return {'domain': {'uom_id': []}} vals = {} domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.category_id.id)]} if not self.uom_id or (self.product_id.uom_id.id != self.uom_id.id): vals['uom_id'] = self.product_id.uom_id vals['product_name'] = self.product_id.name self.update(vals) if self.product_id: self.update({ 'list_price': self.product_id.list_price or 0.0, 'cost_price': self.product_id.standard_price or 0.0 }) 
@api.onchange('vendor_id') def _onchange_vendor_id(self): print('OnChange Vendor_ID') if self.vendor_id: if not self.billed: self.billed = False print('Invoiced False') @api.multi @api.depends('billed') def _compute_is_billed(self): for cost_profit_line in self: if cost_profit_line.vendor_id: if cost_profit_line.billed: cost_profit_line.is_billed = 'Y' elif not cost_profit_line.billed: cost_profit_line.is_billed = 'N' @api.multi @api.depends('paid') def _compute_is_paid(self): for cost_profit_line in self: if cost_profit_line.vendor_id: if cost_profit_line.paid: cost_profit_line.is_paid = 'Y' elif not cost_profit_line.paid: cost_profit_line.is_paid = 'N'
7,379
92d689e5caa2d8c65f86af0f8b49b009d162a783
from turtle import * from shapes import * #1- #1.triangle def eTriangle(): forward(100) right(120) forward(100) right(120) forward(100) right(120) mainloop() #2.square def square(): forward(100) right(90) forward(100) right(90) forward(100) right(90) forward(100) mainloop() #3.pentagon def pentagon(): forward(100) right(72) forward(100) right(72) forward(100) right(72) forward(100) right(72) forward(100) mainloop() #4.hexagon def hexagon(): forward(100) right(60) forward(100) right(60) forward(100) right(60) forward(100) right(60) forward(100) right(60) forward(100) mainloop() #5.octagon def octagon(): forward(100) right(45) forward(100) right(45) forward(100) right(45) forward(100) right(45) forward(100) right(45) forward(100) right(45) forward(100) right(45) forward(100) mainloop() #6.star def star(): forward(100) right(144) forward(100) right(144) forward(100) right(144) forward(100) right(144) forward(100) mainloop() #7.circle def circle(): for i in range(370): forward(2) right(1) mainloop() #2- from shapes import * eTriangle() square() pentagon() hexagon() octagon() star() circle() mainloop() #3- bgcolor("MidnightBlue") starp(20, True, "yellow", "MidnightBlue") right (20) forward(100) starp(20, True, "yellow", "MidnightBlue") right (30) forward(150) starp(20, True, "yellow", "MidnightBlue") right (40) forward(200) starp(20, True, "yellow", "MidnightBlue") right (50) forward(250) starp(20, True, "yellow", "MidnightBlue") right (60) forward(300) starp(20, True, "yellow", "MidnightBlue") forward(100) starp(20, True, "yellow", "MidnightBlue") forward(100) starp(20, True, "yellow", "MidnightBlue") left (90) forward(300) starp(20, True, "yellow", "MidnightBlue") right (50) forward (300) starp(20, True, "yellow", "MidnightBlue") right(50) forward(300) starp(20, True, "yellow", "MidnightBlue") right (50) forward (275) circlep(3, True, "SlateGrey", "MidnightBlue") right(60) forward(20) mainloop() #4- bgcolor("skyblue") right(90) penup() forward(100) right(90) 
forward(200) fillcolor("Green") begin_fill() forward (300) left(90) forward (300) left(90) forward(1250) left(90) forward(300) left(90) forward(1000) end_fill() right (90) pendown() rectangle(200, 450, True, "Red") left(180) forward(200) left(90) penup() forward(100) right(90) pendown rectangle(50, 100, True, "Brown") penup() right(90) forward(50) right(90) forward(50) right (90) forward(10) circle(.1, True, "Black") penup() forward(40) left(90) forward(50) pendown() fillcolor("grey") begin_fill() left (20) forward(400) left (75) forward(50) left(105) forward(400) left(75) forward(50) end_fill() right(5) penup() forward(200) right(90) forward(200) right(90) left(40) pendown() fillcolor("brown") begin_fill() forward(293.717) right(80) forward(293.717) right(140) forward(450) end_fill() penup() left(90) forward(75) left(90) forward(75) pendown() square(50, True, "blue", "Black") right(90) square(25, False, "blue", "black") right(90) forward(50) right(90) forward(25) square(25, False, "blue", "black") penup() left(90) forward(25) right(90) forward(200) pendown() square(50, True, "blue", "Black") right(90) square(25, False, "blue", "black") right(90) forward(50) right(90) forward(25) square(25, False, "blue", "black") penup() left(90) forward(250) left (90) forward(400) circlep(3, True, "yellow", "yellow") mainloop() #5- def door(): rectangle(50, 100, True, "Brown") penup() right(90) forward(50) right(90) forward(50) right (90) forward(10) circle(.1, True, "Black") def grass(): fillcolor("Green") begin_fill() forward (300) left(90) forward (300) left(90) forward(1250) left(90) forward(300) left(90) forward(1000) end_fill() def house(): rectangle(200, 450, True, "Red") def roof(): fillcolor("brown") begin_fill() forward(293.717) right(80) forward(293.717) right(140) forward(450) end_fill() def window(): square(50, True, "blue", "Black") right(90) square(25, False, "blue", "black") right(90) forward(50) right(90) forward(25) square(25, False, "blue", "black") def sun(): 
circlep(3, True, "yellow", "yellow") def sidewalk(): fillcolor("grey") begin_fill() left (20) forward(400) left (75) forward(50) left(105) forward(400) left(75) forward(50) end_fill() bgcolor("skyblue") right(90) penup() forward(100) right(90) forward(200) grass() right (90) pendown() house() left(180) forward(200) left(90) penup() forward(100) right(90) pendown door() penup() forward(40) left(90) forward(50) pendown() sidewalk() right(5) penup() forward(200) right(90) forward(200) right(90) left(40) pendown() roof() penup() left(90) forward(75) left(90) forward(75) pendown() window() penup() left(90) forward(25) right(90) forward(200) pendown() window() penup() left(90) forward(250) left (90) forward(400) sun() mainloop() #6- import random def craystar(): color('red', 'yellow') begin_fill() for i in range(36): forward(200) left(170) end_fill() def craytriangle(): color('black', 'blue') begin_fill() i = 60 while i > 0: forward(i) right(120) i -= 5 end_fill() def craysquare(): color("green", "Blue") begin_fill() for i in range(12): for i in range(4): forward(60) right(90) for i in range(12): forward (random.randint(1,60)) right(90) end_fill() craysquare() forward (50) craysquare() forward (50) craysquare() forward (50) craystar() forward(random.randint(1,100)) right(random.randint(1, 90)) craytriangle() forward(random.randint(1,100)) right(random.randint(1, 90)) craystar() forward(random.randint(1,100)) right(random.randint(1, 90)) craytriangle() forward(random.randint(1,100)) right(random.randint(1, 90)) craystar() forward(random.randint(1,100)) right(random.randint(1, 90)) craytriangle() mainloop()
7,380
e4f194c3dbc3e1d62866343642e41fa1ecdeab93
#!/usr/bin/python3 import os, re import csv, unittest from langtag import langtag from sldr.iana import Iana langtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub', 'langtags.json') bannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96] def nonascii(s): cs = [ord(x) for x in s] if any(not (32 <= x < 123) or x in bannedchars for x in cs): return True class Basic(unittest.TestCase): extraScripts = ["Toto", "Vith"] extraLangs = ("000", "cxh", "dsk", "dyr", "eud", "ikh", "izm", "lgs", # going in ~23/Mar/2023 'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr', 'ykh', 'zem', 'zlu') # going in ~23/Mar/2023 def setUp(self): self.fname = os.path.join(os.path.dirname(__file__), '../source/langtags.csv') with open(self.fname) as csvfile: reader = csv.DictReader(csvfile, restkey="_") self.rows = list(reader) self.fieldnames = reader.fieldnames self.numlines = reader.line_num self.iana = Iana() def _region_test(self, x): if x in self.iana.region: return True elif x in ("XX", "XK"): return True return False def _allRows(self): for r in self.rows: t = langtag(r['likely_subtag']) if t.lang.startswith("x-"): continue yield (r, t) def test_lang(self): ''' Tests that all lang subtags are in iana ''' fails = [] for r, t in self._allRows(): l = langtag(r['Lang_Id']) if l.lang != t.lang and "-" not in l.lang and "-" not in t.lang: self.fail("{Lang_Id} has different lang to {likely_subtag} ({0} != {1})".format(l.lang, t.lang, **r)) if t.lang not in self.iana.language and "-" not in t.lang and t.lang not in self.extraLangs: fails.append(r['Lang_Id']) if not l.test(fname=langtagjson): self.fail("{Lang_Id} failed conformance check".format(**r)) if len(fails): self.fail(f"{fails} langs not in IANA") def test_region(self): ''' Test that region values are sensible and that they equal the default region. Unknown regions do not have to be specified. 
''' for r,t in self._allRows(): reg = t.region if not self._region_test(t.region): self.fail("{likely_subtag} has irregular region".format(**r)) for s in r['regions'].split(): if not self._region_test(s.strip()): self.fail("{Lang_Id} has irregular region: {0} in regions".format(s, **r)) def test_script(self): ''' Qaa? type scripts must have an -x- for the script name ''' for r, t in self._allRows(): scr = t.script if scr is not None and (scr.startswith("Qaa") or scr.startswith("Qab")): if scr not in ("Qaax", "Qaby", "Qabz") and (t.extensions is None or 'x' not in t.extensions): self.fail("{Lang_Id} has no extension for script name".format(**r)) elif scr not in self.iana.script and scr not in self.extraScripts: self.fail("{Lang_Id} has irregular script {}".format(scr, **r)) elif t.script not in self.iana.script and t.script not in self.extraScripts: self.fail("{likely_subtag} has irregular script".format(**r)) def test_variants(self): ''' Test that all variants are in IANA ''' for r, t in self._allRows(): l = langtag(r['Lang_Id']) if t.vars is None and l.vars is None: continue if sorted(t.vars) != sorted(l.vars): self.fail("{Lang_Id} and {likely_subtag} have different variants".format(**r)) for v in t.vars: if v not in self.iana.variant: self.fail("{likely_subtag} has bad variant {0}".format(v, **r)) def test_csv_columns(self): ''' Test that everyone has the right number of columns ''' lc = self.fieldnames[-1] for r in self.rows: if len(r.get("_", [])): self.fail("{Lang_Id} has too many columns".format(**r)) elif r[lc] is None: self.fail("{Lang_Id} has too few columns".format(**r)) def test_pua(self): ''' Test that anything with -x- in Lang_Id has it in likely_subtag too ''' for r, t in self._allRows(): l = langtag(r['Lang_Id']) if t.ns is None and l.ns is None: continue if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1: continue # allow a private script extension if sorted(t.ns.keys()) != sorted(l.ns.keys()): self.fail("{Lang_Id} and {likely_subtag} have 
different extension namespaces".format(**r)) for k, v in t.ns.items(): if sorted(v) != sorted(l.ns[k]): self.fail("{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace".format(k, **r)) def test_ascii(self): ''' Test that all tags are pure ascii ''' for r, t in self._allRows(): for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3', 'Macro', 'variants'): if nonascii(r[cid]): self.fail("{Lang_Id} has non ASCII in column {0} value {1}".format(cid, r[cid], **r)) def test_iso639(self): ''' Test that the iso639 column is either empty or 3 lower ascii chars. ''' k = 'ISO 639-3' for r, t in self._allRows(): if r[k] == '': continue if len(r[k]) != 3 or r[k].lower() != r[k] or any(not (96 < ord(x) < 123) for x in r[k]): self.fail("{Lang_Id} has faulty ISO639 code of {ISO 639-3}".format(**r)) def test_deprecated(self): for r, t in self._allRows(): l = langtag(r['Lang_Id']) inf = self.iana.language.get(l.lang, {}) if 'Deprecated' in inf: if r['deprecated'] == '': self.fail("{Lang_Id} was deprecated: {} in IANA but not in the database".format(inf['Deprecated'], **r)) if __name__ == "__main__": unittest.main()
7,381
6b3cb7a42c8bc665e35206b135f6aefea3439758
""" DB models. """ from sqlalchemy import Column, Integer, String, ForeignKey, DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship from db.session import map_engine, replay_engine MapBase = declarative_base(bind=map_engine) ReplayBase = declarative_base(bind=replay_engine) class Map(MapBase): __tablename__ = 'map' id = Column(Integer, primary_key=True) name = Column(String) size_x = Column(Integer) size_y = Column(Integer) lines = relationship('Line', backref='map', lazy='dynamic') points = relationship('Point', backref='map', lazy='dynamic') posts = relationship('Post', backref='map', lazy='dynamic') def __repr__(self): return "<Map(id='{}', name='{}', size_x='{}', size_y='{}')>".format( self.id, self.name, self.size_x, self.size_y) class Line(MapBase): __tablename__ = 'line' id = Column(Integer, primary_key=True) len = Column(Integer) p0 = Column(Integer) p1 = Column(Integer) map_id = Column(Integer, ForeignKey('map.id')) def __repr__(self): return "<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>".format( self.id, self.len, self.p0, self.p1, self.map_id) class Point(MapBase): __tablename__ = 'point' id = Column(Integer, primary_key=True) map_id = Column(Integer, ForeignKey('map.id')) x = Column(Integer) y = Column(Integer) posts = relationship('Post', backref='point', lazy='dynamic') def __repr__(self): return "<Point(id='{}', map_id='{}', x='{}', y='{}')>".format( self.id, self.map_id, self.x, self.y) class Post(MapBase): __tablename__ = 'post' id = Column(Integer, primary_key=True) name = Column(String) type = Column(Integer) population = Column(Integer) armor = Column(Integer) product = Column(Integer) replenishment = Column(Integer) map_id = Column(Integer, ForeignKey('map.id')) point_id = Column(Integer, ForeignKey('point.id')) def __repr__(self): return ( "<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', " "product='{}', replenishment='{}', map_id='{}', point_id='{}')>".format( 
self.id, self.name, self.type, self.population, self.armor, self.product, self.replenishment, self.map_id, self.point_id ) ) class Game(ReplayBase): __tablename__ = 'game' id = Column(Integer, primary_key=True) name = Column(String) date = Column(DateTime) map_name = Column(String) actions = relationship('Action', backref='game', lazy='dynamic') num_players = Column(Integer) def __repr__(self): return "<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>".format( self.id, self.name, self.date, self.map_name, self.num_players) class Action(ReplayBase): __tablename__ = 'action' id = Column(Integer, primary_key=True) game_id = Column(Integer, ForeignKey('game.id')) code = Column(Integer) message = Column(String) date = Column(DateTime) def __repr__(self): return "<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>".format( self.id, self.game_id, self.code, self.message, self.date)
7,382
c6ab82d7f59faeee2a74e90a96c2348b046d0889
#Multiple Word Palindromes #Ex 72 extended word = input("Word: ") new = [] o = [] r = [] #canceling out the spaces for i in range(len(word)): if word[i] in ".,?!" or word[i] == ' ': pass else: new.append(word[i]) #original for i in range(len(new)): o.append(new[i]) #reverse for i in range(len(new)): r.append(new[-i - 1]) print(new) print(o) print(r) same_count = 0 for i in range(len(new)): if o[i] == r[i]: same_count += 1 else: pass if same_count == len(new): print("Palindrome") else: print("Non Palindrome")
7,383
cdabb4a118cb0ef55c271a446fa190a457ebe142
#!/usr/bin/env python # -*- coding:utf-8 _*- """ :Author :weijinlong :Time: :2020/1/10 17:22 :File :graph.py :content: """ import tensorflow as tf from .base import TFLayer class TFModel(TFLayer): def build_model(self): raise NotImplementedError def add_outputs(self, *args, **kwargs): """模型的输出值 :param args: :param kwargs: :return: """ outputs = {} for value in args: assert isinstance(value, tf.Tensor), "function add_outputs parameter's value must be tf.Tensor" name = value.name outputs[name.split(':')[0]] = name for key, value in kwargs.items(): assert isinstance(value, tf.Tensor), "function add_outputs parameter's value must be tf.Tensor" outputs[key] = value.name self.update_outputs(outputs) class TFCompile(TFLayer): def compile(self): raise NotImplementedError def add_metrics(self, *args, **kwargs): """加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作 :param args: :param kwargs: :return: """ metrics = {} for value in args: assert isinstance(value, (tf.Operation, tf.Tensor)), \ "function add_metrics parameter's value must be tf.Operation" name = value.name metrics[name.split(':')[0]] = name for key, value in kwargs.items(): assert isinstance(value, (tf.Operation, tf.Tensor)), \ "function add_metrics parameter's value must be tf.Operation" metrics[key] = value.name self.update_metrics(metrics) @property def fetches(self): """ 获取模型输出值或者评估值, 来优化训练模型 :return: """ return self.metrics class TFComModel(TFModel, TFCompile): """ 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译 """ def build_model(self): raise NotImplementedError def compile(self): pass
7,384
401c6b09edf593e00aecf5bbb1b2201effc9e78c
# # @lc app=leetcode id=14 lang=python3 # # [14] Longest Common Prefix # # @lc code=start class Solution: def longestCommonPrefix(self, strs: List[str]) -> str: pass # At the moment I just wanna test my workspace so it's working tomorrow it's time for the problems # @lc code=end
7,385
1dd09a09f542099091d94d466ebd7cc149884eb4
import time from junk.keyboard_non_blocking import NonBlockingKeyboard TICK_DURATION = 0.05 INITIAL_FOOD_LEVEL = 100 FOOD_PER_TICK = -1 FOOD_PER_FEED = 10 MAX_FOOD_LEVEL = 100 INITIAL_ENERGY_LEVEL = 50 ENERGY_PER_TICK_AWAKE = -1 ENERGY_PER_TICK_ASLEEP = 5 MAX_ENERGY_LEVEL = 100 INITIAL_IS_AWAKE = False INITIAL_POOP_LEVEL = 0 TICKS_PER_POOP = 25 MAX_POOP_LEVEL = 10 class UnknownCommand(Exception): pass def _add_and_clip(x, dx, x_min, x_max): return max(x_min, min(x_max, x + dx)) class Tamagotchi: def __init__(self) -> None: self._age = 0 self._food_level = INITIAL_FOOD_LEVEL self._energy_level = INITIAL_ENERGY_LEVEL self._poop_level = INITIAL_POOP_LEVEL self._is_awake = INITIAL_IS_AWAKE self._commands = { "f": self._feed, "c": self._clean, "s": self._sleep, } def __repr__(self) -> str: return f"Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})" def process_command(self, command: str) -> None: try: self._commands[command]() except KeyError: raise UnknownCommand(command) def _feed(self) -> None: if self._is_awake: self._food_level = _add_and_clip( self._food_level, FOOD_PER_FEED, 0, MAX_FOOD_LEVEL ) def _clean(self) -> None: self._poop_level = 0 def _sleep(self) -> None: self._is_awake = False def is_alive(self) -> bool: return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL def update(self) -> None: self._age += 1 # Food self._food_level = _add_and_clip( self._food_level, FOOD_PER_TICK, 0, MAX_FOOD_LEVEL ) # Energy if self._energy_level >= MAX_ENERGY_LEVEL: self._is_awake = True if self._energy_level <= 0: self._is_awake = False energy_delta = ( ENERGY_PER_TICK_AWAKE if self._is_awake else ENERGY_PER_TICK_ASLEEP ) self._energy_level = _add_and_clip( self._energy_level, energy_delta, 0, MAX_ENERGY_LEVEL ) # Poop if self._age % TICKS_PER_POOP == 0: self._poop_level += 1 def main(): tamagotchi = Tamagotchi() with NonBlockingKeyboard() as kb: while True: inpt 
= kb.getstr() should_quit = False for c in inpt: try: tamagotchi.process_command(c) except UnknownCommand: if c == "q": should_quit = True break else: raise if should_quit: break tamagotchi.update() print(tamagotchi) if not tamagotchi.is_alive(): print("tamagotchi died") break time.sleep(TICK_DURATION) if __name__ == "__main__": main()
7,386
70188d011ef60b1586864c4b85a9f9e70e5a4caf
from fastapi import FastAPI, Header, Cookie, Form, Request, requests, Body, Response, HTTPException, status, Path, Query from fastapi.responses import HTMLResponse from typing import Optional from fastapi.testclient import TestClient from typing import List, Callable from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates from fastapi.exceptions import RequestValidationError from fastapi.routing import APIRoute from starlette.responses import JSONResponse from pydantic import BaseModel import uvicorn import time payloads = { 'peoples': [ { 'firstname': 'watcharapon', 'lastname': 'weeraborirak', 'age': '24', 'city': 'bangkok' }, { 'firstname': 'somsak', 'lastname': 'tamjai', 'age': '22', 'city': 'bangkok' }, { 'firstname': 'rakkana', 'lastname': 'meejai', 'age': '66', 'city': 'outcast' }, ] } class Item(BaseModel): name: str price: float class ValidationError(APIRoute): def get_route_handler(self) -> Callable: original_route_handler = super().get_route_handler() async def customer_route_handler(request: Request) -> Response: try: return await original_route_handler(request) except RequestValidationError as exc: body = await request.body() detail = {'error': exc.errors(), 'body': body.decode()} raise HTTPException(status_code=200, detail=detail) return customer_route_handler app = FastAPI() app.router.route_class = ValidationError app.mount('/static', StaticFiles(directory='static'), name='static') templates = Jinja2Templates(directory='templates') client = TestClient(app) @app.middleware('http') async def add_process_time_header(request: Request, call_next): start_time = time.time() response = await call_next(request) process_time = time.time() - start_time response.headers['X-Process-Time'] = '{}'.format(str(round(process_time, 4))) return response @app.middleware('http') async def add_process_name(request: Request, call_next): response = await call_next(request) response.headers['X-Owner-Server'] = 'Kane' return response 
@app.post('/items') async def base_model(item: Item): item_dict = item.dict() return {'message': item_dict} @app.put('/items/{item_id}') async def item_id(item_id: int, item: Item): return {'item_id': item_id, **item.dict()} @app.get("/items_id/{item_id}") async def read_items( item_id: int = Path(..., title="The ID of the item to get"), q: Optional[str] = Query(None, alias="item-query") ): results = {"item_id": item_id} if q: results.update({"q": q}) return results @app.get('/peoples') async def fetch_movies(query: str = None): # query param string payload = [p[query] for p in payloads['peoples']] return payload @app.get('/member') async def member(item: Item, X_Item_ID: str = Header(...)): # Header print(X_Item_ID) if X_Item_ID != 'member': raise HTTPException(status_code=400, detail="X-Item-ID header invalid") return JSONResponse(content={item.name: 'kane', item.price: 123.33}) @app.get('/member/token') async def member_token(x_token: str = Cookie(None)): print(x_token) return {'message': f'success cookie {x_token}'} @app.get('/api_body/{item_id}') # dynamic route async def api_body(item_id: str): return {'item_id': item_id} @app.post('/payload_request', response_model=Item, status_code=status.HTTP_201_CREATED) async def payload_request(item: Item): return item @app.post("/payload_json") async def create_item(payload: dict = Body(...)): print(payload) return payload @app.post('/form_data') async def form_data(password: str = Form(...), username: str = Form(...)): return {'message': {'user': username, 'pwd': password}} @app.post('/cookies') async def cookies(response: Response): response.set_cookie(key='foo', value='value') return {'message': 'cookies darken'} @app.get('/') @app.get('/index', tags=['dashboard']) async def index(request: Request): return templates.TemplateResponse('template_fastapi/login.vue', context={'request': request}) @app.get("/func_element", response_model=Item, tags=["Description"], deprecated=True) async def func_element(item: Item): """ 
Get Data Element: - **name**: my_name - **price**: price """ return item @app.post("/func_item", response_model=Item, tags=["Description"], summary="Create an item", description="Create an item with all the , name, description, price, tax and a set of unique tags") async def fuc_item(item: Item): update_item = item.dict() update_item['name'] = 'kane_ja' return update_item @app.post('/json_response', response_model=Item, tags=['Description']) async def json_response(item: Item): """ Return JsonResponse - **Item**: name - **status**: 201 """ return JSONResponse(content={item.name: 'kaneeang'}, status_code=201) if __name__ == '__main__': uvicorn.run('fastapi_route_config:app', debug=True, port=8080)
7,387
2d20bac0f11fa724b2d0a2e0676e5b9ce7682777
# -*- coding: utf-8 -*- # Copyright (c) 2018-2019 Linh Pham # wwdtm_panelistvspanelist is relased under the terms of the Apache License 2.0 """WWDTM Panelist Appearance Report Generator""" import argparse from collections import OrderedDict from datetime import datetime import json import os import shutil from typing import List, Dict, Text import mysql.connector import pytz from jinja2 import Environment, FileSystemLoader def retrieve_panelist_appearance_counts(panelist_id: int, database_connection: mysql.connector.connect ) -> List[Dict]: """Retrieve yearly apperance count for the requested panelist ID""" cursor = database_connection.cursor() query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count " "FROM ww_showpnlmap pm " "JOIN ww_shows s ON s.showid = pm.showid " "JOIN ww_panelists p ON p.panelistid = pm.panelistid " "WHERE pm.panelistid = %s AND s.bestof = 0 " "AND s.repeatshowid IS NULL " "GROUP BY p.panelist, YEAR(s.showdate) " "ORDER BY p.panelist ASC, YEAR(s.showdate) ASC") cursor.execute(query, (panelist_id, )) result = cursor.fetchall() if not result: return None appearances = OrderedDict() total_appearances = 0 for row in result: appearances[row[0]] = row[1] total_appearances += row[1] appearances["total"] = total_appearances return appearances def retrieve_all_panelist_appearance_counts(database_connection: mysql.connector.connect ) -> List[Dict]: """Retrieve all appearance counts for all panelists from the database""" cursor = database_connection.cursor() query = ("SELECT DISTINCT p.panelistid, p.panelist " "FROM ww_showpnlmap pm " "JOIN ww_panelists p ON p.panelistid = pm.panelistid " "JOIN ww_shows s ON s.showid = pm.showid " "WHERE s.bestof = 0 AND s.repeatshowid IS NULL " "ORDER BY p.panelist ASC") cursor.execute(query) result = cursor.fetchall() if not result: return None panelists = [] for row in result: panelist = {} panelist_id = row[0] panelist["name"] = row[1] appearances = 
retrieve_panelist_appearance_counts(panelist_id=panelist_id, database_connection=database_connection) panelist["appearances"] = appearances panelists.append(panelist) return panelists def retrieve_all_years(database_connection: mysql.connector.connect) -> List[int]: """Retrieve a list of all available show years""" cursor = database_connection.cursor() query = ("SELECT DISTINCT YEAR(s.showdate) FROM ww_shows s " "ORDER BY YEAR(s.showdate) ASC") cursor.execute(query) result = cursor.fetchall() cursor.close() if not result: return None years = [] for row in result: years.append(row[0]) return years def load_config(): """Load configuration values from configuration file and from options passed into script execution""" # Read in configuration file for default values with open("config.json", "r") as config_file: config_dict = json.load(config_file) # Read in options passed in that override values from the config.json file parser = argparse.ArgumentParser() parser.add_argument("--ga-property-code", dest="ga_property_code", type=str, help="Google Analytics Property Code (default: %(default)s)", default=config_dict["report"]["ga_property_code"]) parser.add_argument("--css-directory", dest="css_directory", type=str, help="Directory where the base CSS stylesheet file is stored " "(default: %(default)s)", default=config_dict["report"]["css_directory"]) parser.add_argument("--css-filename", dest="css_filename", type=str, help="File name of the report CSS stylesheet file " "(default: %(default)s)", default=config_dict["report"]["css_filename"]) parser.add_argument("--output-directory", dest="output_directory", type=str, help="Directory where the generated report will be saved " "(default: %(default)s)", default=config_dict["report"]["output_directory"]) parser.add_argument("--output-filename", dest="output_filename", type=str, help="File name of the generated report will be saved " "(default: %(default)s)", default=config_dict["report"]["output_filename"]) args = 
parser.parse_args() # Override the values from the config.json file if values were set via argparse if args.ga_property_code != config_dict["report"]["ga_property_code"]: config_dict["report"]["ga_property_code"] = args.ga_property_code if args.css_directory != config_dict["report"]["css_directory"]: config_dict["report"]["css_directory"] = args.css_directory if args.css_filename != config_dict["report"]["css_filename"]: config_dict["report"]["css_filename"] = args.css_filename if args.output_directory != config_dict["report"]["output_directory"]: config_dict["report"]["output_directory"] = args.output_directory if args.output_filename != config_dict["report"]["output_filename"]: config_dict["report"]["output_filename"] = args.output_filename return config_dict def render_report(show_years: List[int], panelists: List[Dict], report_settings: Dict ) -> Text: """Render appearances report using Jinja2""" # Setup Jinja2 Template template_loader = FileSystemLoader("./template") template_env = Environment(loader=template_loader, trim_blocks=True, lstrip_blocks=True) template_file = "report.tmpl.html" template = template_env.get_template(template_file) # Generate timestamp to include in page footer time_zone = pytz.timezone("America/Los_Angeles") rendered_date_time = datetime.now(time_zone) # Build dictionary to pass into template renderer render_data = {} render_data["show_years"] = show_years render_data["panelists"] = panelists render_data["settings"] = report_settings render_data["rendered_at"] = rendered_date_time.strftime("%A, %B %d, %Y %H:%M:%S %Z") # Render the report and write out to output directory report = template.render(render_data=render_data) return report def generate_output_files(rendered_report: Text, report_settings: Dict) -> None: """Writes out the generated report file to file in the output directory and copies the base CSS file to the same directory""" css_path = os.path.join(report_settings["css_directory"], report_settings["css_filename"]) 
output_path = os.path.join(report_settings["output_directory"], report_settings["output_filename"]) # Create the output directory if it does not exist if not os.path.isdir(report_settings["output_directory"]): os.mkdir(report_settings["output_directory"]) # Write out the generated report with open(output_path, "w") as output_file: if output_file.writable(): output_file.write(rendered_report) else: print("Error: {} is not writable".format(output_path)) # Copy CSS file into output directory shutil.copy2(css_path, report_settings["output_directory"]) return def main(): """Bootstrap database connection, retrieve panelist appearance data, generate the report and create an output bundle""" app_config = load_config() database_connection = mysql.connector.connect(**app_config["database"]) panelists = retrieve_all_panelist_appearance_counts(database_connection) show_years = retrieve_all_years(database_connection) rendered_report = render_report(show_years=show_years, panelists=panelists, report_settings=app_config["report"]) generate_output_files(rendered_report=rendered_report, report_settings=app_config["report"]) # Only run if executed as a script and not imported if __name__ == '__main__': main()
7,388
5172819da135600d0764033a85a4175098274806
import numpy as np
import pandas as pd
import datetime
import time
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.model_selection import KFold


def make_submission(y_predict, user_id_test, movie_id_test, name=None, date=True):
    """Write predicted ratings to a CSV submission file.

    Parameters
    ----------
    y_predict : sequence of float
        Predicted rating for each (user, movie) pair.
    user_id_test, movie_id_test : sequences
        User and movie ids, aligned element-wise with ``y_predict``.
    name : str or None
        Base name of the output file (default ``'submission'``).
    date : bool
        If True, append a ``dd-mm-YYYY_HHhMM`` timestamp to the name.

    Raises
    ------
    ValueError
        If any prediction is NaN (the file is left partially written).
    """
    n_elements = len(y_predict)
    if name is None:
        name = 'submission'
    if date:
        name = name + '_{}'.format(time.strftime('%d-%m-%Y_%Hh%M'))
    with open(name + ".csv", 'w') as f:
        # NOTE(review): the header advertises one combined
        # "USER_ID_MOVIE_ID" column but each row is written as three
        # comma-separated fields -- confirm against the grader's
        # expected submission format.
        f.write('"USER_ID_MOVIE_ID","PREDICTED_RATING"\n')
        for i in range(n_elements):
            if np.isnan(y_predict[i]):
                raise ValueError('NaN detected!')
            line = '{:0.0f},{:0.0f},{}\n'.format(user_id_test[i], movie_id_test[i], y_predict[i])
            f.write(line)
    print("Submission file successfully written!")


class ModelSelection:
    """Hyper-parameter selection for rating prediction by nested cross-validation.

    Each ``optimizeParameters*`` method scores every candidate parameter with
    an inner ``cross_val_score`` on each outer KFold training split, picks the
    candidate with the lowest mean MSE, and re-scores that candidate on the
    whole aggregated dataset.
    """

    def __init__(self, user_data, movie_data, aggregated_data, train_data, output_train):
        self.train = train_data
        self.users = user_data
        self.aggregated = aggregated_data
        self.movies = movie_data
        self.output_train = output_train

    def _optimize(self, nb_fold, param_range, make_model):
        """Shared nested-CV search used by the three public optimizers.

        Parameters
        ----------
        nb_fold : int
            Number of folds for both the outer and inner CV loops.
        param_range : sequence of int
            Candidate hyper-parameter values.
        make_model : callable
            ``param -> estimator`` factory (fresh estimator per candidate).

        Returns
        -------
        list : ``[best_param, mean_mse_on_whole_data]``.
        """
        kf = KFold(n_splits=nb_fold)
        errors = np.zeros(len(param_range))
        # Outer CV: accumulate each candidate's inner cross-validated MSE
        # over every outer training split, then average.
        for train_index, _test_index in kf.split(self.aggregated):
            for i, param in enumerate(param_range):
                # NOTE(review): output_train is a pandas Series; integer-array
                # indexing relies on its default RangeIndex -- confirm.
                scores = cross_val_score(make_model(param),
                                         self.aggregated[train_index],
                                         self.output_train[train_index],
                                         cv=nb_fold,
                                         scoring='neg_mean_squared_error')
                errors[i] += -scores.mean()
        errors /= nb_fold
        # BUG FIX: the original seeded its running minimum with the magic
        # value 5, so when every candidate scored worse than 5 the "best"
        # parameter silently stayed 0 (an invalid max_depth / n_neighbors).
        # argmin always selects a real candidate.  Taking the value from
        # param_range (a range of ints) also fixes the original's passing of
        # numpy floats, read back from a float array, into sklearn estimators
        # that require integer parameters.
        best_param = param_range[int(np.argmin(errors))]
        # Recompute the error for the winning model on the whole dataset.
        final_error = -cross_val_score(make_model(best_param),
                                       self.aggregated,
                                       self.output_train,
                                       cv=nb_fold,
                                       scoring='neg_mean_squared_error')
        return [best_param, final_error.mean()]

    def optimizeParametersDecisionTreeClassifier(self, nb_fold, max_depth_range):
        """Pick the best ``max_depth`` for a DecisionTreeClassifier."""
        return self._optimize(nb_fold, list(max_depth_range),
                              lambda depth: DecisionTreeClassifier(max_depth=depth))

    def optimizeParametersKNeighborsClassifier(self, nb_fold, k_range):
        """Pick the best ``n_neighbors`` for a KNeighborsClassifier."""
        return self._optimize(nb_fold, list(k_range),
                              lambda k: KNeighborsClassifier(n_neighbors=k))

    def optimizeParametersKNeighborsRegressor(self, nb_fold, k_range):
        """Pick the best ``n_neighbors`` for a KNeighborsRegressor."""
        return self._optimize(nb_fold, list(k_range),
                              lambda k: KNeighborsRegressor(n_neighbors=k))


users = pd.read_csv("data/user_data_normalized_28-11-2016_01h32.csv", delimiter=",")
movies = pd.read_csv("data/movie_data_normalized.csv", delimiter=",")
train = pd.read_csv("data/data_train.csv", delimiter=",")
output = pd.read_csv("data/output_train.csv", delimiter=",")["rating"]
aggregated = pd.read_csv("data/agregated_data_28-11-2016_01h50.csv", delimiter=",")

ms = ModelSelection(users.values, movies.values, aggregated.values, train.values, output)
#print(ms.optimizeParametersDecisionTreeClassifier(5, range(2,3,1)))
print(ms.optimizeParametersKNeighborsClassifier(5, range(1,5,1)))
#print(ms.optimizeParametersKNeighborsClassifier(5, range(5,10,1)))
7,389
ec0113dbd79e936e614bb7ee7e48d29aa616d511
# Read an integer from stdin (kept for input-format compatibility; the
# value is not used by the countdown below).
num = int(input())

# Print 10 down to 1, space-separated, with a trailing space and no
# final newline -- identical output to the original while-loop version.
for value in range(10, 0, -1):
    print(value, end=" ")
7,390
e9a1fd8464f6c1e65aa2c1af60becbfcbf050814
import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as layers


class Model(object):
    """Small convolutional image classifier built as a TF1 static graph.

    Construction builds, inside one variable scope: a training network
    (dropout on), a weight-sharing evaluation network (dropout off), the
    softmax cross-entropy loss, a Nesterov-momentum train op, accuracy ops
    and merged summaries.  Inputs are flattened 32x32x3 images (3072 floats),
    i.e. the CIFAR layout.
    """

    def __init__(self, batch_size=128, learning_rate=0.01, num_labels=10, keep_prob=0.5, scope="model"):
        # batch_size and learning_rate are stored, but note that the
        # effective learning rate is actually fed at run time through the
        # `lr` placeholder created in _build_model.
        self._batch_size = batch_size
        self._learning_rate = learning_rate
        self._num_labels = num_labels
        self._scope = scope
        self._keep_prob = keep_prob
        # Output channel counts for the two conv blocks.
        self._conv_hidden_dims = [192, 192]

        with tf.variable_scope(self._scope):
            self._build_model()

    def _build_net(self, x, reuse=False, trainable=True, scope="inference_net"):
        """Build the inference network and return unscaled class logits.

        Args:
            x: image batch tensor of shape [batch, 32, 32, 3].
            reuse: share variables with a previously built copy of this scope.
            trainable: also doubles as the dropout `is_training` flag, so a
                trainable=False copy evaluates deterministically.
            scope: variable scope name for the network.
        """
        with tf.variable_scope(scope, reuse=reuse):
            out = x
            for i in range(len(self._conv_hidden_dims)):
                # Each block: 5x5 conv (ReLU) -> dropout -> 2x2 max-pool.
                out = layers.conv2d(out, num_outputs=self._conv_hidden_dims[i], kernel_size=(5, 5),
                                    activation_fn=tf.nn.relu, trainable=trainable)
                out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
                out = layers.max_pool2d(out, kernel_size=(2, 2))
            out = layers.flatten(out)
            out = layers.fully_connected(out, num_outputs=1000, activation_fn=tf.nn.relu, trainable=trainable)
            out = layers.dropout(out, keep_prob=self._keep_prob, is_training=trainable)
            # Final linear layer (default activation): one logit per class.
            logits = layers.fully_connected(out, self._num_labels, trainable=trainable)
            return logits

    def _build_model(self):
        """Wire placeholders, loss, train op, accuracy and summaries."""
        self.x_ = tf.placeholder(tf.float32, shape=[None, 3072], name='x_')  # data gets loaded as a 32x32 vector
        x = tf.reshape(self.x_, [-1, 32, 32, 3], name='x')  # CIFAR dataset is shape 32,32,3
        self.y = tf.placeholder(tf.float32, shape=[None, self._num_labels], name='y')  # 10 labels
        # self.keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
        self.lr = tf.placeholder(tf.float32, shape=(), name='lr')

        # Training graph: dropout active, variables trainable.
        self.logits = self._build_net(x)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y)
        self.loss = tf.reduce_mean(cross_entropy)
        optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9, use_nesterov=True)
        self.train_op = optimizer.minimize(loss=self.loss)
        # Mean over the batch of (predicted class == label class).
        self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.y, 1)),
                                          dtype=tf.float32))

        # for eval steps: same weights (reuse=True) with dropout disabled.
        self.val_logits = self._build_net(x, reuse=True, trainable=False)
        self.val_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.val_logits, 1), tf.argmax(self.y, 1)),
                                              dtype=tf.float32))

        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('accuracy', self.acc)
        self.merged = tf.summary.merge_all()
7,391
34a456efc72b303aed5f722bb415d30ff62addab
"""
Demo script: place an agent at a fixed pose inside one maze of a
pre-generated SSP-navigation dataset, render the environment, and plot
the agent's distance-sensor readings as a bar chart.
"""
import numpy as np
import argparse
import torch

from gridworlds.envs import GridWorldEnv, generate_obs_dict
from gridworlds.constants import possible_objects

import nengo_spa as spa
from collections import OrderedDict

from spatial_semantic_pointers.utils import encode_point, ssp_to_loc, get_heatmap_vectors

import matplotlib.pyplot as plt
import seaborn as sns

seed = 13
np.random.seed(seed)

# Which maze of the dataset to load, SSP dimensionality, and the number of
# evenly spaced distance sensors on the agent.
maze_index = 0
ssp_dim = 512
n_sensors = 36

# NOTE(review): hard-coded absolute path to the pre-generated maze dataset;
# adjust for your machine.
dataset = '/home/ctnuser/ssp-navigation/ssp_navigation/datasets/mixed_style_100mazes_100goals_64res_13size_13seed/maze_dataset.npz'

# Environment/observation configuration consumed by generate_obs_dict and
# GridWorldEnv below.  Most observation channels are disabled ('none' / 0).
params = {
    'continuous': True,
    'fov': 360,
    'n_sensors': n_sensors,
    'max_sensor_dist': 10,
    'normalize_dist_sensors': False,
    'movement_type': 'holonomic',
    'seed': seed,
    # 'map_style': args.map_style,
    'map_size': 10,
    'fixed_episode_length': False,  # Setting to false so location resets are not automatic
    'episode_length': 1000,  # 200,
    'max_lin_vel': 5,
    'max_ang_vel': 5,
    'dt': 0.1,
    'full_map_obs': False,
    'pob': 0,
    'n_grid_cells': 0,
    'heading': 'none',
    'location': 'none',
    'goal_loc': 'none',
    'goal_vec': 'none',
    'bc_n_ring': 0,
    'hd_n_cells': 0,
    'csp_dim': 0,
    'goal_csp': False,
    'agent_csp': False,
    'goal_distance': 0,  # args.goal_distance  # 0 means completely random
}

obs_dict = generate_obs_dict(params)

np.random.seed(params['seed'])

data = np.load(dataset)

# n_mazes by size by size
coarse_mazes = data['coarse_mazes']
coarse_size = coarse_mazes.shape[1]
n_maps = coarse_mazes.shape[0]

# Size of map IDs. Equal to n_maps if using one-hot encoding
id_size = n_maps

# One-hot map id for the chosen maze, as a 1 x n_maps torch tensor.
map_id = np.zeros((n_maps,))
map_id[maze_index] = 1
map_id = torch.Tensor(map_id).unsqueeze(0)

# n_mazes by res by res
fine_mazes = data['fine_mazes']

xs = data['xs']
ys = data['ys']

res = fine_mazes.shape[1]

coarse_xs = np.linspace(xs[0], xs[-1], coarse_size)
coarse_ys = np.linspace(ys[0], ys[-1], coarse_size)

map_array = coarse_mazes[maze_index, :, :]

# SSP axis vectors from the dataset; heatmap vectors are used to decode
# SSPs back to 2D locations at fine and coarse resolution.
x_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])
y_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])
heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
coarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys, x_axis_sp, y_axis_sp)

# fixed random set of locations for the goals
limit_range = xs[-1] - xs[0]

goal_sps = data['goal_sps']
goals = data['goals']
# print(np.min(goals))
# print(np.max(goals))
# Rescale goal coordinates from dataset units into coarse-grid cells.
goals_scaled = ((goals - xs[0]) / limit_range) * coarse_size
# print(np.min(goals_scaled))
# print(np.max(goals_scaled))

n_goals = 0  # 10  # TODO: make this a parameter
object_locations = OrderedDict()
vocab = {}
use_dataset_goals = False
for i in range(n_goals):
    sp_name = possible_objects[i]
    if use_dataset_goals:
        object_locations[sp_name] = goals_scaled[maze_index, i]  # using goal locations from the dataset
    else:
        # If set to None, the environment will choose a random free space on init
        object_locations[sp_name] = None
    # Random unitary-ish semantic pointer for each object name.
    # vocab[sp_name] = spa.SemanticPointer(ssp_dim)
    vocab[sp_name] = spa.SemanticPointer(data=np.random.uniform(-1, 1, size=ssp_dim)).normalized()

env = GridWorldEnv(
    map_array=map_array,
    object_locations=object_locations,  # object locations explicitly chosen so a fixed SSP memory can be given
    observations=obs_dict,
    movement_type=params['movement_type'],
    max_lin_vel=params['max_lin_vel'],
    max_ang_vel=params['max_ang_vel'],
    continuous=params['continuous'],
    max_steps=params['episode_length'],
    fixed_episode_length=params['fixed_episode_length'],
    dt=params['dt'],
    screen_width=300,
    screen_height=300,
    debug_ghost=True,
)

# Reset, pin the agent at a fixed pose, take a null step and render.
obs = env.reset(goal_distance=params['goal_distance'])
# env.set_agent_state(np.array([6, 9, 0]))
env.set_agent_state(np.array([3, 7, 0]))
env.step(np.array([0, 0]))
env.render()
env._render_extras()

# Raw distance-sensor readings at the current agent state.
sensors = env.get_dist_sensor_readings(
    state=env.state,
    n_sensors=params['n_sensors'],
    fov_rad=params['fov'] * np.pi / 180.,
    max_dist=params['max_sensor_dist'],
    normalize=params['normalize_dist_sensors'],
)

fig, ax = plt.subplots(1, 1, figsize=(3, 3), tight_layout=True)

ax.bar(np.arange(len(sensors)), sensors)
ax.set_ylabel('Distance')
ax.set_xlabel('Sensor Index')

sns.despine()

plt.show()
7,392
1af9fb91e69ea78709c47fca6b12e4f7a6fd17a8
import unittest
import os
import tempfile
import numpy as np
from keras_piecewise.backend import keras
from keras_piecewise import Piecewise2D
from .util import MaxPool2D


class TestPool2D(unittest.TestCase):
    """Tests for the Piecewise2D wrapper applied to a 2D max-pooling layer."""

    @staticmethod
    def _build_model(input_shape, layer, row_num, col_num, pos_type=Piecewise2D.POS_TYPE_SEGMENTS):
        # Model with three inputs: the 2D data plus per-sample row and
        # column split positions; output is the piecewise-pooled result.
        data_input = keras.layers.Input(shape=input_shape)
        row_input = keras.layers.Input(shape=(row_num,))
        col_input = keras.layers.Input(shape=(col_num,))
        pool_layer = Piecewise2D(
            layer=layer,
            pos_type=pos_type,
        )([data_input, row_input, col_input])
        model = keras.models.Model(inputs=[data_input, row_input, col_input], outputs=pool_layer)
        model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.mean_squared_error)
        model.summary()
        return model

    def test_max_2d(self):
        # Two identical 4x4 samples; `rows`/`cols` carry the per-sample
        # piece boundaries (POS_TYPE_SEGMENTS: cumulative end positions).
        data = [
            [
                [1, 3, 5, 2],
                [2, 5, 6, 1],
                [7, 1, 5, 3],
                [7, 2, 2, 4],
            ],
            [
                [1, 3, 5, 2],
                [2, 5, 6, 1],
                [7, 1, 5, 3],
                [7, 2, 2, 4],
            ],
        ]
        rows = [
            [2, 4],
            [3, 4],
        ]
        cols = [
            [1, 2, 4],
            [1, 3, 4],
        ]
        model = self._build_model(
            input_shape=(None, None),
            layer=MaxPool2D(),
            row_num=len(rows[0]),
            col_num=len(cols[0]),
        )
        predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
        # Expected per-sample grids of piece maxima (hand-computed from the
        # row/col segments above).
        expected = [
            [
                [2.0, 5.0, 6.0],
                [7.0, 2.0, 5.0],
            ],
            [
                [7.0, 6.0, 3.0],
                [7.0, 2.0, 4.0],
            ],
        ]
        self.assertEqual(expected, predicts)
        # Second case: POS_TYPE_PAIRS, where positions are (start, end)
        # pairs; also round-trips the model through save/load to check that
        # the custom layer serializes correctly.
        cols = [
            [1, 2, 0, 4],
            [1, 3, 2, 4],
        ]
        model = self._build_model(
            input_shape=(None, None),
            layer=MaxPool2D(),
            row_num=len(rows[0]),
            col_num=len(cols[0]),
            pos_type=Piecewise2D.POS_TYPE_PAIRS,
        )
        # Random file name in the temp dir avoids collisions between runs.
        model_path = os.path.join(tempfile.gettempdir(), 'keras_piece_test_save_load_%f.h5' % np.random.random())
        model.save(model_path)
        model = keras.models.load_model(model_path, custom_objects={
            'Piecewise2D': Piecewise2D,
            'MaxPool2D': MaxPool2D,
        })
        predicts = model.predict([np.asarray(data), np.asarray(rows), np.asarray(cols)]).tolist()
        expected = [
            [[2.0, 7.0]],
            [[2.0, 4.0]],
        ]
        self.assertEqual(expected, predicts)

    def test_pos_type_not_implemented(self):
        # An unknown pos_type must raise NotImplementedError at build time.
        with self.assertRaises(NotImplementedError):
            self._build_model(
                input_shape=(None,),
                layer=MaxPool2D(),
                row_num=13,
                col_num=17,
                pos_type='whatever',
            )
7,393
096d82e1f9e8832f6605d23c8bb324e045b6b14f
## Script (Python) "after_rigetta" ##bind container=container ##bind context=context ##bind namespace= ##bind script=script ##bind subpath=traverse_subpath ##parameters=state_change ##title= ## doc = state_change.object #Aggiornamento dello stato su plominoDocument doc.updateStatus() if script.run_script(doc, script.id) != False: #### OTHER CODE HERE #### # 1. INVIO MAIL RIGETTO if doc.naming('richiesta') != 'integrazione': doc.sendThisMail('rigetta') script.run_script(doc, script.id, suffix='post') #### SCRIPT ENDS HERE ####
7,394
303d56c18cce922ace45de1b8e195ebfdd874e23
class product(object): def __init__(self, item_name, price, weight, brand, status = "for sale"): self.item_name = item_name self.price = price self.weight = weight self.brand = brand self.cost = price self.status = status self.displayInfo() def displayInfo(self): print "Item name:", self.item_name print "Price:", self.price print "Weight:", self.weight print "Brand:", self.brand print "Cost:", self.cost print "Status:", self.status return self def sell(self): self.status = "Sold" return self def addTax(self, num): self.cost = self.cost * (1+num) return self def Return(self, reason): if reason == "Defective": self.cost = 0 self.status = reason elif reason == "Opened": self.cost = self.cost * 0.80 self.status = "for sale" elif reason == "Box": self.status = "for sale" return self print "add items to inv" product1 = product("Kona Dew", 499, 1.2, "Kona") product2 = product("Kona Dew Plus", 799, 1.5, "Kona") product3 = product("Kona Dr.Dew", 999, 1.2, "Kona") product1.addTax(0.10) product2.addTax(0.15) product3.addTax(0.11) print "add tax" product1.displayInfo() product2.displayInfo() product3.displayInfo() product1.sell() product2.sell() product3.sell() print "sell items" product1.displayInfo() product2.displayInfo() product3.displayInfo() product1.Return("Defective") product2.Return("Box") product3.Return("Opened") print "return items" product1.displayInfo() product2.displayInfo() product3.displayInfo()
7,395
a538c6d8c9f99bc37def5817a54c831393c051f3
#!/usr/bin/python try: fh = open('testfile','w') try: fh.write('This is my test file for this exception') finally: print "Going to close file" fh.close() except IOError: print" Error: can\'t find file or read data"
7,396
582cbacd26f4a3ed0b4f5c85af67758de7c05836
### To run the test use:
### python3 -m unittest test_script.py

import unittest
import os
import re


class TestCustomerDetails(unittest.TestCase):
    """Unit tests for ConfigurationParser.

    IMPROVEMENT: the tests are now hermetic -- setUpClass writes a sample
    config.txt if none exists, so the suite no longer depends on an external
    file being present in the working directory.
    """

    # Minimal device configuration matching the expected values below:
    # two customers, CUSTOMER_A bound to sub-interface/VLAN 100.
    SAMPLE_CONFIG = (
        "ip vrf CUSTOMER_A\n"
        "ip vrf CUSTOMER_B\n"
        "interface GigabitEthernet0/0.100\n"
        " encapsulation dot1Q 100\n"
        " ip vrf forwarding CUSTOMER_A\n"
    )

    @classmethod
    def setUpClass(cls):
        # Only create (and later remove) config.txt if we own it.
        cls._own_config = not os.path.exists("config.txt")
        if cls._own_config:
            with open("config.txt", "w") as f:
                f.write(cls.SAMPLE_CONFIG)

    @classmethod
    def tearDownClass(cls):
        if cls._own_config:
            os.remove("config.txt")

    def test_cu_name(self):
        ### Our script has a class ConfigurationParser, which will have method ParseCuNames
        cp = ConfigurationParser()
        ### expected names which is present on config.txt
        exp_names = ['CUSTOMER_A', 'CUSTOMER_B']
        ### names return by our code
        parsed_names = cp.ParseCuNames()
        ### Names returned by our code should be in list
        self.assertEqual(list, type(parsed_names))
        ### Names returned by our code should be the same as expected names (exp_names)
        self.assertEqual(exp_names, parsed_names)

    def test_cust_vlan(self):
        cp = ConfigurationParser()
        ### Example of the customer name from config.txt
        cu_name = "CUSTOMER_A"
        ### Example of the vlan for CUSTOMER_A from config.txt
        exp_vlan = ['100']
        ### ParseVlan will return a vlan number based on the given customer name
        parsed_vlan = cp.ParseVlan(cu_name)
        ### Compare manually checked VLAN (exp_vlan) with the one calculated by the script (parsed_vlan)
        self.assertEqual(exp_vlan, parsed_vlan)


class ConfigurationParser:
    """Parses customer names and VLAN ids out of a Cisco-style device config.

    BUG FIX: the original read config.txt into a *class* attribute at class
    definition time (so the file had to exist at import, and the handle was
    never closed) and then referenced the bare name `deviceConfig` inside the
    methods, which raises NameError at runtime.  The configuration is now
    read lazily in __init__ (optional `config_path` parameter, defaulting to
    the original "config.txt") and accessed via `self.deviceConfig`.
    """

    def __init__(self, config_path="config.txt"):
        ### Read the whole device configuration once per parser instance.
        with open(config_path, "r") as config_file:
            self.deviceConfig = config_file.read()

    def ParseCuNames(self):
        """Return the list of customer names from 'ip vrf <name>' lines."""
        ### Regular expression to find the customer name from 'ip vrf <Customer Name>' output.
        ### ('ip vrf forwarding X' lines do not match because of the trailing newline anchor.)
        cuNamePattern = r"ip vrf ([a-zA-Z_]+)\n"
        ### findall with groups returns only the matched group value.
        cuNames = re.findall(cuNamePattern, self.deviceConfig)
        return cuNames

    def ParseVlan(self, cuName):
        """Return the VLAN ids (== sub-interface ids) bound to `cuName`."""
        ### Regex to find the vlan number, which is the same as the sub-interface number.
        intPattern = r"interface GigabitEthernet0\/0.([0-9]+)\n\s+encapsulation\s+dot1Q [0-9]+\n\s+ip vrf forwarding %s" % (cuName)
        ### Apply the regex to the configuration to get the vlan numbers.
        allCuSubInterfaces = re.findall(intPattern, self.deviceConfig)
        return allCuSubInterfaces
7,397
0ebd3ca5fd29b0f2f2149dd162b37f39668f1c58
"""
Train an XGBoost regressor to predict wheat yield, then sweep the nitrogen
input of one random field row from -10% to +10% and plot predicted yield
against the corresponding greenhouse-gas (GHG) emissions.
"""
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np

from ghg import GHGPredictor

predictor = GHGPredictor()

dataset_df = pd.read_csv("db-wheat.csv", index_col=0)
# print(dataset_df.iloc[1])

# Drop the non-numeric / identifier columns before training.
dataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop', 'Previous crop'])
# print(dataset_df_2)

dataset = dataset_df_2.to_numpy()
# print(dataset)

# Last column is the target (yield); everything else is a feature.
X, Y = dataset[:, :-1], dataset[:, -1:]
# print(X)
# print(Y)

seed = 10
test_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# print(len(X_train))
# print(len(X_test))
# print(len(Y_train))
# print(len(Y_test))

model = XGBRegressor()
model.fit(X_train, Y_train)
# print(model)

print(dataset_df_2.columns)
print(model.feature_importances_)

# print(X_test.shape)
y_pred = model.predict(X_test)
# predictions = [round(value) for value in y_pred]

# NOTE(review): map/zip are lazy in Python 3, so `res` is never
# materialized unless the debug prints below are re-enabled.
Y_test = map(lambda x: x[0], Y_test)
# print(Y_test)
res = zip(y_pred, Y_test)
# print(list(res))

ghg_predictor = GHGPredictor()


def predict(model, row):
    """Sweep the row's nitrogen ('N') from 90% to 110% of its value.

    For each 1% step, predict the yield with `model` and estimate the total
    GHG emissions (fuel + managed soils) via the module-level `predictor`.

    Returns a list of [nitrogen, predicted_yield, total_ghg] triples, one
    per percentage step; also prints a summary line per step.
    """
    preds = []
    # print(row)
    # print(row.).shape)
    for perc in range(-10, 11):
        new_row = row.copy()
        row_copy = row.copy()
        # new_row = new_row.iloc[0]
        # new_row keeps only the model's feature columns.
        new_row = new_row.drop(labels=['Area', 'Year', 'Crop', 'Previous crop', 'Yield'])
        # print(new_row.labels)
        # new_row = new_row.tolist()
        # print(new_row)
        # print(type(new_row))
        # Scale nitrogen by the current percentage and apply it to both the
        # feature row (for the yield model) and the full row (for the GHG model).
        nitrogen = new_row['N'] * ((100 + perc) / 100)
        new_row['N'] = nitrogen
        row_copy['N'] = nitrogen
        new_row = np.array([new_row])
        # print(new_row)
        pred = model.predict(new_row)
        row_df = pd.DataFrame([row_copy])
        fuel_ghg = predictor.fuel_ghg_emissions(row_df["Area"], unit="kg")
        fuel_ghg = fuel_ghg.values[0]
        ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'], row_df['Area'], row_df['Crop'], row_df['Yield'])
        ms_ghg = ms_ghg.values[0]
        sum_ghg = fuel_ghg + ms_ghg
        area = row_df['Area'].iloc[0]
        # print(area)
        # print(sum_ghg)
        # print(row_df['N'])
        # print(sum_ghg)
        # GHG
        # fuel = ghg_predictor.fuel_ghg_emissions()
        preds.append([nitrogen, pred[0], sum_ghg])
        print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
    return preds


# accuracy = accuracy_score(Y_test, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))

import random

# Pick a random row that actually uses nitrogen (N > 0) for the sweep.
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
while rand_row['N'] == 0:
    rand_ind = random.randrange(0, len(dataset))
    rand_row = dataset_df.iloc[rand_ind]
# rand_row = rand_row[:-1]

preds = predict(model, rand_row)

import matplotlib.pyplot as plt

# Dual-axis plot: yield (left, red) and CO2 (right, blue) vs nitrogen.
fig, ax1 = plt.subplots()

n_amount = [x[0] for x in preds]
yield_p = [x[1] for x in preds]
ghg_p = [x[2] for x in preds]

color = 'tab:red'
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f'GHG and yield predictions (Area: {rand_row["Area"]} ha)')
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)

ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis

color = 'tab:blue'
ax2.set_ylabel('CO2 (kg)', color=color)  # we already handled the x-label with ax1
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)

print(n_amount)

fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
7,398
7af19f69e6c419649a5999f594118ad13833a537
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 14:10:15 2018

Project 9.5: back up a whole folder into a ZIP file.

@author: NEVERGUVEIP
"""

#! python3
import zipfile, os


def backupToZip(folder):
    """Back up the entire contents of `folder` into a numbered ZIP file.

    The archive is named '<folder>_<N>.zip', where N is the first number
    not already taken, so previous backups are never overwritten.
    """
    folder = os.path.abspath(folder)
    os.chdir(folder)

    base = os.path.basename(folder)
    prefix = base + '_'

    # Find the first unused backup name: base_1.zip, base_2.zip, ...
    number = 1
    while os.path.exists(prefix + str(number) + '.zip'):
        number += 1
    zipFilename = prefix + str(number) + '.zip'

    print('creating %s...' % (zipFilename))
    backupZip = zipfile.ZipFile(zipFilename, 'w')

    # Walk the whole folder tree, adding every directory and file.
    for foldername, subfolders, filenames in os.walk(folder):
        print('adding files in %s...' % (foldername))
        # Add the current folder itself to the ZIP file.
        backupZip.write(foldername)
        for filename in filenames:
            # Never include earlier backup archives in the new backup.
            if filename.startswith(prefix) and filename.endswith('.zip'):
                continue
            backupZip.write(os.path.join(foldername, filename))
    backupZip.close()
    print('......Done......')


backupToZip('.')
7,399
d39965c3070ec25230b4d6977ff949b3db070ab6
""" Add requests application (adding and managing add-requests) """ from flask import Blueprint __author__ = 'Xomak' add_requests = Blueprint('addrequests', __name__, template_folder='templates', ) from . import routes