index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
998,600
d2c51f9398e7d184ba0b0addd04601961d328551
# NOTE(review): Python 2 source (tuple-unpacking parameters in forward/backward
# and `print`-statement comments) collapsed onto a single line in this dump;
# left byte-identical. DB2 looks like a differentiable KB-lookup block: __init__
# builds `map` from target-entity vocab id -> list of (entity, relation) id
# pairs; forward() computes e2[i] = sum(e1[e1_dim] * r[r_dim]) over those pairs
# and backward() emits the matching product-rule gradients (de1, dr).
# `build_p` references self.entries_a, which is never assigned here — presumably
# set by a subclass or caller; TODO confirm against the `nn` package.
import numpy as np from collections import defaultdict from nn import Block, Vars, Dot, Softmax from vocab import Vocab from nn.utils import timeit VOCAB = """i would like some chinese food what about indian give me czech i like english food ok chong is a good place i have taj here go to hospoda tavern is not bad . """ class DB2(Block): def __init__(self, content, vocab): self.content = content self.vocab = Vocab() self.vocab.add('[EOS]') for word in vocab: self.vocab.add(word) self.map = defaultdict(list) self.map_rev = defaultdict(list) for e1, r, e2 in self.content: self.map[self.vocab[e2]].append((self.vocab[e1], self.vocab[r])) self.map = dict(self.map) def words_to_ids(self, words): res = [] for word in words: res.append(self.vocab.add(word)) return np.array(res) def get_vector(self, *words): res = np.zeros((len(self.vocab), )) for w in words: q_id = self.vocab[w] res[q_id] = 1.0 return res def build_p(self, u): return np.dot(self.entries_a, u) def softmax(self, x): ex = np.exp(x) return ex / np.sum(ex) def _fwd_map(self, x, map): w = np.zeros_like(x) for i, val in enumerate(x): if i in map: w[map[i]] += val return w def forward(self, (e1, r)): e2 = np.zeros_like(e1) for i in range(len(e1)): if i in self.map: #print 'counting', i for e1_dim, r_dim in self.map[i]: #print ' ', e1_dim, r_dim, e1[e1_dim], r[r_dim] e2[i] += e1[e1_dim] * r[r_dim] aux = Vars( e1=e1, r=r ) return ((e2, ), aux) def backward(self, aux, (dy, )): r = aux['r'] e1 = aux['e1'] de1 = np.zeros_like(dy) dr = np.zeros_like(dy) for i in range(len(dy)): if i in self.map: for e1_dim, r_dim in self.map[i]: de1[e1_dim] += r[r_dim] * dy[i] dr[r_dim] += e1[e1_dim] * dy[i] return (de1, dr, )
998,601
c587f328b21be869a5ef072622c9f4dfc2942530
# Read two whitespace-separated tokens from stdin and print them concatenated.
first_token, second_token = input().split()
print(first_token + second_token)
998,602
7e39d189ba2f83311b918c4e1bc5c04b1e98a897
# NOTE(review): dataset-generation script collapsed onto two lines in this
# dump; left byte-identical. Samples sin(2*pi*x)+sin(5*pi*x) on a 1-D grid and
# exp(-(x^2+y^2)/0.1) on a diagonal 2-D grid, splits 70/15/15 with
# shuffle=False, and np.save()s everything under formula1-data/ and
# formula2-data/. Those directories must already exist (np.save does not create
# them) — presumably prepared out of band; TODO confirm.
import math import numpy as np from sklearn.model_selection import train_test_split import os def generateFormula1Values(): values = [] x = -1 while (x <= 1): x = round(x, 3) values.append(x) x += 0.002 return np.array(values) def generateFormula2Values(): values =[] x = y = -1 while (x <= 1 and y <= 1): x = round(x, 2) y = round(y, 2) values.append([x, y]) x += 0.05 y += 0.05 return np.array(values) def formula1(xList): labels = [] for x in xList: formula = math.sin(2*math.pi*x)+math.sin(5*math.pi*x) labels.append(formula) return np.array(labels) def formula2(xyList): labels = [] for (x,y) in xyList: formula = math.exp(-(x**2+y**2)/0.1) labels.append(formula) return np.array(labels) #generate data values formula1Values = generateFormula1Values() formula2Values = generateFormula2Values() #shuffle data #np.random.shuffle(formula1Values) #np.random.shuffle(formula2Values) #generate labels formula1Labels = formula1(formula1Values) formula2Labels = formula2(formula2Values) #generate 700 samples for training set train1, rest1, train1labels, rest1labels = train_test_split(formula1Values, formula1Labels, test_size=0.3, shuffle=False) test1, val1, test1labels, val1labels = train_test_split(rest1, rest1labels, test_size=0.5, shuffle=False) #generate (42,2) samples for training set train2, rest2, train2labels, rest2labels = train_test_split(formula2Values, formula2Labels, test_size=0.3, shuffle=False) test2, val2, test2labels, val2labels = train_test_split(rest2, rest2labels, test_size=0.5, shuffle=False) print(train1.shape) print(test1labels.shape) print(val1.shape) print(train2.shape) print(test2.shape) print(val2.shape) np.save('formula1-data/values', formula1Values) np.save('formula1-data/labels', formula1Labels) np.save('formula1-data/traindata/training-data',train1) np.save('formula1-data/traindata/training-labels',train1labels) np.save('formula1-data/validationdata/validation-data',val1) np.save('formula1-data/validationdata/validation-labels',val1labels) 
np.save('formula1-data/testdata/test-data',test1) np.save('formula1-data/testdata/test-labels',test1labels) np.save('formula2-data/values', formula2Values) np.save('formula2-data/labels', formula2Labels) np.save('formula2-data/traindata/training-data',train2) np.save('formula2-data/traindata/training-labels',train2labels) np.save('formula2-data/validationdata/validation-data',val2) np.save('formula2-data/validationdata/validation-labels',val2labels) np.save('formula2-data/testdata/test-data',test2) np.save('formula2-data/testdata/test-labels',test2labels)
998,603
a9298a1eb713ef5af695abbe1f84aa78b32d5f46
import math


class Queue:
    """Fixed-capacity FIFO implemented as a ring buffer.

    One spare slot in `storage` distinguishes "full" from "empty"
    (head == tail means empty).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.storage = [None] * (self.capacity + 1)
        self.head = 0
        self.tail = 0

    def enqueue(self, x):
        if (self.head - self.tail) % len(self.storage) == self.capacity:
            raise Exception("Queue is full")
        self.storage[self.head] = x
        self.head = (self.head + 1) % len(self.storage)

    def dequeue(self):
        result = self.storage[self.tail]
        self.tail = (self.tail + 1) % len(self.storage)
        return result

    def is_empty(self):
        return self.head == self.tail


def capacity(graph, flow, u, v):
    """Residual capacity of edge u->v.

    Forward edge: remaining room graph[u][v] - flow[u][v].
    Reverse edge: flow that could be cancelled, flow[v][u].
    NOTE(review): if BOTH graph[u][v] and graph[v][u] are positive
    (antiparallel edges) the reverse term overwrites the forward term —
    preserved from the original; such graphs are not fully supported.
    """
    result = 0
    if graph[u][v] > 0:
        result = graph[u][v] - flow[u][v]
    if graph[v][u] > 0:
        result = flow[v][u]
    return result


def bfs(graph, flow, source, target):
    """BFS over the residual graph.

    Returns a parent map; parent_map[target] is None when no augmenting
    path remains.
    """
    q = Queue(len(graph))
    q.enqueue((source, None))
    parent_map = [None] * len(graph)
    visited = [False] * len(graph)
    # BUG FIX: mark nodes visited when ENQUEUED, not when dequeued. The
    # original marked on dequeue, so a node could be enqueued several times:
    # its parent_map entry was overwritten by later dequeues and, on larger
    # graphs, the duplicates could overflow the fixed-capacity queue.
    visited[source] = True
    while not q.is_empty():
        u, parent = q.dequeue()
        parent_map[u] = parent
        if u == target:
            break  # augmenting path found; no need to finish the sweep
        for v in range(len(graph)):
            if not visited[v] and capacity(graph, flow, u, v) > 0:
                visited[v] = True
                q.enqueue((v, u))
    return parent_map


def max_flow(graph, source, target):
    """Edmonds-Karp maximum flow; returns the flow matrix."""
    flow = [[0] * len(graph) for _ in range(len(graph))]
    while (parent_map := bfs(graph, flow, source, target))[target] is not None:
        # Bottleneck capacity along the augmenting path.
        u = parent_map[target]
        v = target
        min_capacity = math.inf
        while u is not None:
            c = capacity(graph, flow, u, v)
            if c < min_capacity:
                min_capacity = c
            u = parent_map[u]
            v = parent_map[v]
        # Push the bottleneck along the path (cancelling on reverse edges).
        u = parent_map[target]
        v = target
        while u is not None:
            if graph[u][v] > 0:
                flow[u][v] = flow[u][v] + min_capacity
            else:
                flow[v][u] = flow[v][u] - min_capacity
            u = parent_map[u]
            v = parent_map[v]
    return flow


graph = [
    [0, 80, 10, 0],
    [0, 0, 50, 20],
    [0, 0, 0, 80],
    [0, 0, 0, 0]
]

print(max_flow(graph, 0, 3))
998,604
f63dc97cb43a94f83999e1c991f91730898e455b
# NOTE(review): WTForms form definitions collapsed onto one line; left
# byte-identical (field declaration order affects rendering, and the installed
# wtforms version is unknown here). Uses the legacy lowercase validator
# aliases `validators.required` / `validators.length`; modern WTForms spells
# these DataRequired / Length — worth migrating when the dependency is pinned.
# -*- coding: utf-8 -*- """ webpanda.jobs.forms ~~~~~~~~~~~~~~~~~~~~~~~ Jobs forms """ from webpanda.forms import RedirectForm from wtforms import IntegerField, StringField, BooleanField, SubmitField, HiddenField, TextAreaField, SelectField from wtforms import validators from wtforms.validators import Length, Required class NewJobForm(RedirectForm): site = SelectField(u'Computing site', [validators.required()], coerce=int) distr = SelectField(u'Distributive', coerce=str) params = TextAreaField(u'Parameters', [validators.required(), validators.length(1, 1000)]) container = HiddenField(default="") corecount = IntegerField('Cores', default=1) ftpdir = StringField(u'FTP DIR') submitbtn = SubmitField(u'Send job') onebyone = BooleanField(u'One file one job', default=False) tags = StringField(u'Tags') class JobResendForm(RedirectForm): id_ = IntegerField('id_', default=1) class JobKillForm(RedirectForm): id_ = IntegerField('id_', default=1) class NewDistrForm(RedirectForm): name = StringField('Name', [validators.required(), validators.length(1, 64)]) version = StringField('Version', [validators.required(), validators.length(1, 64)]) release = IntegerField('Release')
998,605
9b92cfe96bd7c6b03d756c9fb0ee4fba087f359d
#!/usr/bin/env python # # The output of parallel_summarize_job.sh can be very verbose and difficult to # sift through to find jobs that were not correctly processed. This script # scrapes the combined stderr+stdout stream of parallel_summarize_job.sh and # highlights the darshan log file and error generated for jobs that failed to # produce a summary json file. # import os import sys import re REX_START = re.compile('UserWarning: Unhandled exception while processing (.*)$') REX_END = re.compile('^([a-zA-z]+:.*|AssertionError)$') filename = None for line in open(sys.argv[1], 'r'): found = REX_START.search(line) if filename is None and found: filename = found.group(1) elif filename is not None: if REX_END.search(line): print line.strip(), filename filename = None
998,606
961a7d92e9dc418cd5c72f4453f31fd47de25936
# NOTE(review): collapsed onto one line; left byte-identical (depends on the
# third-party `embed` and `falkon` packages). mmd() computes the maximum mean
# discrepancy between two sequence sets: the linear kernel reduces to the
# distance between mean embeddings, with an optional 1000-round permutation
# p-value; the gaussian kernel uses falkon's GaussianKernel over full Gram
# matrices. Caveats visible in the code: an unrecognised `embedding` leaves
# `embed` unbound (NameError) and an unrecognised `kernel` falls through and
# returns None silently — worth guarding, but left unchanged here.
import numpy as np from embed.spectrum import spectrum_map from falkon import Falkon, kernels import torch from tqdm import tqdm def mmd(seq1=None, seq2=None, emb1=None, emb2=None, mean1=None, mean2=None, embedding='spectrum', kernel='linear', return_pvalue=False, progress=False, **kwargs): ''' Calculates MMD between two sets of sequences. Optionally takes embeddings or mean embeddings of sequences if these have been precomputed for efficiency. If <return_pvalue> is true, a Monte-Carlo estimate (1000 iterations) of the p-value is returned. Note that this is compute-intensive and only implemented for the linear kernel. ''' if embedding == 'spectrum': embed = spectrum_map if embedding == 'profet': raise NotImplementedError if embedding == 'unirep': raise NotImplementedError if mean1 is None and emb1 is None: emb1 = embed(seq1, progress=progress, **kwargs) if mean2 is None and emb2 is None: emb2 = embed(seq2, progress=progress, **kwargs) if mean1 is None: x = np.mean(emb1, axis=0) else: x = mean1 if mean2 is None: y = np.mean(emb2, axis=0) else: y = mean2 if kernel == 'linear': MMD = np.sqrt(np.dot(x,x) + np.dot(y,y) - 2*np.dot(x,y)) if return_pvalue: m = len(emb1) agg = np.concatenate((emb1,emb2),axis=0) mmds = [] it = tqdm(range(1000)) if progress else range(1000) for i in it: np.random.shuffle(agg) _emb1 = agg[:m] _emb2 = agg[m:] mmds.append(mmd(emb1=_emb1, emb2=_emb2)) rank = float(sum([x<=MMD for x in mmds]))+1 pval = (1000+1-rank)/(1000+1) return MMD, pval else: return MMD elif kernel == 'gaussian': gauss = kernels.GaussianKernel(sigma=1.0) x = torch.from_numpy(emb1) y = torch.from_numpy(emb2) m = float(len(emb1)) n = float(len(emb2)) Kxx = gauss(x,x).numpy() Kxy = gauss(x,y).numpy() Kyy = gauss(y,y).numpy() return np.sqrt( np.sum(Kxx) / (m**2) - 2 * np.sum(Kxy) / (m*n) + np.sum(Kyy) / (n**2) )
998,607
c73fa600c5c1e275ba9b7e8e5beeb3969d24d6f7
# NOTE(review): Django model definitions collapsed onto one line; left
# byte-identical — field definitions are database schema, and renaming the
# non-PEP8 class names (tImage, company) would require a migration, so only
# noting the inconsistency. All ImageFields upload to './img'.
from django.db import models class tImage(models.Model): name=models.CharField(max_length=30) data=models.CharField(max_length=350) img=models.ImageField(upload_to = './img') class Image(models.Model): img = models.ImageField(upload_to = './img') class Text(models.Model): text = models.CharField(max_length=1024) class company(models.Model): name = models.CharField(max_length=64) intr = models.TextField() img = models.ImageField(upload_to = './img')
998,608
f749806b18d56ad6e6c1bf57cdbbd7feaf1985e4
# Read k (weekly increment), n (amount already paid, presumably) and w (number
# of weeks) from stdin; print the outstanding total, or 0 if nothing is owed.
increment, paid, weeks = map(int, input().split())
total_due = sum(increment * week for week in range(1, weeks + 1))
print(total_due - paid if total_due > paid else 0)
998,609
1cc8b7d60587079e9e306274d784fc44f8f1a95b
# NOTE(review): KratosMultiphysics GeoMechanics element-type benchmark suite,
# collapsed onto four lines in this dump — two statements are split across the
# dump's line breaks (`file_path = ` / `test_helper.get_file_path(...)` and a
# list comprehension), so the lines below are left byte-identical rather than
# restyled. Each test runs a .gid model via test_helper.run_kratos and asserts
# the analytical linear-elastic-block solution: sigma_yy = -1e4 (total and
# effective), eps_yy = -0.00033333, top-node uy = -0.00033333, and zero
# horizontal (and, in 3D, out-of-plane) movement at the integration points.
import sys import os import KratosMultiphysics.KratosUnittest as KratosUnittest import test_helper class KratosGeoMechanicsElementTypeTests(KratosUnittest.TestCase): """ This class contains benchmark tests which are checked with the analytical solution """ def setUp(self): # Code here will be placed BEFORE every test in this TestCase. pass def tearDown(self): # Code here will be placed AFTER every test in this TestCase. pass def test_triangle_3n(self): test_name = 'test_triangle_3n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 1, 5] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_3n_rebuild_level_0(self): test_name = 'test_triangle_3n_rebuild_0' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 1, 5] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_6n(self): test_name = 'test_triangle_6n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 2, 4, 9, 15] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_10n(self): test_name = 'test_triangle_10n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [9, 8, 7, 6] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_15n(self): test_name = 'test_triangle_15n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [12, 11, 10, 9, 8] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_3n_fic(self): test_name = 'test_triangle_3n_fic' file_path = 
test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 1, 5] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_triangle_6n_fic(self): test_name = 'test_triangle_6n_fic' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 2, 4, 9, 15] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_quad_4n(self): test_name = 'test_quad_4n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 1, 5] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_quad_8n(self): test_name = 'test_quad_8n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 2, 4, 8, 12] n_dim = 2 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_tetra_4n(self): test_name = 'test_tetra_4n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 2, 9, 3, 6, 13, 8, 14, 20] n_dim = 3 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def test_tetra_10n(self): test_name = 'test_tetra_10n' file_path = test_helper.get_file_path(os.path.join('.', test_name + '.gid')) simulation = test_helper.run_kratos(file_path) top_node_nbrs = [0, 3, 9, 28, 53, 1, 6, 13, 32, 10, 15, 21, 44, 76, 27, 34, 41, 64, 94, 51, 56, 74, 95, 110] n_dim = 3 self.assert_linear_elastic_block(simulation, top_node_nbrs, n_dim) def assert_linear_elastic_block(self,simulation, top_node_nbrs, n_dim): """ Assert results of a linear elastic block. The sides of the block can move freely in vertical direction and are fixed in horizontal direction. The bottom of the block is fixed. 
On top of the block, a load of 10kN/m2 is placed. Results are: total stresses, effective stresses, displacements and green langrange strains. :param simulation: Kratos simulation :param top_node_nbrs: node numbers of the nodes at the top of the geometry :param n_dim: number of dimensions :return: """ total_stresses = test_helper.get_total_stress_tensor(simulation) total_stresses_xx = [integration_point[0,0] for element in total_stresses for integration_point in element] if n_dim >= 2: total_stresses_yy = [integration_point[1,1] for element in total_stresses for integration_point in element] if n_dim >= 3: total_stresses_zz = [integration_point[2,2] for element in total_stresses for integration_point in element] effective_stresses = test_helper.get_cauchy_stress_tensor(simulation) effective_stresses_xx = [integration_point[0,0] for element in effective_stresses for integration_point in element] if n_dim >= 2: effective_stresses_yy = [integration_point[1,1] for element in effective_stresses for integration_point in element] if n_dim >= 3: effective_stresses_zz = [integration_point[2,2] for element in effective_stresses for integration_point in element] displacements = test_helper.get_displacement(simulation) x_displacements = [displacement[0] for displacement in displacements] if n_dim >= 2: y_displacements = [displacement[1] for displacement in displacements] if n_dim >= 3: z_displacements = [displacement[2] for displacement in displacements] green_lagrange_strains = test_helper.get_green_lagrange_strain_tensor(simulation) green_lagrange_strains_xx = [integration_point[0,0] for element in green_lagrange_strains for integration_point in element] if n_dim == 2: green_lagrange_strains_yy = [integration_point[1,1] for element in green_lagrange_strains for integration_point in element] elif n_dim == 3: green_lagrange_strains_yy = [integration_point[1,1] for element in green_lagrange_strains for integration_point in element] green_lagrange_strains_zz = 
[integration_point[2,2] for element in green_lagrange_strains for integration_point in element] # Assert integration point information for idx, total_stress_xx in enumerate(total_stresses_xx): self.assertAlmostEqual(0.0, total_stress_xx) self.assertAlmostEqual(-1e4, total_stresses_yy[idx]) if n_dim >= 3: self.assertAlmostEqual(0.0, total_stresses_zz[idx]) self.assertAlmostEqual(0.0, effective_stresses_xx[idx]) self.assertAlmostEqual(-1e4, effective_stresses_yy[idx]) if n_dim >= 3: self.assertAlmostEqual(0.0, effective_stresses_zz[idx]) self.assertAlmostEqual(0.0, green_lagrange_strains_xx[idx]) self.assertAlmostEqual(-0.00033333, green_lagrange_strains_yy[idx]) if n_dim >= 3: self.assertAlmostEqual(0.0, green_lagrange_strains_zz[idx]) # Assert displacements for x_displacement in x_displacements: self.assertAlmostEqual(0.0, x_displacement) for top_node_nbr in top_node_nbrs: self.assertAlmostEqual(-0.00033333, y_displacements[top_node_nbr], 6) if n_dim >= 3: for z_displacement in z_displacements: self.assertAlmostEqual(0.0, z_displacement) if __name__ == '__main__': KratosUnittest.main()
998,610
a3467e08ec49bd1bf69d7f174379fe036c27fbdf
# Read first term a, common ratio r and index n; print the n-th term of the
# geometric progression a * r^(n-1).
first_term, ratio, index = map(int, input().split())
nth_term = first_term * ratio ** (index - 1)
print(nth_term)
998,611
bdbca3b1cee54f9f910db2c294ac4574ee2465f0
from flask import Blueprint, render_template
from .models import TODOS

HelloApi = Blueprint('hello_api', __name__)


@HelloApi.route('/<name>')
def hello_user(name):
    """Render hello.html for `name` with all TODOs ordered by due date.

    BUG FIX: the original ran the TODOS query once at module import time,
    which (a) requires a database/application context during import and
    (b) caches a stale task list for the life of the process. The query
    now runs per request.
    """
    tasks = TODOS.query.order_by(TODOS.due_date.asc()).all()
    return render_template('hello.html', user=name, tasks=tasks)
998,612
d894cabb566392b38c3d0166ec323223ea816135
# NOTE(review): mcpi (Minecraft Pi API) script collapsed onto one line; left
# byte-identical. Infinite loop: every 0.5 s it reads the player's tile
# position and places block id 46 there and id 152 one tile over on x —
# presumably TNT and a redstone block respectively; TODO confirm ids against
# the mcpi block table. There is no exit condition; stop with Ctrl-C.
# -*- coding: utf-8 -*- """ Created on Mon Feb 1 14:36:21 2021 @author: USER """ from mcpi.minecraft import Minecraft as mcs import time mc = mcs.create() while True: time.sleep(0.5) x,y,z = mc.player.getTilePos() mc.setBlock(x,y,z,46) mc.setBlock(x+1,y,z,152)
998,613
6aeb20d294b3eea42a45da783e696d9b99c1af3b
# -*- coding: utf-8 -*-
"""
@author: 陳柏劭
"""
# Read five subject scores, print their average and the highest/lowest-scoring
# subject. (Prompt and output strings are user-facing and kept verbatim.)
chi = int(input("國文:"))
eng = int(input("英文:"))
math = int(input("微積分:"))
pe = int(input("體育:"))
code = int(input("程式設計:"))

avg = round((chi + eng + math + pe + code) / 5, 2)
print("平均分數:" + str(avg))

# BUG FIX: the original built a dict keyed by score, so two subjects with the
# same score collided and the later one silently overwrote the earlier.
# Keep (subject, score) pairs and select extremes by score instead; on ties
# the first subject in input order is reported.
subjects = [("國文", chi), ("英文", eng), ("微積分", math),
            ("體育", pe), ("程式設計", code)]
best_subject, best_score = max(subjects, key=lambda pair: pair[1])
worst_subject, worst_score = min(subjects, key=lambda pair: pair[1])
print("最高分科目:" + best_subject + str(best_score) + "分")
print("最低分科目:" + worst_subject + str(worst_score) + "分")
998,614
c06217c63dd9a7d955ae6f2545773486d84158b0
'''
4. Write a Python program to accept a filename from the user and print the
extension of that.
Sample filename : abc.java
Output : java
'''
# Prompt for a filename and print the text after the last dot.
filename = input("Please enter a file full name: ")
parts = filename.split(".")
# BUG FIX: the original printed repr(parts[-1]), which wraps the extension in
# quotes ('java') and contradicts the documented sample output (java).
# Also renamed the locals: `file` and `type` shadowed builtins.
print("Your file type is: " + parts[-1])
# NOTE(review): a name with no dot prints the whole name, and "abc." prints
# an empty extension — os.path.splitext would handle both; confirm desired
# behavior before changing.
998,615
c13a4d01c36b7ef5c2a55822094ed2f2955e8916
from adk.adkplugin import ADKPlugin
import os
import logging


class InitPlugin(ADKPlugin):
    """ADK plugin that bootstraps the environment by creating the working
    directories named in the settings mapping."""

    def name(self):
        # Plugin identifier used by the ADK framework.
        return "init"

    def describe(self):
        # Human-readable one-line description.
        return "Bootstrap the environment"

    def run(self, appliance, settings):
        """Create each configured directory that is present in `settings`.

        BUG FIX: the original list contained "log_directory" twice;
        the duplicate has been removed.
        """
        for directory in ("log_directory", "output_directory", "temp_directory"):
            if directory in settings:
                # create_directory is inherited from ADKPlugin — presumably
                # idempotent for existing paths; TODO confirm.
                self.create_directory(settings[directory])


def get_plugin():
    """Factory entry point the ADK framework calls to obtain the plugin."""
    return InitPlugin()
998,616
41278aea992a4e1503ca559eb8eb917f986b123f
# NOTE(review): OpenCV Haar-cascade demo collapsed onto one line; left
# byte-identical (depends on cv2/imutils, hard-coded Windows cascade paths and
# a local "1.mp4"). The running-average weight 0.000001 keeps `avg` essentially
# equal to the first frame, so frameDelta shows motion relative to frame one;
# the face-cascade branch and several drawing lines are commented out. Loop
# exits on 'q'. Caveat: `ret` from video.read() is never checked, so end of
# video crashes in imutils.resize — worth guarding, but left unchanged here.
import numpy as np import time import imutils import cv2 face_cascade = cv2.CascadeClassifier('C:/Program Files/Python37/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml') low_cascade = cv2.CascadeClassifier('C:/Program Files/Python37/Lib/site-packages/cv2/data/haarcascade_lowerbody.xml') video = cv2.VideoCapture("1.mp4") width = 800 avg = None while 1: ret, frame = video.read() flag = True text="" frame = imutils.resize(frame, width=width) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY ) gray = cv2.GaussianBlur(gray, (5, 5), 0) if avg is None: avg = gray.copy().astype("float") continue cv2.accumulateWeighted(gray, avg, 0.000001) frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg)) # faces = face_cascade.detectMultiScale(frameDelta, # scaleFactor=1.1, # minNeighbors=5, # minSize=(30, 30)) low = low_cascade.detectMultiScale(frame, 1.1 , 3) # for (x,y,w,h) in faces: # cv2.rectangle(frame, (x,y), (x+w, y+h), (12,150,100),2) for (x,y,w,h) in low: cv2.rectangle(frame, (x,y), (x+w, y+h), (12,150,100),2) # cv2.line(frame, (0, 110), (width * 3//4,170), (0,255,0), 2) # cv2.line(frame, (0, 160), (width * 3//4,220), (0,255,0), 2) cv2.imshow("Frame",frame) cv2.imshow("Gray", gray) cv2.imshow("FrameDelta", frameDelta) key = cv2.waitKey(1) & 0xFF # time.sleep(0.03) if key == ord('q'): break video.release() cv2.destroyAllWindows()
998,617
531526065773954d044d4e1aafa9b831415e9d49
import os
import tools
import sad
import feat


def run_bic(attr):
    """Run audioseg's `sbic` BIC segmentation over the detected speech.

    `attr` must carry the 'sad', 'mfcc' and 'bic' file paths; the sbic
    binary directory comes from tools.set_path()['audioseg'].
    """
    paths = tools.set_path()
    command = '%s/sbic --segmentation=%s --label=speech %s %s' % (
        paths['audioseg'], attr['sad'], attr['mfcc'], attr['bic'])
    # NOTE(review): os.system with an interpolated string breaks on paths
    # containing spaces/shell metacharacters; subprocess.run([...]) would be
    # safer if the inputs are ever untrusted.
    os.system(command)
    return


if __name__ == '__main__':
    wavname = '/Users/navidshokouhi/Downloads/unimaquarie/projects/ami_sample/amicorpus/ES2002a/audio/ES2002a.Mix-Headset.wav'
    basename = tools.gen_uid(wavname)
    attr = {
        'audio': wavname,
        'sad': './%s_sad.txt' % (basename),
        'mfcc': './%s.mfc' % (basename),
        'bic': './%s_bic.txt' % (basename),
    }
    # Pipeline: speech activity detection -> MFCC extraction -> BIC segmentation.
    sad.run_sad(attr)
    feat.run_mfcc(attr)
    run_bic(attr)
998,618
b8c41f7f3a8821c87ab32f39966039257534bd37
from flask import Flask, render_template, request, Response, redirect
import json

from classes.Updater import Updater
from classes.DI import DI
from classes.Config import Config

app = Flask(__name__)

# pip install requests
# pip install json-rpc


def _read_file(path):
    """Read a bundled asset from disk and return its contents as text."""
    # `with` guarantees the handle is closed even if read() raises.
    with open(path, "r") as f:
        return f.read()


def _require_form_params(names, backlink):
    """Validate that every name in *names* is present in request.form.

    Returns a rendered error page for the first missing parameter (same
    message text as the original per-field checks), or None if all present.
    """
    for name in names:
        if name not in request.form:
            return render_template('error.html', error='No "%s" in POST params' % name,
                                   code='NONE', backlink=backlink)
    return None


@app.route('/bootstrap.css')
def bootstrap_css():
    """Serve the bundled Bootstrap stylesheet."""
    return Response(_read_file('css/bootstrap.min.css'), mimetype='text/css')


@app.route('/jquery.js')
def jquery_js():
    """Serve the bundled jQuery script."""
    return Response(_read_file('js/jquery-3.5.1.min.js'), mimetype='text/js')


@app.route('/bootstrap.js')
def bootstrap_js():
    """Serve the bundled Bootstrap script."""
    return Response(_read_file('js/bootstrap.bundle.min.js'), mimetype='text/js')


@app.route('/')
def hello_world():
    """Dashboard page; redirects to /config when the blockchain RPC is unreachable."""
    blk = DI.get_blockchain()
    try:
        data = blk.get_info()
        return render_template('main.html', data=data['result'])
    except Exception:
        return redirect('/config')


@app.route('/my')
def show_my():
    """List my blockchain records that carry full resource metadata."""
    blk = DI.get_blockchain()
    if not blk.check():
        return render_template('error_rpc.html', nav='my')
    records = blk.get_my()
    my_records = []
    for record in records:
        record['decoded'] = json.loads(record['value'])
        # 175 blocks ~ one day; presumably converts block count to days — TODO confirm
        record['expires_in'] = round(record['expires_in'] / 175)
    for record in records:
        if ('network' in record['decoded'] and 'type' in record['decoded']
                and 'lang' in record['decoded'] and 'tags' in record['decoded']):
            my_records.append(record)
    return render_template('my.html', records=my_records, nav='my')


@app.route('/my_descriptions')
def show_my_descriptions():
    """List my blockchain records that are bare descriptions (no resource metadata)."""
    blk = DI.get_blockchain()
    if not blk.check():
        return render_template('error_rpc.html', nav='my_descriptions')
    records = blk.get_my()
    my_records = []
    for record in records:
        record['decoded'] = json.loads(record['value'])
        record['expires_in'] = round(record['expires_in'] / 175)
    for record in records:
        if ('network' not in record['decoded'] and 'type' not in record['decoded']
                and 'lang' not in record['decoded'] and 'tags' not in record['decoded']):
            my_records.append(record)
    return render_template('my_descriptions.html', records=my_records, nav='my_descriptions')


@app.route('/submit', methods=['GET'])
def show_submit():
    """Show the new-resource submission form."""
    return render_template('submit.html', nav='submit')


@app.route('/submit', methods=['POST'])
def submit():
    """Post a new fully-described resource to the blockchain."""
    # Bug fix: 'lang' is read below but was never validated like the other fields.
    error = _require_form_params(
        ['days', 'url', 'network', 'type', 'lang', 'tags', 'description'], '/submit')
    if error:
        return error
    resource = {
        'url': request.form['url'],
        'network': request.form['network'],
        'type': request.form['type'],
        'lang': request.form['lang'],
        'tags': request.form['tags'],
        'description': request.form['description']
    }
    blk = DI.get_blockchain()
    data = blk.post_resource(resource, int(request.form['days']))
    if data['error'] and data['error']['code'] < 0:
        return render_template('error.html', error=data['error']['message'],
                               code=data['error']['code'], backlink='/my')
    return render_template('ok.html', message='Resource submitted OK', backlink='/my')


@app.route('/add/<int:resource_id>', methods=['GET'])
def show_add(resource_id):
    """Show the form to attach a new description to an existing resource."""
    rep = DI.get_repository()
    resource = rep.read_resource(resource_id)
    tags = rep.read_tags(resource['network_id'], resource['lang_id'], resource['type_id'])
    return render_template('add.html', nav='submit', resource=resource, tags_id=tags[0]['id'])


@app.route('/add/<int:resource_id>', methods=['POST'])
def add(resource_id):
    """Post a description-only record for an existing resource."""
    # Bug fix: resource_id is an int; the original '/add/'+resource_id raised
    # TypeError whenever an error backlink had to be built.
    backlink = '/add/' + str(resource_id)
    error = _require_form_params(['days', 'url', 'description'], backlink)
    if error:
        return error
    resource = {
        'url': request.form['url'],
        'description': request.form['description']
    }
    blk = DI.get_blockchain()
    data = blk.post_resource(resource, int(request.form['days']))
    if data['error'] and data['error']['code'] < 0:
        return render_template('error.html', error=data['error']['message'],
                               code=data['error']['code'], backlink='/my')
    return render_template('ok.html', message='Resource submitted OK', backlink='/my')


@app.route('/my/edit/<string:resource>', methods=['GET'])
def show_edit(resource):
    """Show the edit form for one of my full resource records."""
    blk = DI.get_blockchain()
    data = blk.show_resource(resource)
    resourse = data['result']  # NOTE: templates expect the 'resourse' spelling
    decoded = json.loads(resourse['value'])
    resourse.update(decoded)
    return render_template('edit.html', nav='my', resourse=resourse)


@app.route('/my/edit/<string:name>', methods=['POST'])
def edit(name):
    """Apply edits to one of my full resource records."""
    backlink = '/my/edit/' + name
    # Bug fix: 'lang' is read below but was never validated like the other fields.
    error = _require_form_params(
        ['days', 'url', 'network', 'type', 'lang', 'tags', 'description'], backlink)
    if error:
        return error
    resource = {
        'url': request.form['url'],
        'network': request.form['network'],
        'type': request.form['type'],
        'lang': request.form['lang'],
        'tags': request.form['tags'],
        'description': request.form['description']
    }
    blk = DI.get_blockchain()
    data = blk.edit_resource(name, resource, int(request.form['days']))
    if data['error'] and data['error']['code'] < 0:
        return render_template('error.html', error=data['error']['message'],
                               code=data['error']['code'], backlink='/my')
    return render_template('ok.html', nav='my', message='Resource "' + name + '" edited OK',
                           backlink='/my')


@app.route('/my_descriptions/edit/<string:resource>', methods=['GET'])
def show_edit_description(resource):
    """Show the edit form for one of my description-only records."""
    blk = DI.get_blockchain()
    data = blk.show_resource(resource)
    resourse = data['result']  # NOTE: templates expect the 'resourse' spelling
    decoded = json.loads(resourse['value'])
    resourse.update(decoded)
    return render_template('descr.html', nav='my_descriptions', resourse=resourse)


@app.route('/my_descriptions/edit/<string:name>', methods=['POST'])
def edit_description(name):
    """Apply edits to one of my description-only records."""
    error = _require_form_params(['days', 'url', 'description'], '/my/edit/' + name)
    if error:
        return error
    resource = {
        'url': request.form['url'],
        'description': request.form['description']
    }
    blk = DI.get_blockchain()
    data = blk.edit_resource(name, resource, int(request.form['days']))
    if data['error'] and data['error']['code'] < 0:
        return render_template('error.html', error=data['error']['message'],
                               code=data['error']['code'], backlink='/my_descriptions')
    return render_template('ok.html', nav='my', message='Resource "' + name + '" edited OK',
                           backlink='/my_descriptions')


@app.route('/my/show/<string:resource>')
def show(resource):
    """Show one of my records, with its JSON payload decoded."""
    blk = DI.get_blockchain()
    data = blk.show_resource(resource)
    resourse = data['result']  # NOTE: templates expect the 'resourse' spelling
    resourse['decoded'] = json.loads(resourse['value'])
    return render_template('resourse.html', nav='my', resourse=resourse)


@app.route('/my/delete/<string:resource>')
def delete(resource):
    """Delete one of my records from the blockchain."""
    blk = DI.get_blockchain()
    data = blk.delete_resource(resource)
    if data['error'] and data['error']['code'] < 0:
        return render_template('error.html', error=data['error']['message'],
                               code=data['error']['code'], backlink='/my')
    return render_template('ok.html', message='DELETED OK', backlink='/my')


@app.route('/sync')
def sync():
    """Force a full blockchain -> local repository synchronisation."""
    rep = DI.get_repository()
    blk = DI.get_blockchain()
    upd = Updater(rep, blk)
    upd.sync()
    return 'ok'


@app.route('/networks')
def show_networks():
    """Browse step 1: list all networks (refreshes the repository first)."""
    rep = DI.get_repository()
    blk = DI.get_blockchain()
    upd = Updater(rep, blk)
    upd.check()
    networks = rep.read_networks()
    return render_template('list/networks.html', title='Main', nav='tags', networks=networks)


@app.route('/langs/<int:network_id>')
def show_langs(network_id):
    """Browse step 2: list languages within a network."""
    rep = DI.get_repository()
    langs = rep.read_langs(network_id)
    network = rep.get_network_name_by_id(network_id)
    return render_template('list/langs.html', title='Main', nav='tags', langs=langs,
                           network_id=network_id, network=network)


@app.route('/types/<int:network_id>/<int:lang_id>')
def show_types(network_id, lang_id):
    """Browse step 3: list resource types within a network/language."""
    rep = DI.get_repository()
    types = rep.read_types(network_id, lang_id)
    network = rep.get_network_name_by_id(network_id)
    lang = rep.get_lang_name_by_id(lang_id)
    return render_template('list/types.html', title='Main', nav='tags', types=types,
                           network_id=network_id, lang_id=lang_id, network=network, lang=lang)


@app.route('/tags/<int:network_id>/<int:lang_id>/<int:type_id>')
def show_tags(network_id, lang_id, type_id):
    """Browse step 4: list tags within a network/language/type."""
    rep = DI.get_repository()
    tags = rep.read_tags(network_id, lang_id, type_id)
    network = rep.get_network_name_by_id(network_id)
    lang = rep.get_lang_name_by_id(lang_id)
    type = rep.get_type_name_by_id(type_id)
    return render_template('list/tags.html', title='Main', nav='tags', tags=tags,
                           network_id=network_id, lang_id=lang_id, type_id=type_id,
                           network=network, lang=lang, type=type)


@app.route('/links/<int:network_id>/<int:lang_id>/<int:type_id>/<int:tags_id>')
def show_resources(network_id, lang_id, type_id, tags_id):
    """Browse step 5: list resources matching the selected tag path."""
    rep = DI.get_repository()
    resources = rep.read_resources(network_id, lang_id, type_id, tags_id)
    network = rep.get_network_name_by_id(network_id)
    lang = rep.get_lang_name_by_id(lang_id)
    type = rep.get_type_name_by_id(type_id)
    tag = rep.get_tag_name_by_id(tags_id)
    return render_template('list/resources.html', title='Main', nav='tags', resources=resources,
                           network_id=network_id, lang_id=lang_id, type_id=type_id,
                           tags_id=tags_id, network=network, lang=lang, type=type, tag=tag)


@app.route('/show/<int:network_id>/<int:lang_id>/<int:type_id>/<int:tags_id>/<int:resource_id>')
def show_resource(network_id, lang_id, type_id, tags_id, resource_id):
    """Show a single catalogued resource with its descriptions."""
    rep = DI.get_repository()
    network = rep.get_network_name_by_id(network_id)
    lang = rep.get_lang_name_by_id(lang_id)
    type = rep.get_type_name_by_id(type_id)
    tag = rep.get_tag_name_by_id(tags_id)
    tags = rep.read_tags(network_id, lang_id, type_id)
    resource = rep.read_resource(resource_id)
    descriptions = rep.read_descriptions(resource_id)
    return render_template('list/resource.html', title='Main', nav='tags',
                           network_id=network_id, lang_id=lang_id, type_id=type_id,
                           tags_id=tags_id, resource_id=resource_id, network=network,
                           lang=lang, type=type, tag=tag, tags=tags, resource=resource,
                           descriptions=descriptions)


@app.route('/search', methods=['GET'])
def show_search():
    """Show the empty search form."""
    return render_template('list/search.html', nav='search', title='Search',
                           type='description', resources=[])


@app.route('/search', methods=['POST'])
def search():
    """Search resources by URL or by description text."""
    rep = DI.get_repository()
    blk = DI.get_blockchain()
    upd = Updater(rep, blk)
    upd.check()
    resources = []
    if request.form['type'] == 'url':
        resources = rep.search_resources_by_url(request.form['search'])
    elif request.form['type'] == 'description':
        resources = rep.search_resources_by_description(request.form['search'])
    return render_template('list/search.html', nav='search', title='Search',
                           search=request.form['search'], type=request.form['type'],
                           resources=resources)


@app.route('/config', methods=['POST'])
def post_config():
    """Save RPC connection settings and report whether a connection succeeds."""
    Config.save_config({
        'host': request.form['host'],
        'port': int(request.form['port']),
        'user': request.form['user'],
        'password': request.form['password']
    })
    blk = DI.get_blockchain()
    if blk.check():
        result = 'Connection OK =)'
    else:
        result = 'FAILED'
    return render_template('config.html', nav='config', title='Config',
                           result=result, data=request.form)


@app.route('/config', methods=['GET'])
def show_config():
    """Show the current RPC connection settings."""
    data = Config.get_config()
    return render_template('config.html', nav='config', title='Config', data=data)


@app.route('/test')
def test():
    """Ad-hoc diagnostic endpoint: print the RPC availability and return OK."""
    blk = DI.get_blockchain()
    print(blk.check())
    return 'OK'

# app.run()
998,619
4a90d79bf379b5d7d04dea6f73c7fc830cf80206
# -*- coding: utf-8 -*- """Color vision deficiency.""" from .. import algebra as alg from ..filters import Filter from ..types import Vector, Matrix from typing import Any, Optional, Dict, Tuple, Callable, TYPE_CHECKING if TYPE_CHECKING: # pragma: no cover from ..color import Color LRGB_TO_LMS = [ [0.178824041258, 0.4351609057000001, 0.04119349692], [0.034556423182, 0.27155382458, 0.038671308360000003], [0.000299565576, 0.0018430896, 0.01467086136] ] INV_LMS_TO_LRGB = [ [8.094435598032373, -13.050431460496931, 11.672058453917328], [-1.024850558664669, 5.401931309674975, -11.361471490598715], [-0.036529747159333145, -0.412162807001268, 69.35132423820858] ] BRETTEL_PROTAN = ( [ [0.0, 4.645203550257065, -35.779576941742434], [0.0, 3.1614562071112173, -5.353537465326408], [0.0, -0.49202224438116643, 69.56547088528569] ], [ [0.0, 4.500783062899838, -32.818059625581405], [0.0, 3.179741536008844, -5.728500319648271], [0.0, -0.4913704825845725, 69.55210571887513] ], [0.0, 0.016813516536000002, -0.344781556122] ) # type: Tuple[Matrix, Matrix, Vector] BRETTEL_DEUTAN = ( [ [2.0757138256610466, 0.0, -21.409141476259606], [1.466463363392979, 0.0, 2.3317439563667293], [-0.22661490011149105, 0.0, 68.30654343816653] ], [ [2.124834794256137, 0.0, -23.323256212281898], [1.4461308480260673, 0.0, 3.124048513815847], [-0.2250635463221503, 0.0, 68.24609126806706] ], [-0.016813516536000002, 0.0, 0.6551784438780001] ) # type: Tuple[Matrix, Matrix, Vector] BRETTEL_TRITAN = ( [ [7.392856536180033, -11.148044821248138, 0.0], [-0.34194012421930653, 3.5501661175064, 0.0], [-4.2050692716448586, 10.89115898202731, 0.0] ], [ [8.0643934576311, -12.42414705871019, 0.0], [-0.9956078228581946, 4.7923119863996835, 0.0], [-0.2150297288038942, 3.3090019545637928, 0.0] ], [0.344781556122, -0.6551784438780001, 0.0] ) # type: Tuple[Matrix, Matrix, Vector] VIENOT_PROTAN = [ [0.11238276122216405, 0.8876172387778362, 5.551115123125783e-17], [0.11238276122216398, 0.8876172387778362, -2.7755575615628914e-17], 
[0.0040057682730469425, -0.004005768273046939, 1.0] ] VIENOT_DEUTAN = [ [0.2927501142784356, 0.7072498857215644, 1.1102230246251565e-16], [0.2927501142784356, 0.7072498857215644, -5.551115123125783e-17], [-0.022336587034129083, 0.022336587034129093, 1.0] ] VIENOT_TRITAN = [ [1.0000000000000002, 0.1446122433069361, -0.1446122433069363], [3.469446951953614e-17, 0.8592358078045899, 0.14076419219541025], [-4.163336342344337e-17, 0.8592358078045896, 0.14076419219541023] ] MACHADO_PROTAN = { 0: [[1.000000, 0.000000, -0.000000], [0.000000, 1.000000, 0.000000], [-0.000000, -0.000000, 1.000000]], 1: [[0.856167, 0.182038, -0.038205], [0.029342, 0.955115, 0.015544], [-0.002880, -0.001563, 1.004443]], 2: [[0.734766, 0.334872, -0.069637], [0.051840, 0.919198, 0.028963], [-0.004928, -0.004209, 1.009137]], 3: [[0.630323, 0.465641, -0.095964], [0.069181, 0.890046, 0.040773], [-0.006308, -0.007724, 1.014032]], 4: [[0.539009, 0.579343, -0.118352], [0.082546, 0.866121, 0.051332], [-0.007136, -0.011959, 1.019095]], 5: [[0.458064, 0.679578, -0.137642], [0.092785, 0.846313, 0.060902], [-0.007494, -0.016807, 1.024301]], 6: [[0.385450, 0.769005, -0.154455], [0.100526, 0.829802, 0.069673], [-0.007442, -0.022190, 1.029632]], 7: [[0.319627, 0.849633, -0.169261], [0.106241, 0.815969, 0.077790], [-0.007025, -0.028051, 1.035076]], 8: [[0.259411, 0.923008, -0.182420], [0.110296, 0.804340, 0.085364], [-0.006276, -0.034346, 1.040622]], 9: [[0.203876, 0.990338, -0.194214], [0.112975, 0.794542, 0.092483], [-0.005222, -0.041043, 1.046265]], 10: [[0.152286, 1.052583, -0.204868], [0.114503, 0.786281, 0.099216], [-0.003882, -0.048116, 1.051998]] } # type: Dict[int, Matrix] MACHADO_DEUTAN = { 0: [[1.000000, 0.000000, -0.000000], [0.000000, 1.000000, 0.000000], [-0.000000, -0.000000, 1.000000]], 1: [[0.866435, 0.177704, -0.044139], [0.049567, 0.939063, 0.011370], [-0.003453, 0.007233, 0.996220]], 2: [[0.760729, 0.319078, -0.079807], [0.090568, 0.889315, 0.020117], [-0.006027, 0.013325, 0.992702]], 3: 
[[0.675425, 0.433850, -0.109275], [0.125303, 0.847755, 0.026942], [-0.007950, 0.018572, 0.989378]], 4: [[0.605511, 0.528560, -0.134071], [0.155318, 0.812366, 0.032316], [-0.009376, 0.023176, 0.986200]], 5: [[0.547494, 0.607765, -0.155259], [0.181692, 0.781742, 0.036566], [-0.010410, 0.027275, 0.983136]], 6: [[0.498864, 0.674741, -0.173604], [0.205199, 0.754872, 0.039929], [-0.011131, 0.030969, 0.980162]], 7: [[0.457771, 0.731899, -0.189670], [0.226409, 0.731012, 0.042579], [-0.011595, 0.034333, 0.977261]], 8: [[0.422823, 0.781057, -0.203881], [0.245752, 0.709602, 0.044646], [-0.011843, 0.037423, 0.974421]], 9: [[0.392952, 0.823610, -0.216562], [0.263559, 0.690210, 0.046232], [-0.011910, 0.040281, 0.971630]], 10: [[0.367322, 0.860646, -0.227968], [0.280085, 0.672501, 0.047413], [-0.011820, 0.042940, 0.968881]], } # type: Dict[int, Matrix] MACHADO_TRITAN = { 0: [[1.000000, 0.000000, -0.000000], [0.000000, 1.000000, 0.000000], [-0.000000, -0.000000, 1.000000]], 1: [[0.926670, 0.092514, -0.019184], [0.021191, 0.964503, 0.014306], [0.008437, 0.054813, 0.936750]], 2: [[0.895720, 0.133330, -0.029050], [0.029997, 0.945400, 0.024603], [0.013027, 0.104707, 0.882266]], 3: [[0.905871, 0.127791, -0.033662], [0.026856, 0.941251, 0.031893], [0.013410, 0.148296, 0.838294]], 4: [[0.948035, 0.089490, -0.037526], [0.014364, 0.946792, 0.038844], [0.010853, 0.193991, 0.795156]], 5: [[1.017277, 0.027029, -0.044306], [-0.006113, 0.958479, 0.047634], [0.006379, 0.248708, 0.744913]], 6: [[1.104996, -0.046633, -0.058363], [-0.032137, 0.971635, 0.060503], [0.001336, 0.317922, 0.680742]], 7: [[1.193214, -0.109812, -0.083402], [-0.058496, 0.979410, 0.079086], [-0.002346, 0.403492, 0.598854]], 8: [[1.257728, -0.139648, -0.118081], [-0.078003, 0.975409, 0.102594], [-0.003316, 0.501214, 0.502102]], 9: [[1.278864, -0.125333, -0.153531], [-0.084748, 0.957674, 0.127074], [-0.000989, 0.601151, 0.399838]], 10: [[1.255528, -0.076749, -0.178779], [-0.078411, 0.930809, 0.147602], [0.004733, 0.691367, 
         0.303900]],
}  # type: Dict[int, Matrix]


def brettel(color: 'Color', severity: float, wings: Tuple[Matrix, Matrix, Vector]) -> None:
    """
    Calculate color blindness using Brettel 1997.

    https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.496.7153&rep=rep1&type=pdf

    Probably the only accurate approach for tritanopia, but is more expensive to calculate.
    """

    # `wings` holds the two projection matrices and the separating axis.
    w1, w2, sep = wings

    # Convert to LMS
    lms_c = alg.dot(LRGB_TO_LMS, color[:-1], dims=alg.D2_D1)

    # Apply appropriate wing filter based on which side of the separator we are on.
    # Tritanopia filter and LMS to sRGB conversion are included in the same matrix.
    coords = alg.dot(w2 if alg.dot(lms_c, sep) > 0 else w1, lms_c, dims=alg.D2_D1)

    # Partial severity interpolates between the original and the filtered color.
    if severity < 1:
        color[:-1] = [alg.lerp(a, b, severity) for a, b in zip(color[:-1], coords)]
    else:
        color[:-1] = coords


def vienot(color: 'Color', severity: float, transform: Matrix) -> None:
    """
    Calculate color blindness using the Viénot, Brettel, and Mollon 1999 approach,
    best for protanopia and deuteranopia. Can be used for tritanopia, but will be
    not be accurate.

    Based on http://vision.psychol.cam.ac.uk/jdmollon/papers/colourmaps.pdf.
    Tritanopia is inferred from the paper as they do not actually go through the
    logic, the difference is we use LMS red instead of LMS blue.

    Covered here as well:
    https://ixora.io/projects/colorblindness/color-blindness-simulation-research/.
    Though they use Hunt-Pointer-Estevez transformation, but here they argue that
    Smith and Pokorny is still probably the way to go:
    https://daltonlens.org/understanding-cvd-simulation/#From-CIE-XYZ-to-LMS.

    Our matrices are precalculated, so all we need to do is dot and go unless we
    want something lower than severity 1, then we interpolate against the original
    color.
    """

    coords = alg.dot(transform, color[:-1], dims=alg.D2_D1)

    # Partial severity interpolates between the original and the filtered color.
    if severity < 1:
        color[:-1] = [alg.lerp(c1, c2, severity) for c1, c2 in zip(color[:-1], coords)]
    else:
        color[:-1] = coords


def machado(color: 'Color', severity: float, matrices: Dict[int, Matrix]) -> None:
    """
    Machado approach to protanopia, deuteranopia, and tritanopia.

    https://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html#Reference

    Decent results for protanopia and deuteranopia, but tritanopia is really only
    an approximation. They don't even bother to show tritanopia results.
    """

    # Calculate the approximate severity (scale 0..1 input onto the 0..10 matrix keys).
    severity *= 10
    severity1 = int(severity)

    # Filter the color according to the severity
    m1 = matrices[severity1]
    coords = alg.dot(m1, color[:-1], dims=alg.D2_D1)

    # If severity was not exact, and it also isn't max severity,
    # let's calculate the next most severity and interpolate
    # between the two results.
    if severity1 != severity and severity1 < 10:

        # Calculate next highest severity
        severity2 = severity1 + 1

        # Calculate the weight
        weight = (severity - severity1)

        # Get the next severity in the list
        m2 = matrices[severity2]

        # It is actually stated that the two matrices should be interpolated,
        # but it ends up being faster just modifying the color on both the high
        # and low matrix and interpolating the color than interpolating the matrix
        # and then applying it to the color. The results are identical as well.
        coords2 = alg.dot(m2, color[:-1], dims=alg.D2_D1)
        coords = [alg.lerp(c1, c2, weight) for c1, c2 in zip(coords, coords2)]

    # Return the altered color
    color[:-1] = coords


class Protan(Filter):
    """Protanopia filter."""

    NAME = "protan"

    ALLOWED_SPACES = ('srgb-linear',)

    # Per-deficiency data consumed by the three simulation methods;
    # subclasses override these with their own matrices.
    BRETTEL = BRETTEL_PROTAN
    VIENOT = VIENOT_PROTAN
    MACHADO = MACHADO_PROTAN

    def __init__(self, severe: str = 'vienot', anomalous: str = 'machado') -> None:
        """Initialize with the default method for full and partial severity."""

        self.severe = severe
        self.anomalous = anomalous

    def brettel(self, color: 'Color', severity: float) -> None:
        """Color vision deficiency using the Brettel method."""

        brettel(color, severity, self.BRETTEL)

    def vienot(self, color: 'Color', severity: float) -> None:
        """Color vision deficiency using the Viénot method."""

        vienot(color, severity, self.VIENOT)

    def machado(self, color: 'Color', severity: float) -> None:
        """Color vision deficiency using the Machado method."""

        machado(color, severity, self.MACHADO)

    def select_filter(self, method: str) -> Callable[..., None]:
        """Select the best filter."""

        if method == 'brettel':
            return self.brettel
        elif method == 'vienot':
            return self.vienot
        elif method == 'machado':
            return self.machado
        else:
            raise ValueError("Unrecognized CVD filter method '{}'".format(method))

    def get_best_filter(self, method: Optional[str], max_severity: bool) -> Callable[..., None]:
        """Get the best filter based on the situation."""

        # When no method is requested, pick the configured default: the
        # 'severe' method at full severity, 'anomalous' otherwise.
        if method is None:
            method = self.severe if max_severity else self.anomalous
        return self.select_filter(method)

    def filter(self, color: 'Color', amount: Optional[float] = None, **kwargs: Any) -> None:  # noqa: A003
        """Filter the color."""

        method = kwargs.get('method')  # type: Optional[str]
        # Severity defaults to 1 (full deficiency) and is clamped to [0, 1].
        amount = alg.clamp(1 if amount is None else amount, 0, 1)
        self.get_best_filter(method, amount == 1)(color, amount)


class Deutan(Protan):
    """Deuteranopia filter."""

    NAME = 'deutan'

    BRETTEL = BRETTEL_DEUTAN
    VIENOT = VIENOT_DEUTAN
    MACHADO = MACHADO_DEUTAN


class Tritan(Protan):
    """Tritanopia filter."""

    NAME = 'tritan'

    BRETTEL = BRETTEL_TRITAN
    VIENOT = VIENOT_TRITAN
    MACHADO = MACHADO_TRITAN

    def __init__(self, severe: str = 'brettel', anomalous: str = 'brettel', **kwargs: Any) -> None:
        """Initialize (Brettel is the only reasonably accurate method for tritanopia)."""

        super().__init__(severe, anomalous)
998,620
1266e9ad3afb8430c5ed0316d59ddc7e113b8a2d
# Dependencies
from flask import Flask, render_template, request, jsonify
import traceback
import pandas as pd
import numpy as np
import sklearn
import sys
import pickle
from models import nettoyage, trouve_journal
from sklearn.feature_extraction.text import TfidfVectorizer

app = Flask(__name__)


@app.route('/', methods=['POST', 'GET'])
def envoi():
    """Landing page with the article submission form."""
    return render_template("page.html")


@app.route('/predict', methods=['POST'])
def predict():
    """Classify the submitted article and render the predicted journal plus class probabilities."""
    if lr:
        try:
            mon_article = request.form['monArticle']
            article = nettoyage(mon_article)     # text cleaning (project helper)
            query = tfidf_.transform([article])  # vectorise with the fitted TF-IDF
            prediction = trouve_journal(lr.predict(query))
            probas = lr.predict_proba(query)[0]
            proba = [round(x, 4) for x in probas]
            return render_template("result.html", name=prediction,
                                   a_traduire=mon_article, probas=proba)
        except Exception:
            print("Une erreur s'est produite")
            # `traceback` was imported but never used: log the actual error
            # instead of silently swallowing it.
            traceback.print_exc()
            return render_template("page.html")
    else:
        print('Train the model first')
        return ('No model here to use')


if __name__ == '__main__':
    try:
        port = int(sys.argv[1])  # This is for a command-line input
    except (IndexError, ValueError):
        port = 12345  # If you don't provide any port the port will be set to 12345

    with open("./src/models_pickle/file_model_fitted_Knn.pkl", 'rb') as file_model:
        lr = pickle.load(file_model)
    print('Model loaded')

    with open("./src/models_pickle/file_tfidf.pkl", 'rb') as tfidf:
        tfidf_ = pickle.load(tfidf)
    print('tfidf transfo loaded')

    # Bug fix: honour the port parsed above (the original hard-coded 12345,
    # silently ignoring the command-line argument).
    app.run(port=port, debug=True)
998,621
f8926b8296fa58c39ce4621a9cc42063239cb0ff
# https://docs.aws.amazon.com/code-samples/latest/catalog/python-ec2-ec2_basics-ec2_setup.py.html
import sys
import boto3
from botocore.exceptions import ClientError
import logging

logger = logging.getLogger(__name__)

# instantiation of the ec2 service resource (high-level boto3 interface)
ec2 = boto3.resource('ec2')


def create_instance(
        image_id, instance_type, key_name, security_group_names=None):
    """
    Creates a new Amazon EC2 instance. The instance automatically starts immediately after
    it is created.

    The instance is created in the default VPC of the current account.

    :param image_id: The Amazon Machine Image (AMI) that defines the kind of
                     instance to create. The AMI defines things like the kind of
                     operating system, such as Amazon Linux, and how the instance is
                     stored, such as Elastic Block Storage (EBS).
    :param instance_type: The type of instance to create, such as 't2.micro'.
                          The instance type defines things like the number of CPUs and
                          the amount of memory.
    :param key_name: The name of the key pair that is used to secure connections to
                     the instance.
    :param security_group_names: A list of security groups that are used to grant
                                 access to the instance. When no security groups are
                                 specified, the default security group of the VPC
                                 is used.
    :return: The newly created instance.
    :raises ClientError: re-raised after logging when the EC2 API call fails.
    """
    try:
        instance_params = {
            'ImageId': image_id,
            'InstanceType': instance_type,
            'KeyName': key_name
        }
        if security_group_names is not None:
            instance_params['SecurityGroups'] = security_group_names
        instance = ec2.create_instances(**instance_params, MinCount=1, MaxCount=1)[0]
        logger.info("Created instance %s.", instance.id)
    except ClientError:
        # Consistency fix: log through the module-level logger (the original
        # used the root logger via logging.exception here, unlike the success path).
        logger.exception(
            "Couldn't create instance with image %s, instance type %s, and key %s.",
            image_id, instance_type, key_name)
        raise
    else:
        return instance


# Make sure the values you are entering are present; if not, create them.
create_instance('ami-09e67e426f25ce0d7', 't2.micro', 'sample-instance', ['Sample instance'])
print("OK")
998,622
1a4ef0731fe8151b66fe07193f1109251162ed1c
import time import random from chatbot_utils.redict import ReDict import matplotlib.pyplot as plt random.seed(time.time()) def plot(xdata, ydata, xlabel=None, ylabel=None, legend=None): for y in ydata: plt.plot(xdata, y) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) if legend: plt.legend(legend) plt.show() def test_redict_speed(num_items, start=0, dictobj=None): if dictobj is None: d = ReDict() else: d = dictobj num_regexs = 1 for i in range(start, start + num_items): d["(f)(o)o? %d|b((a)(r)*) %d" % (i, i)] = i compile_start = time.time() d.compile() compile_time = time.time() - compile_start if num_items == 0: num_get_tests = 100 else: num_get_tests = num_items / 10 get_time = 0.0 for _ in range(num_get_tests): index = random.randrange(0, len(d)) text = "barrr %d" % index get_start = time.time() value = d[text] get_time += time.time() - get_start return compile_time, get_time / float(num_get_tests) step = 1000 max_value = 25000 iterations = max_value / step compile_times = [] get_times = [] d = ReDict() for i in range(iterations): compile_time, get_time = test_redict_speed(step, step * i, d) compile_times.append(compile_time) get_times.append(get_time) # worst caseno, chunking, 7.15 secs to compile with 25000 groups # better, chunking 600, 4.5 secs with 25000 groups # best (!), chunking 75, builtin 're' lib, 1.65 secs with 25000 groups test_values = range(0, max_value, step) plot(test_values, [compile_times, get_times], xlabel="Number of items in ReDict instance", ylabel="Time in seconds", legend=[ 'Time to compile ReDict instance', 'Time to fetch item from compiled ReDict' ] )
998,623
9219074873b1a732562c6f3b02ede54e37991e04
i=3 j=5 sum=0 while i<1000: sum+=i i+=3 while j<1000: if j%3!=0: sum+=j j+=5 print(sum)
998,624
cb7105c03b6af352b8d823f4b534a818ec291458
######################################################## ######################################################## ############# Automated music for work ################# ######################################################## ######################################################## ######################################################## import pyautogui as pg import random import time # Precaution pg.FAILSAFEEXCPETION = True pg.PAUSE = 1.0 # Music Street_Fighter_Mas = 'https://www.youtube.com/watch?v=LdyabrdFMC8' Blood_on_the_Radio = 'https://www.youtube.com/watch?v=5WKmOS_cW1w' Vintage = 'https://www.youtube.com/watch?v=cVxIqlzdx98' Beginnings = 'https://www.youtube.com/watch?v=7kQ1llzPiB4' Bossa_uh = 'https://www.youtube.com/watch?v=FSnuF1FPSIU' Accordian = 'https://www.youtube.com/watch?v=4KmEc6zFmgo' Iron_Man = 'https://www.youtube.com/watch?v=F01UTYg79KY' My_Favorite_Things = 'https://www.youtube.com/watch?v=rqpriUFsMQQ' Mambo = 'https://www.youtube.com/watch?v=K0KyKOTLqrQ' Dont_Worry = 'https://www.youtube.com/watch?v=RxsBc5p-dPU' Honda = 'https://www.youtube.com/watch?v=v96sokSHeT4' Mercy = 'https://www.youtube.com/watch?v=CShUISLYLGY' Skiptracing = 'https://www.youtube.com/watch?v=iLk4QIOjL1s' Breathe_Underwater = 'https://www.youtube.com/watch?v=p3tErZ5LUJY' Memes = 'https://www.youtube.com/watch?v=CAMWdvo71ls' Cancion = 'https://www.youtube.com/watch?v=ahtMpUhoj9s' Say_My_Name = 'https://www.youtube.com/watch?v=sQgd6MccwZc' Hooked = 'https://www.youtube.com/watch?v=NrI-UBIB8Jk' Crazy = 'https://www.youtube.com/watch?v=-N4jf6rtyuw' Killing_Me_Softly = 'https://www.youtube.com/watch?v=H-RBJNqdnoM' Valerie = 'https://www.youtube.com/watch?v=imEkaRJuN6Q' Telephone = 'https://www.youtube.com/watch?v=77R1Wp6Y_5Y' Life_Boat = 'https://www.youtube.com/watch?v=7NoPDyOEYkQ' Calveritas = 'https://www.youtube.com/watch?v=YiYemWhdoIo' Summer_Breeze = 'https://www.youtube.com/watch?v=T88fbHOmvRk' I = 'https://www.youtube.com/watch?v=8aShfolR6w8' Feels_Like_Summer = 
'https://www.youtube.com/watch?v=F1B9Fk_SgI0' Hello = 'https://www.youtube.com/watch?v=6jBhlyyB30o' Eletric_Lady = 'https://www.youtube.com/watch?v=LPFgBCUBMYk' Italian_Restaurant = 'https://www.youtube.com/watch?v=Hxx8IWIvKg0' Hey_Jude = 'https://www.youtube.com/watch?v=mQER0A0ej0M' My_Way = 'https://www.youtube.com/watch?v=qQzdAsjWGPg' Easy_Baby = 'https://www.youtube.com/watch?v=6AZE0lbEkKw' Because_Me = 'https://www.youtube.com/watch?v=NynnApj2smY' Love_The_Moon = 'https://www.youtube.com/watch?v=JhY9jkUVLi4' What_Do = 'https://www.youtube.com/watch?v=wcGjUEyaZ0Y' Schizoid_Man = 'https://www.youtube.com/watch?v=JLstJH23p7k' Flower_Blooms = 'https://www.youtube.com/watch?v=1gAHhLb6tjA' Christmas_Miracle = 'https://www.youtube.com/watch?v=OQ5rI461KNE' Love_You_So = 'https://www.youtube.com/watch?v=Q2QUngVGxmE' Roses = 'https://www.youtube.com/watch?v=sZ1vT0aPcYE' Brandy = 'https://www.youtube.com/watch?v=DVx8L7a3MuE' Father_and_Son = 'https://www.youtube.com/watch?v=txDMiD8ia50' Roundabout = 'https://www.youtube.com/watch?v=DwPWGUhEtP0' South_of_the_River = 'https://www.youtube.com/watch?v=nEJk2FJJ18c' Lady_Brown = 'https://www.youtube.com/watch?v=Y9QHak8h1AQ' Man_in_Mirror = 'https://www.youtube.com/watch?v=Z9NYDgbKsBE' Bites_the_Dust = 'https://www.youtube.com/watch?v=1tLYYSofs3U' Promiscuous = 'https://www.youtube.com/watch?v=0J3vgcE5i2o' American_Boy = 'https://www.youtube.com/watch?v=Ic5vxw3eijY' K = 'https://www.youtube.com/watch?v=UqyT8IEBkvY' Heart_No_Chance = 'https://www.youtube.com/watch?v=O5mcLhuUfG0' That_Somebody = 'https://www.youtube.com/watch?v=Z5338B36j0M' Giorno = 'https://www.youtube.com/watch?v=U0TXIXTzJEY' Falling_in_Love = 'https://www.youtube.com/watch?v=vGJTaP6anOU' Close_to_You = 'https://www.youtube.com/watch?v=iFx-5PGLgb4' Deep_your_Love = 'https://www.youtube.com/watch?v=XpqqjU7u5Yc' Moanin = 'https://www.youtube.com/watch?v=__OSyznVDOY' Georgia = 'https://www.youtube.com/watch?v=QL3EZwSJAh0' Carnival = 
'https://www.youtube.com/watch?v=MKk1u5RMTn4' My_Shot = 'https://www.youtube.com/watch?v=Ic7NqP_YGlg' Rise_Against = 'https://www.youtube.com/watch?v=6nQCxwneUwA' Awake = 'https://www.youtube.com/watch?v=fT2LBOUIdv8' Thugz_Remix = 'https://www.youtube.com/watch?v=q6WPhuRoSOo' Mississippi = 'https://www.youtube.com/watch?v=LJ25-U3jNWM' Until = 'https://www.youtube.com/watch?v=M4x-GJNmgVs' Children_of_Sanchez = 'https://www.youtube.com/watch?v=28ds69kqY8s' Breezin = 'https://www.youtube.com/watch?v=RcDD57K-k5I' Devil_Sonata = 'https://www.youtube.com/watch?v=z7rxl5KsPjs' Howling_Castle = 'https://www.youtube.com/watch?v=UwxatzcYf9Q' One_Nation = 'https://www.youtube.com/watch?v=3WOZwwRH6XU' Fly = 'https://www.youtube.com/watch?v=1tUzT84HSzE' Unstoppable = 'https://www.youtube.com/watch?v=YFic-xaLsPs' Lose_it = 'https://www.youtube.com/watch?v=NhK8Ehv6aPI' Your_Soul = 'https://www.youtube.com/watch?v=BjTbELSqgYc' Feel_Good = 'https://www.youtube.com/watch?v=HyHNuVaZJ-k' The_Prayer = 'https://www.youtube.com/watch?v=uUp_T71-g34' Four_semi = 'https://www.youtube.com/watch?v=7r5g7fVP4Xg' Awake_Alive = 'https://www.youtube.com/watch?v=fT2LBOUIdv8' Runaway = 'https://www.youtube.com/watch?v=x-FkJ5FzWgs' Re_do = 'https://www.youtube.com/watch?v=6rgbYAZeZJI' Young_American = 'https://www.youtube.com/watch?v=ScVi_L817ec' Boombayah = 'https://www.youtube.com/watch?v=bwmSjveL3Lc' Stan = 'https://www.youtube.com/watch?v=UQRmA2v2D9s' Falling_Down = 'https://www.youtube.com/watch?v=oCdA5coSRfA' La_Campanella = 'https://www.youtube.com/watch?v=jERzLseoAOM' Zigeunerweisn = 'https://www.youtube.com/watch?v=ufIlOXXMqs0' Violoncello = 'https://www.youtube.com/watch?v=d3ORA6x-Wk8' Wishin = 'https://www.youtube.com/watch?v=zLGwyJamWEI' Pray_Understand = 'https://www.youtube.com/watch?v=MT6QfqB0n-E' Spain = 'https://www.youtube.com/watch?v=sEhQTjgoTdU' Virtual_Insanity = 'https://www.youtube.com/watch?v=GieQq3eWSnE' Boogie = 'https://www.youtube.com/watch?v=PhD58dP9kq8' Every_Woman = 
'https://www.youtube.com/watch?v=IzUIpOr1ssM' Music = [Street_Fighter_Mas, Blood_on_the_Radio, Vintage, Beginnings, Bossa_uh, Accordian, Iron_Man, My_Favorite_Things, Mambo, Dont_Worry, Honda, Mercy, Skiptracing, Breathe_Underwater, Memes, Cancion, Say_My_Name, Hooked, Crazy, Killing_Me_Softly, Valerie, Telephone, Life_Boat, Calveritas, Summer_Breeze, I, Feels_Like_Summer, Hello, Eletric_Lady, Italian_Restaurant, Hey_Jude, My_Way, Easy_Baby, Because_Me, Love_The_Moon, What_Do, Schizoid_Man, Flower_Blooms, Love_You_So, Roses, Brandy, Father_and_Son, Roundabout, Lady_Brown, Man_in_Mirror, Bites_the_Dust, Promiscuous, American_Boy, K, Heart_No_Chance, That_Somebody, Giorno, Falling_in_Love, Close_to_You, Deep_your_Love, Moanin, Georgia, Carnival, My_Shot, Rise_Against, Awake, Thugz_Remix, Mississippi, Until, Children_of_Sanchez, Breezin, Devil_Sonata, Howling_Castle, One_Nation, Fly, Unstoppable, Lose_it, Your_Soul, Feel_Good, The_Prayer, Four_semi, Awake_Alive, Runaway, Re_do, Young_American, Boombayah, Stan, Falling_Down, La_Campanella, Zigeunerweisn, Violoncello, Wishin, Pray_Understand, Spain, Virtual_Insanity, Boogie, Every_Woman, ] # Set up Music Choice def play_music(): Music_choice = random.randrange(len(Music)) # Open youtube vid with music from list pg.hotkey('winleft') pg.typewrite('chrome\n') pg.press('enter') pg.typewrite(Music[Music_choice]) # Random choice of songs in the list pg.press('enter') # Change the song def change_music(): text = pg.confirm(text ='Change song?', title ='Change Music', buttons =['Yes', 'No', 'Quit']) # Change song button if text == 'Yes': pg.hotkey('alt', 'f4') # alt f4 short-cut for closing tabs play_music() change_music() elif text == 'No': pg.hotkey('winleft', 'down') else: pg.hotkey('alt', 'f4') # Play a new song def play_new_song(): time.sleep(180) # Play new song button appears after 3 minutes confirmation = pg.confirm(text = 'Next Song?', title = 'Next Song', buttons = ['Yes', 'No',]) # New song button if confirmation == 
'Yes': pg.hotkey('alt', 'f4') play_music() change_music() play_new_song() elif confirmation == 'No': pg.hotkey('winleft', 'down') else: quit() play_music() change_music() play_new_song() # Turn volume on if off pg.hotkey('volumeup')
998,625
3192285e57aeb8cec16c78edc2e3312bf5f252c4
# Generated by Django 3.0.5 on 2020-08-07 07:04 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('pages', '0009_auto_20200806_2122'), ('pages', '0011_merge_20200807_1447'), ] operations = [ ]
998,626
5ac80c6af41c9ef9153628dbbb83638abb1cf62b
import os import csv import sys from filelock import Timeout, FileLock DATASET_NAME = 'echo-msk' curr_dir_path = os.path.dirname(os.path.realpath(__file__)) dataset_folder_path = os.path.join(curr_dir_path, DATASET_NAME+"-dataset/") def change_paths(find, replace): for item in os.listdir(dataset_folder_path): csv_path = os.path.join(dataset_folder_path, item) if csv_path.split(".")[-1] != "csv": continue f = open(csv_path, "r") parts = list(csv.reader(f)) f.close() f = open(csv_path, "w") writer = csv.writer(f) for row in parts: row[0] = row[0].replace(find, replace) writer.writerow(row) f.close() if __name__ == '__main__': if len(sys.argv) < 3: print('USAGE: python change_paths.py <find_path> <replacement>') else: change_paths(sys.argv[1], sys.argv[2])
998,627
802e6d40b5f1b1b305196a49ebb55b629e750550
import datetime import logging.config import binascii import json import re from os import linesep import argparse import psycopg2 from autobahn.twisted.websocket import ( WebSocketServerProtocol, WebSocketServerFactory, ) from twisted.protocols import basic from twisted.internet import stdio from config import config from utils.deconstruct_package import ( deconstruct_package, PackageHandler, ) from utils.type_convert import bytes_to_int logging.config.fileConfig("config/logging_config.ini") logger = logging.getLogger(__name__) class UserInputProtocol(basic.LineReceiver): """Handle input bytestream""" delimiter = linesep.encode("utf8") def __init__(self, callback): self.callback = callback def lineReceived(self, line): self.callback(line) class DebugPackageServerProtocol(WebSocketServerProtocol): def onConnect(self, request): logger.debug("Client connecting: {0}".format(request.peer)) self.factory.protocol_pool.append(self) def onOpen(self): logger.debug("WebSocket connection open.") def onClose(self, wasClean, code, reason): logger.debug("WebSocket connection closed: {0}".format(reason)) self.factory.protocol_pool.remove(self) class DebugPackageServerFactory(WebSocketServerFactory): protocol = DebugPackageServerProtocol def __init__(self, interface, server_port): """ :param interface: specific which interface to listen :type interface: string """ self.interface = interface self.server_port = server_port stdio.StandardIO(UserInputProtocol(self.user_input_received)) super(DebugPackageServerFactory, self).__init__( "ws://127.0.0.1:{port}".format(port=config.WEB_SOCKET_PORT) ) self.protocol_pool = [] self.data_temp = None self.establish_db_conn() def establish_db_conn(self): conn = psycopg2.connect( dbname=config.DB_NAME, user=config.DB_USERNAME, host=config.DB_HOST, ) self.db_conn = conn def store_package(self, data): """ Store a package :param data: package content :type data: string """ insert_statement = "insert into car_package(row_data) values ({})".format( 
psycopg2.Binary(data), ) if self.db_conn.closed: self.establish_db_conn() cursor = self.db_conn.cursor() cursor.execute(insert_statement.encode('ascii')) self.db_conn.commit() def user_input_received(self, data): """ Process user input :param data: :type data: bytes """ # data example: 120.026.081.035.04020-192.168.003.192.60036: ##TTUJJJ563EM063163, data = self.package_gateway(data) if data: self.broadcast_packages(data) def package_gateway(self, data): pattern = re.compile(b"(?P<source_host>.+?)\.(?P<source_port>\d{5})-(?P<dest_host>.+?)\.(?P<dest_port>\d{5}): (?P<tcp_payload>.+)") match = pattern.match(data) if match: tcp_dic = match.groupdict() logger.debug('Tcp package: {}'.format(tcp_dic)) self.sender = self.get_sender(tcp_dic) tcp_payload = tcp_dic['tcp_payload'] # Filter out invalid package, usually it's because those poor souls # put the gateway address in the browser's address bar, which cause # us to receive some http packages. if tcp_payload[:2] != b'##': logger.info("Receive a http package send by those poor souls") logger.info('Tcp payload: {}'.format(tcp_payload)) return package = deconstruct_package(tcp_payload) if len(package.payload) < package.length: # There is a bug in the program: since the tcp package are # received as binary data, if there is new line character in # tcp package, the package will be truncated to two and send to # input handler seperately. # # Since this problem is caused by binary security, changing the # separator cant't solve it. # # So we temporary deposit truncated packages and connect them # later on. 
logger.info("A truncated package: {}".format(package.payload)) self.data_temp = package.raw_data return return self.process_package(package) else: if self.data_temp is None: logger.info("Receive a http package send by those poor souls") logger.info('Tcp payload: {}'.format(data)) return self.data_temp = self.data_temp + b'\n' + data package = deconstruct_package(self.data_temp) if len(package.payload) < package.length: logger.info("A truncated package: {}".format(package.payload)) return return self.process_package(package) def broadcast_packages(self, data): for protocol in self.protocol_pool: protocol.sendMessage(data, False) def get_sender(self, package): """ Get sender :param package: :type package: dict :rtype: string """ if int(package["dest_port"]) == self.server_port: return "client" return "server" def process_package(self, package): self.store_package(package.raw_data) package["sender"] = self.sender package_handler = PackageHandler() package_type, timestamp = package_handler.get_package_type(package) vin = package['unique_code'] package["package_type"] = package_type if timestamp: conn = package_handler.redis_conn key = "package_type:{}".format(vin) conn.hset(key, timestamp, package_type) conn.expire(key, 10) package["datetime"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") package["vin"] = vin.decode() def split_str_by_step(string, step): for i in range(0, len(string), step): yield string[i:i + step] for k, v in package.items(): if isinstance(v, int): v = bytes([v]) if isinstance(v, str): v = v.encode("utf8") continue v = binascii.hexlify(v).decode("ascii") v = list(split_str_by_step(v, 2)) v = ' '.join(v) package[k] = v data = json.dumps(package).encode("ascii") return data def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "-i", "--interface", type=str, help="specific which interface to listen", ) parser.add_argument( "-p", "--port", type=int, help="specific which interface to listen", ) args = parser.parse_args() 
return args def main(): args = parse_args() from twisted.internet import reactor factory = DebugPackageServerFactory(args.interface, args.port) factory.protocol = DebugPackageServerProtocol reactor.listenTCP(config.WEB_SOCKET_PORT, factory) reactor.run() if __name__ == "__main__": main()
998,628
76472c7222898b8576d25858dba37342a21ae6cb
# -*- coding: utf-8 -*- """ Created on Tue Mar 16 15:08:32 2021 @author: dingxu """ import os listroute = [0 for i in range(22)] #https://nadc.china-vo.org/psp/csp/calibration/dark/-35c/20200204/ #listroute[0] = 'https://nadc.china-vo.org/psp/csp/2020CSP/20201122/AutoFlat20201122/' listroute[0] = 'https://nadc.china-vo.org/psp/csp/calibration/dark/-40c/20201224/' listroute[1] = 'https://nadc.china-vo.org/psp/csp/calibration/dark/-40c/20201202/' listroute[2] = 'https://nadc.china-vo.org/psp/csp/calibration/dark/-35c/20200204/' listpath = [0 for i in range(22)] #listpath[0] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\20201122\\flat20201122\\' listpath[0] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\calibration\\dark\\-40c\\20201224\\' listpath[1] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\calibration\\dark\\-40c\\20201202\\' listpath[2] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\calibration\\dark\\-35c\\20200204\\' #for i in range(22): # listroute[i] = 'https://nadc.china-vo.org/psp/csp/2020CSP/202012'+str(10+i)+'/AutoFlat202012'+str(10+i)+'/' # #for i in range(22): # listpath[i] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\202012'+str(10+i)+'\\flat202012'+str(10+i)+'\\' # for i in range(3): cmd = 'python downbias.py'+' '+listroute[i]+' '+listpath[i] os.system(cmd) #dlistroute = [0 for i in range(22)] ##listroute[0] = 'https://nadc.china-vo.org/psp/csp/2020CSP/20201122/AutoFlat20201122/' ##dlistroute[0] = 'https://nadc.china-vo.org/psp/csp/2020CSP/20201223/CSP-VAR-0645/' #dlistpath = [0 for i in range(22)] ##dlistpath[0] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\20201223\\CSP-VAR-0645\\' # # #for i in range(22): # dlistroute[i] = 'https://nadc.china-vo.org/psp/csp/2020CSP/202012'+str(22+i)+'/CSP-VAR-0645'+'/' # #for i in range(22): # dlistpath[i] = 'E:\\shunbianyuan\\data\\xingmingdata\\downdata\\202012'+str(22+i)+'\\CSP-VAR-0645'+'\\' # # #for i in range(12): # cmd = 'python downdata.py'+' '+dlistroute[i]+' '+dlistpath[i] 
# os.system(cmd)
998,629
f4a08702b6a25c2c191f8d1c7aa133e7e8016719
mod_type = "cover" nav1 = "" nav2 = "" nav3 = "" headline = "" background_image = "" text = ""
998,630
49c4ee60e360a88910e83196c1ec7e3021117564
from typing import TYPE_CHECKING from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from app.db.base_class import Base from .search_category import Search_Category if TYPE_CHECKING: from .search_category import Search_Category # noqa: F401 class Cse(Base): id = Column(Integer, primary_key=True, index=True) url = Column(String, index=True) # search_category = relationship("Search_Category", back_populates="cses") # search_category_id = Column(Integer, ForeignKey("search_category.id")) cse = relationship("Search", secondary=Search_Category, back_populates="search_categories")
998,631
9db7d56b828f10c663f8441e76353e08c2e8307e
import boto3 from dotenv import load_dotenv import os load_dotenv() db_connection = boto3.client( 'dynamodb', aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'), aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY') ) tables = db_connection.list_tables()['TableNames'] if 'Movies' in tables: print(True) else: print(False)
998,632
962b5f359a6ab494ba575d2415ad60a42d2234d3
from flask import Flask, render_template, request, redirect, session from flask import flash from flask_bcrypt import Bcrypt import re from mysqlconnection import connectToMySQL app = Flask(__name__) app.secret_key="Super secret!" bcrypt=Bcrypt(app) @app.route("/") def login_and_registration_page(): print("*"*80) mysql = connectToMySQL('belt_db') print("Connected to our database!") return render_template("main.html") EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$') PASSWORD_REGEX = re.compile(r'^(?=.*?\d)(?=.*?[A-Z])(?=.*?[a-z])[A-Za-z\d]{8,15}$') @app.route("/register", methods=["POST"]) def validate_registration(): print("*"*80) is_valid=True if len(request.form['fname']) < 2: is_valid=False flash("First name must contain at least two letters and contain only letters") elif (request.form['fname']).isalpha() == False: is_valid=False flash("First name must contain at least two letters and contain only letters") if len(request.form['lname']) < 2: is_valid=False flash("Last name must contain at least two letters and contain only letters") elif (request.form['lname']).isalpha() == False: is_valid=False flash("Last name must contain at least two letters and contain only letters") if not EMAIL_REGEX.match(request.form['email']): is_valid=False flash("Invalid email address") if not PASSWORD_REGEX.match(request.form["password"]): is_valid=False flash("Password must contain a number, a capital letter, and be between 8-15 characters") if not (request.form["password"]) == (request.form["confirm_password"]): is_valid=False flash("Passwords must match") db = connectToMySQL("belt_db") query = "SELECT * FROM users WHERE email=%(em)s;" data = { "em": request.form['email'] } result=db.query_db(query, data) print(result) if len(result) != 0: is_valid=False flash("Email is already taken!") if not is_valid: return redirect(('/')) else: print(f"Name: {request.form['fname']}") print(f"Alias: {request.form['lname']}") print(f"Email: {request.form['email']}") 
print(f"Password: {request.form['password']}") # GENERATE PASSWORD HASH bcrypt.generate_password_hash(request.form["password"]) print(bcrypt.generate_password_hash) pw_hash=bcrypt.generate_password_hash(request.form["password"]) print(pw_hash) db = connectToMySQL("belt_db") query = "INSERT INTO users (first_name, last_name, email, pw_hash, created_at, updated_at) VALUES (%(fn)s, %(ln)s, %(em)s, %(pwh)s, NOW(), NOW());" data = { "fn": request.form["fname"], "ln": request.form["lname"], "em": request.form["email"], "pwh": pw_hash } db.query_db(query, data) print("*"*80) db = connectToMySQL("belt_db") query = "SELECT * FROM users WHERE email=%(em)s;" data = { "em": request.form["email"] } result=db.query_db(query, data) print(result) print(data) session['user_id']=result[0]['user_id'] return redirect("/dashboard") @app.route('/login', methods=['POST']) def login(): is_valid=True db = connectToMySQL("belt_db") query = "SELECT * FROM users WHERE email=%(em)s;" data = { "em": request.form["email"] } result=db.query_db(query, data) print(result) print(data) # TO VERIFY USERS PW IN DB, COMPARE PASSWORDS BY PROVIDING THE HASH AS THE 1ST ARGUMENT AND THE PW TO BE CHECKED AS THE 2ND # bcrypt.check_password_hash(hashed_password, password_string) if len(result) > 0: if bcrypt.check_password_hash(result[0]['pw_hash'], request.form['pw_hash']): session['user_id'] = result[0]['user_id'] return redirect("/dashboard") flash("You could not be logged in") return redirect("/") @app.route('/dashboard') def dashboard(): print("*"*80) if 'user_id' in session: print('key exists!') print(session['user_id']) else: print("key 'user_id' does NOT exist") if 'user_id' not in session: return redirect(('/')) db = connectToMySQL("belt_db") query = "SELECT first_name FROM users WHERE user_id=%(id)s;" data = { "id": session['user_id'] } result = db.query_db(query, data) print(result) print("*"*80) db = connectToMySQL("belt_db") query = ("SELECT * FROM jobs;") jobs = db.query_db(query, data) 
print(jobs) return render_template("dashboard.html", result=result, jobs=jobs) @app.route('/jobs/new') def add_job(): print("*"*80) db = connectToMySQL("belt_db") query = "SELECT first_name FROM users WHERE user_id=%(id)s;" data = { "id": session['user_id'] } result = db.query_db(query, data) print(result) return render_template('jobs_new.html', result=result) @app.route('/add_job', methods=['POST']) def add_to_jobs(): print("*"*80) is_valid=True if len(request.form['job_title']) < 3: is_valid=False flash("Job Title must contain at least 3 characters") if len(request.form['job_description']) < 3: is_valid=False flash("Job description must contain at least 3 characters") if len(request.form['job_location']) < 3: is_valid=False flash("Job location must contain at least 3 characters") if not is_valid: return redirect('/jobs/new') else: db = connectToMySQL("belt_db") query = "INSERT INTO jobs (created_by_user_id, job_title, job_location, job_description, created_at, updated_at) VALUES (%(cbid)s, %(jt)s, %(jl)s, %(jd)s, NOW(), NOW());" print(session['user_id']) data = { "cbid": session['user_id'], "jt": request.form["job_title"], "jl": request.form['job_location'], "jd": request.form['job_description'] } db.query_db(query, data) return redirect('/dashboard') @app.route('/jobs/<id>') def view_job(id): print("*"*80) db = connectToMySQL("belt_db") query = "SELECT first_name FROM users WHERE user_id=%(id)s;" data = { "id": session['user_id'] } result = db.query_db(query, data) print(result) db = connectToMySQL("belt_db") query = ''' SELECT * FROM jobs JOIN users ON user_id = created_by_user_id WHERE job_id = %(id)s;''' data = { "id": id } jobs=db.query_db(query, data) return render_template('view_job.html', result=result, jobs=jobs) @app.route('/jobs/edit/<id>') def edit_user(id): print("*"*80) db = connectToMySQL("belt_db") query = "SELECT first_name FROM users WHERE user_id=%(id)s;" data = { "id": session['user_id'] } result = db.query_db(query, data) print(result) return 
render_template("edit_job.html", result=result) @app.route('/jobs/update/<id>', methods=['POST']) def update_user(id): print("*"*80) is_valid=True if len(request.form['job_title']) < 3: is_valid=False flash("Job Title must contain at least 3 characters") if len(request.form['job_description']) < 3: is_valid=False flash("Job description must contain at least 3 characters") if len(request.form['job_location']) < 3: is_valid=False flash("Job location must contain at least 3 characters") if not is_valid: return redirect('/jobs/edit/<id>') else: db = connectToMySQL("belt_db") query = "UPDATE jobs SET job_title=%(jt)s, job_description=%(jd)s, job_location=%(jl)s, updated_at=NOW() WHERE created_by_user_id=%(sid)s;" print(session['user_id']) data = { "jt": request.form["job_title"], "jd": request.form['job_description'], "jl": request.form['job_location'], "sid": session['user_id'] } db.query_db(query, data) return redirect("/dashboard") @app.route('/delete_job/<id>') def delete_job(id): print("*"*80) print(request.form) db = connectToMySQL("belt_db") query = "DELETE FROM jobs WHERE job_id=%(jid)s;" data = { "jid": id } db.query_db(query, data) return redirect("/dashboard") @app.route('/logout', methods=['POST']) def clear_session_keys(): print("*"*80) session.clear() return redirect(('/')) if __name__ == "__main__": app.run(debug=True)
998,633
6ebaa53a27a37174851e7bc570aa3cac79d514d6
import turtle # Everything that comes after the # is a # comment. # It is a note to the person reading the code. # The computer ignores it. # Write your code below here... turtle.penup() #Pick up the pen so it doesn’t #draw turtle.goto(-200,-100) #Move the turtle to the #position (-200, -100) #on the screen turtle.pendown() #Put the pen down to start #drawing #Draw the M: turtle.goto(-200,-100+200) turtle.goto(-200+50,-100) turtle.goto(-200+100,-100+200) turtle.goto(-200+100,-100) turtle.penup() #Pick up the pen so it doesn’t #draw #draw turtle.goto(-50,-100) #Move the turtle to the #position (-50,-100) #on the screenand end it before the next line. turtle.pendown() #Put the pen down to start #drawing #draw turtle.goto(-50,-100+200) #Move the turtle to the #position (-200, -100) #on the screen turtle.pendown() #Put the pen down to start #drawing turtle.goto(0,100) #Move the turtle to the #position (-200,-100) #on the screen turtle.penup() #Pick up the pen so it doesn’t #draw turtle.goto(-50,-100+200) #Move the turtle to the #position (-200, -100) turtle.penup() #Pick up the pen so it doesn't #draw turtle.goto(-50,0) #Move the turtle to the #position (-200, -100) #on the screen turtle.pendown() #Put the pen down to start #drawing turtle.goto(0,0) #Move the turtle to the #position (-200, -100) #on the screen turtle.penup() turtle.goto(-50,-100) turtle.pendown() turtle.goto(0,-100) turtle.penup() turtle.goto(50,-100) turtle.pendown turtle.goto(70,-100) turtle.pendown() turtle.goto(70,100) turtle.pendown() turtle.goto(125,100) turtle.penup turtle.goto(70,100) turtle.penup turtle.goto(70,0) turtle.pendown turtle.goto(125,0) turtle.mainloop()
998,634
8a0868870a8c3e59955b06f742bad47ffc1f11d4
from .client import GrpcClient, default_grpc_retry_predicate from . import exceptions __all__ = ["GrpcClient", "default_grpc_retry_predicate", "exceptions"]
998,635
a65bf235da6fa89dde327f35a883999927db726f
from torchvision.datasets import VisionDataset class DatasetMemory(VisionDataset): """ Args: imgs (list of array)): List of images in the dataset transform (callable, optional): A function/transform that takes in a sample and returns a transformed version. E.g, ``transforms.RandomCrop`` for images. Attributes: samples (list): List of (sample path, class_index) tuples """ def __init__(self, imgs, transform=None): super().__init__("~") self.transform = transform self.samples = imgs if len(self.samples) == 0: raise (RuntimeError("No imgs found in the dataset")) def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (sample, target) where target is class_index of the target class. """ sample = self.samples[index] if self.transform is not None: sample = self.transform(sample) return sample, 0 def __len__(self): return len(self.samples)
998,636
7013a3d588b987e7878f585dc515c5bdd1dc3c1b
# Tai Sakuma <tai.sakuma@gmail.com> from ..misc import mkdir_p import os ##__________________________________________________________________|| class WritePandasDataFrameToFile: def __init__(self, outPath): self._outPath = outPath def deliver(self, results): if results is None: return f = self._open(self._outPath) if len(results.index) == 0: towrite = " ".join([i for i in results.columns]) + "\n" towrite = towrite.encode() f.write(towrite) else: ## results.to_string(f, index = False) towrite = results.to_string(index = False) + "\n" towrite = towrite.encode() f.write(towrite) self._close(f) def _open(self, path): mkdir_p(os.path.dirname(path)) return open(path, 'w') def _close(self, file): file.close() ##__________________________________________________________________||
998,637
1e3fe9b964c4dbb2c0e1d6e73863b02064836cd2
import pytest from praw.exceptions import ClientException from praw.models import RemovalReason from ... import IntegrationTest class TestRemovalReason(IntegrationTest): def test__fetch(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons["110nhral8vygf"] assert reason.title.startswith("Be Kind") def test__fetch__invalid_reason(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons["invalid"] with pytest.raises(ClientException) as excinfo: reason.title assert str(excinfo.value) == ( f"Subreddit {subreddit} does not have the removal reason invalid" ) @pytest.mark.cassette_name("TestRemovalReason.test__fetch") def test__fetch_int(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons[0] assert isinstance(reason, RemovalReason) @pytest.mark.cassette_name("TestRemovalReason.test__fetch") def test__fetch_slice(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reasons = subreddit.mod.removal_reasons[-3:] assert len(reasons) == 3 for reason in reasons: assert isinstance(reason, RemovalReason) def test_delete(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons["110nhyk34m01d"] reason.delete() def test_update(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons["110nhk2cgmaxy"] reason.update(title="New Title", message="New Message") def test_update_empty(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons[1] reason.update() class TestSubredditRemovalReasons(IntegrationTest): def 
test__iter(self, reddit): reddit.read_only = False count = 0 subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) for reason in subreddit.mod.removal_reasons: assert isinstance(reason, RemovalReason) count += 1 assert count > 0 def test_add(self, reddit): reddit.read_only = False subreddit = reddit.subreddit(pytest.placeholders.test_subreddit) reason = subreddit.mod.removal_reasons.add(title="Test", message="test") assert isinstance(reason, RemovalReason)
998,638
88ab9ceee12c1354662e64594301d32628283ac7
'''crie um programa que peça a usuário dois numeros e faça a soma''' print('insira dois numeros para fazer a soma') primeiroNumero = float(input('Digite o primeiro numero: ')) segundoNumero = float(input('Digite o segundo numero: ')) soma = float(primeiroNumero + segundoNumero) print(f'a soma entre {primeiroNumero} e {segundoNumero} vale: {soma}')
998,639
a62fa3cc318ef9ca979812b2f5efbf20236ddbe4
################################################################ ################# Preprocessing for the FNM #################### ################################################################ ## ## ################################################################ ## so far, only works on models with a single fnm part, meshed ## with 3D brick-type elements (C3D8(R) or SC8(R)); it prepares ## a new input file and fnm modules with fBricklam type only. ################################################################ ## ## ################################################################ ## Applicable abaqus input file: ## - multiple normal Abaqus parts allowed ## - single fnm part (named 'fnm') meshed with either C3D8(R) ## or SC8(R) elements ## - fnm part definition can include *Node, *Element, *Nset and ## *Elset, elset=predelam ## - all nsets in the fnm must be defined for applying bcd ## - assembly definition can be relatively generic, except that ## new nsets of the nodes on the fnm part cannot be defined. ## - boundary conditions must be applied on the nsets ################################################################ # glb objects defined for FNM from preproc_classes import* import math # operating system module import os, sys # shutil module has functions for different kinds of copying import shutil # define get integer def GetInteger2(): nnn = None while nnn != 1 and nnn !=2: try: s=sys.stdin.readline() nnn = int(s) except ValueError: print ('Invalid Number. Enter integer. ') return nnn #*************************************************************** # define parameters #*************************************************************** # ndim : dimension; currently only support 3D # nprops : no. of input material properties used in uel code (min 1) # nsvars : no. of sol. dpdnt var. used and ouput by uel code (min 1) # uelcode : code for the user element fBrickLam, used in uel input file # uellinecount : max no. 
of entries in a data line in uel input file (16) # uellinelength : line length in writing uel input file (max. =80 in *Element) # fnmlinelength : line length in writing fnm input file (max. =132) ndim = 3 nprops = 1 nsvars = 1 uelcode = 308 uellinecount = 14 uellinelength = 70 fnmlinelength = 120 #*************************************************************** # fetch original input file and open uel files #*************************************************************** # ask for job name jobname = raw_input('abaqus job name:') # abaqus original input file abqinputfile = jobname+'.inp' # uel input file uelinputfile = 'uel_'+jobname+'.inp' # uel nodes and elems files, to be included in uel input file uelnodesfile = 'uel_nodes.inp' uelelemsfile = 'uel_elems.inp' # open files abq_input = open(abqinputfile,'r') uel_input = open(uelinputfile,'w') uel_nodes = open(uelnodesfile,'w') uel_elems = open(uelelemsfile,'w') #*************************************************************** # Open Fortran modules to be written during pre-processing #*************************************************************** fnm_nodes = open('fnm_nodes.f90','w') # list of all nodes fnm_edges = open('fnm_edges.f90','w') # list of all nodes fnm_elems = open('fnm_elems.f90','w') # list of all elems fnm_predelam = open('fnm_predelam.f90','w') # list of all predelam elems and the predelam interf #*************************************************************** # Read Abaqus input file #*************************************************************** # Store all lines first All_lines = abq_input.readlines() # Find the length of all lines lenAll = len(All_lines) #================================================== # READ HEADER SECTION: #================================================== header = [] # list of header lines header.extend(All_lines[0:5]) #================================================== # READ PARTS SECTION: # stores all lines of ordinary parts # reads info of the fnm part and preprocess 
the following: # - store part's real nodes, elems & its real nodes # - find all breakable edges & create fl. nodes for each edge # - add fl. nodes to part's node list & elem's node list # - add edges to part's edge list & elem's edge list # - store nsets and the associated edges # - store elsets # ** NOTE **: # - only ONE fnm part is read # - only a SINGLE-layer mesh is supported for the fnm part # - only ONE type of elem is supported in the fnm part (all elems will be FNM-ized) # - only *Node, *Element, *Nset, *Elset sections are supported in the fnm part #================================================== fnmparts = [] # list of all fnm parts in the model ordparts = [] # list of all ordinary parts in the model predelam = [] # predelam elset (only one) in the model # find the line no. of *Part and store in jparts jparts = [j for j,line in enumerate(All_lines) if '*Part' in line] for jp in jparts: # read Part name pname = All_lines[jp][12:].rstrip() # if fnm is not a keyword, then this part is an ordinary part, store everything if not ('fnm' in pname): ordparts.append( opart(lines=[]) ) for j in range(jp,lenAll): jline = All_lines[j] ordparts[-1].lines.append(jline) # break out of for loop if end of part is reached if('*End Part' in jline): break # proceed to preprocessing if it is a fnm part else: # check if fnmparts is not more than one if (len(fnmparts) > 1): print("ERROR: more than one fnm part is not yet supported!") sys.exit() # ask for layup from user for this fpart # isSymLam: yes if the laminate layup is symmetric # rawlayup: layup of the laminate in a list of all plies # blklayup: layup of the laminate in a list of plyblocks rawlayup = [] blklayup = [] # ask if the lamiante layup is symmetric print (" Is the model a half-laminate with symmetric mid-plane: 1=yes 2=no ") isSymLam = GetInteger2() # ask for a list of ply angles rawlayup = \ input('Enter fibre angles (int/float) of all plies in the model, \ from bottom ply to top or mid ply (if the model is 
half-laminate); \ separate each ply by a comma; in case of a single ply, end with a comma:') # check if it is a list and if all elements in the list are numbers while ( (not isinstance(rawlayup, (list,tuple))) or \ (not all(isinstance(item, (int,float)) for item in rawlayup)) ): rawlayup = \ input('Enter fibre angles (int/float) of all plies in the model, \ from bottom ply to top or mid ply (if the model is half-laminate); \ separate each ply by a comma; in case of a single ply, end with a comma:') # ask for the thickness of a single ply plythick = \ input('Enter the thickness of a single ply (positive real number):') while ( not ( isinstance(plythick, float) and plythick > 0 ) ): plythick = \ input('Enter the thickness of a single ply (positive real number):') # find blocked plies and update blklayup # initiate blklayup blklayup.append( plyblk(angle=rawlayup[0], nplies=0, thickness=0.0) ) for plyangle in rawlayup: if (plyangle == blklayup[-1].angle): blklayup[-1].nplies += 1 blklayup[-1].thickness += plythick else: blklayup.append( plyblk(angle=plyangle, nplies=1, thickness=plythick) ) # if the laminate is symmetric, change the nplies for the last plyblk if (isSymLam == 1): blklayup[-1].nplies = 2 * blklayup[-1].nplies # create a new part in parts list fnmparts.append(fpart(name=pname, layup=blklayup, nodes=[], NtN=[], \ edges=[], elems=[], toprnds=[], botrnds=[], nsets=[], elsets=[])) # find the line of *Node, *Element, *Nset, *Elset # ** NOTE: # only ONE Node section (default) # only ONE type of elem is supported; all elems in this part will be FNM-ized # nset can be multiple # only ONE elset, which is the predelam elset, is supported jpend = next( j for j in range(jp,lenAll) if '*End Part'in All_lines[j] ) jnode = next( j for j in range(jp,jpend) if '*Node' in All_lines[j] ) jelems = [ j for j in range(jp,jpend) if '*Element' in All_lines[j] ] jnsets = [ j for j in range(jp,jpend) if '*Nset' in All_lines[j] ] jlsets = [ j for j in range(jp,jpend) if '*Elset' in 
All_lines[j] ] # report error if multiple elem defs exist in this fnm part if (len(jelems) != 1): print("ERROR: exactly ONE type of elem is supported in fnm part!") sys.exit() # report error if multiple elset defs exist in this fnm part if (len(jlsets) > 1): print("ERROR: exactly ONE elset, predelam, is supported in fnm part!") sys.exit() # report error if the elset is not named predelam if (len(jlsets) == 1): if not ('predelam' in All_lines[jlsets[0]]): print("ERROR: only the elset named predelam is supported in fnm part!") sys.exit() # read real(original) nodes of this part for jn in range(jnode+1,jpend): nline = All_lines[jn] # break out of for loop if end of node section is reached if('*' in nline): break # read the coords of this node into a list of float numbers coords = [] for t in nline.split(','): try: coords.append(float(t)) except ValueError: pass # store the coords in nodes list of this part fnmparts[-1].nodes.append(node(x=coords[1], y=coords[2], z=coords[3])) # update the node-to-node (NtN) matrix row length fnmparts[-1].NtN.append([]) ##check node correctness #for nd in fnmparts[-1].nodes: # print(str(nd.x)+','+str(nd.y)+','+str(nd.z)) # append the NtN matrix column to the correct length (nnode) for r in range(len(fnmparts[-1].NtN)): fnmparts[-1].NtN[r] = [0]*len(fnmparts[-1].NtN) # read elems of this part jelem = jelems[0] for je in range(jelem+1,jpend): eline = All_lines[je] # break out of for loop if end of elem section is reached if('*' in eline): break # read the index and real nodes of this elem into a list of int numbers el = [] for t in eline.split(','): try: el.append(int(t)) except ValueError: pass id = el[0] # elem index nds = el[1:] # elem real nodes # store the index and real nodes in elem list of this part fnmparts[-1].elems.append(element(index=id, nodes=nds, edges=[])) # update the top and bot surf real nodes lists halfnds = len(nds)/2 for j in range(halfnds): # bot surf real nodes if not(nds[j] in fnmparts[-1].botrnds): 
fnmparts[-1].botrnds.append(nds[j]) # top surf real nodes if not(nds[halfnds+j] in fnmparts[-1].toprnds): fnmparts[-1].toprnds.append(nds[halfnds+j]) # form edges # in 3D FNM for composites, only edges parallel to shell plane are breakable # j = 0: bottom edges; # j = 1: top edges for j in range(2): # j = 0: loop over real nodes on bot surf # j = 1: loop over real nodes on top surf for i in range(halfnds): # ith node on surf j row = nds[ j*halfnds + i ] - 1 # (i+1)th node on surf j; # if i is the last node, i+1 is the first node if i == halfnds-1: col = nds[ j*halfnds ] - 1 else: col = nds[ j*halfnds + i + 1 ] - 1 # fill in the NtN matrix: # fill in the edge index composed of the two end nodes if fnmparts[-1].NtN[row][col]==0: # this pair of nodes hasn't formed an edge # a new edge will be formed # indices of 2 fl. nodes on this new edge # they are two new nodes to be added in the part's node list fn1 = len(fnmparts[-1].nodes) fn2 = fn1 + 1 fnmparts[-1].nodes.append( node(0.0,0.0,0.0) ) fnmparts[-1].nodes.append( node(0.0,0.0,0.0) ) # form a new edge and append to existing list of edges # the new edge has 4 nodes: 2 real, 2 floating fnmparts[-1].edges.append(edge(nodes=[row+1,col+1,fn1+1,fn2+1])) # fill the new edge index in the NtN matrix # nodes in rev. order makes the same edge in rev. dir. fnmparts[-1].NtN[row][col] = len(fnmparts[-1].edges) fnmparts[-1].NtN[col][row] = -len(fnmparts[-1].edges) # append this edge no. in this elem fnmparts[-1].elems[-1].edges.append(fnmparts[-1].NtN[row][col]) # append the fl. nodes in this elem fnmparts[-1].elems[-1].nodes.extend([fn1+1,fn2+1]) else: # this pair of nodes has already formed an edge eg = fnmparts[-1].NtN[row][col] # append this edge no. in this elem fnmparts[-1].elems[-1].edges.append(eg) # find the two fl. nodes on this edge fn1 = fnmparts[-1].edges[abs(eg)-1].nodes[2] fn2 = fnmparts[-1].edges[abs(eg)-1].nodes[3] # append the fl. 
nodes in this elem # edge is in the same order as saved if eg > 0: fnmparts[-1].elems[-1].nodes.extend([fn1,fn2]) # edge is in the rev. order as saved else: fnmparts[-1].elems[-1].nodes.extend([fn2,fn1]) ##check elem correctness #for el in fnmparts[-1].elems: # print(str(el.index)+','+str(el.nodes)+','+str(el.edges)) ##check NtN #print(str(fnmparts[-1].NtN)) # read nsets of this part for jns in jnsets: nline = All_lines[jns].rstrip() # remove 'generate' in the line if present if ('generate' in All_lines[jns]): nline = All_lines[jns][0:-11] # add this nset in the list of nsets in this fpart fnmparts[-1].nsets.append( nset( name=nline, rnodes=[], edges=[] ) ) # read nodes in the nset # if generate is used, then calculate all nodes; # otherwise, read all nodes directly if ('generate' in All_lines[jns]): nline = All_lines[jns+1] nl = [] for t in nline.split(','): try: nl.append(int(t)) except ValueError: pass nds = nl[0] # start node ndf = nl[1] # final node try: itv = nl[2] # interval except IndexError: itv = 1 for n in range(nds,ndf+1,itv): fnmparts[-1].nsets[-1].rnodes.append(n) else: # read the lines of nodes in this nset nl = [] # list of node to be filled for n in range(jns+1,jpend): nline = All_lines[n] # break out of loop if end of section encountered if ('*' in nline): break for t in nline.split(','): try: nl.append(int(t)) except ValueError: pass fnmparts[-1].nsets[-1].rnodes.extend(nl) # find the edges involved in this nset, # and include the fl. 
nodes in the nset # extract this nset from fpart nst = fnmparts[-1].nsets[-1] # loop over all node pairs in this nset for n1 in range(len(nst.rnodes)-1): for n2 in range(n1+1,len(nst.rnodes)): rnd = nst.rnodes[n1]-1 cnd = nst.rnodes[n2]-1 # if this node pair forms an edge if (fnmparts[-1].NtN[rnd][cnd]!=0): # get this edge number eg = abs(fnmparts[-1].NtN[rnd][cnd]) #print(' node '+str(rnd)+' node '+str(cnd)+' forms edge '+str(eg)) # store this edge in the nset nst.edges.append(eg) # update this nset in fpart fnmparts[-1].nsets[-1] = nst # read the predelam elset of the fnm part for jels in jlsets: eline = All_lines[jels].rstrip() # remove 'generate' in the line if present if ('generate' in All_lines[jels]): eline = All_lines[jels][0:-11] # add this elset in the list of elsets in this fpart fnmparts[-1].elsets.append( elset( name=eline, elems=[] ) ) # read elems in the elset # if generate is used, then calculate all elems; # otherwise, read all elems directly if ('generate' in All_lines[jels]): eline = All_lines[jels+1] el = [] for t in eline.split(','): try: el.append(int(t)) except ValueError: pass els = el[0] # start elem elf = el[1] # final elem try: itv = el[2] # interval except IndexError: itv = 1 for e in range(els,elf+1,itv): fnmparts[-1].elsets[-1].elems.append(e) else: # read the lines of nodes in this nset el = [] # list of elems to be filled for e in range(jels+1,jpend): eline = All_lines[e] # break out of loop if end of section encountered if ('*' in eline): break for t in eline.split(','): try: el.append(int(t)) except ValueError: pass fnmparts[-1].elsets[-1].elems.extend(el) # store the elset in the predelam list predelam.append(fnmparts[-1].elsets[-1]) # check if fnmparts is one if (len(fnmparts) != 1): print("ERROR: exactly one fnm part is supported!") sys.exit() #================================================== # read assembly section: # - only a single assembly is supported #================================================== assembly = [] # 
find the line no. of *Assembly and store in jassemblies jassemblies = [j for j,line in enumerate(All_lines) if '*Assembly' in line] if (len(jassemblies)!=1): print("ERROR: exactly ONE assembly is supported!") sys.exit() # copy everything of the assembly ja = jassemblies[0] for j in range(ja,lenAll): jline = All_lines[j] assembly.append(jline) if '*End Assembly' in jline: break #print(assembly) #================================================== # read material section: #================================================== materials = [] # find the line no. of *Material and store in materials jmaterials = [j for j,line in enumerate(All_lines) if '*Material' in line] # copy everything of the material for jm in jmaterials: for j in range(jm,lenAll): jline = All_lines[j] materials.append(jline) if '**' in jline: break #================================================== # read (fixed) boundary section: #================================================== bcds = [] # list of all boundary conditions # find the separation line '** -------------' jdash = next(j for j,line in enumerate(All_lines) \ if '** ----------------------------------------------------------------' in line) # find the lines with *boundary jbcds = [j for j in range(0,jdash) if '*Boundary' in All_lines[j]] # loop over all bcds, store them without modification for jb in jbcds: for k in range(jb,jdash): bline = All_lines[k] if ('**' in bline): break bcds.append(bline) #print(bcds) #================================================== # read step # add control parameters #================================================== step = [] jstep = next(j for j, line in enumerate(All_lines) if '*Step' in line) joutput = next(j for j, line in enumerate(All_lines) if '** OUTPUT REQUESTS' in line) step.extend(All_lines[jstep:joutput]) # add control parameters step.append('*Controls, reset\n') step.append('*Controls, parameters=time incrementation\n') step.append('3200, 4000, , 6000, 4800, 50, , 125, , , \n') 
step.append('**\n') #print(step) #================================================== # read output #================================================== output = [] output.extend(All_lines[joutput:]) #print(output) #*************************************************************** # write nodes of the fnm part #*************************************************************** # extract layup of the fnm part blklayup = fnmparts[0].layup # find no. of plyblocks nplyblk = len(blklayup) # find no. of nodes and edges in a ply of this mesh nnode_p = len(fnmparts[0].nodes) nedge_p = len(fnmparts[0].edges) # find internal nodes for an interface of this mesh nnodein = nedge_p # find the total no. of nodes in this mesh nnodett = nplyblk * nnode_p + (nplyblk-1) * nnodein # write fnm_nodes.f90 header fnm_nodes.write('subroutine set_fnm_nodes() \n') fnm_nodes.write('use parameter_module, only: DP, ZERO \n') fnm_nodes.write('use node_list_module, only: node_list \n') fnm_nodes.write('use fnode_module, only: update \n') fnm_nodes.write(' \n') fnm_nodes.write(' integer :: nnode=0 \n') fnm_nodes.write(' \n') fnm_nodes.write(' nnode='+str(nnodett)+' \n') fnm_nodes.write(' allocate(node_list(nnode)) \n') for ipb in range(nplyblk): # calculate the bot and top real node z-coordinate # zbot = 0 if this is the 1st plyblk if (ipb == 0): zbot = 0.0 # zbot = thickness of last plyblk else: zbot = zbot + blklayup[ipb-1].thickness # ztop = zbot + thickness of this plyblk ztop = zbot + blklayup[ipb].thickness # loop over all nodes in the single ply mesh for cntr0, nd in enumerate(fnmparts[0].nodes): # current node id of the node on the ith plyblk cntr = ipb * nnode_p + (cntr0+1) # check if this node is a real node on the bot/top surf, or a fl. 
node # must use cntr0+1 as bot/topnds lists are for nodes in 1st plyblk if ((cntr0+1) in fnmparts[0].botrnds): zz = zbot elif ((cntr0+1) in fnmparts[0].toprnds): zz = ztop else: zz = 0.0 # write this node coords in uel_nodes.inp uel_nodes.write\ (str(cntr)+', '+str(nd.x)+', '+str(nd.y)+', '+str(zz)+'\n') # write this node coords in fnm node_list fnm_nodes.write\ (' call update(node_list('+str(cntr)+'),\ x=['+str(nd.x)+'_DP,'+str(nd.y)+'_DP,'+str(zz)+'_DP],\ u=[ZERO,ZERO,ZERO])\n') # write the additional nodes of interfaces if they're present if (nplyblk > 1): for jintf in range(nplyblk-1): # loop over all edges in the base ply mesh # each edge has one additional node for cntr0 in range(nedge_p): cntr = nplyblk * nnode_p + jintf * nedge_p + cntr0 + 1 # write this node coords in uel_nodes.inp uel_nodes.write\ (str(cntr)+', 0.0, 0.0, 0.0\n') # write this node coords in fnm node_list fnm_nodes.write\ (' call update(node_list('+str(cntr)+'),\ x=[ZERO,ZERO,ZERO],\ u=[ZERO,ZERO,ZERO])\n') fnm_nodes.write('\n') fnm_nodes.write('end subroutine set_fnm_nodes\n') #*************************************************************** # write edges #*************************************************************** # find the total no. 
of edges in this mesh nedgett = nplyblk * nedge_p # write fnm_edges.f90 header fnm_edges.write('subroutine set_fnm_edges() \n') fnm_edges.write('use fedge_module, only: update \n') fnm_edges.write('use edge_list_module, only: edge_list \n') fnm_edges.write(' \n') fnm_edges.write(' integer :: nedge=0 \n') fnm_edges.write(' \n') fnm_edges.write(' nedge='+str(nedgett)+' \n') fnm_edges.write(' allocate(edge_list(nedge)) \n') # update bcd edges for nst in fnmparts[0].nsets: # only constrain the edges in nst with keyword 'tie' if ('tie' in nst.name): # if all the real nodes in this nst are on the bot surface, then # only store the bot plyblk nodes, DO NOT include the other plies if all(n in fnmparts[0].botrnds for n in nst.rnodes): pstart = 0 pend = 1 # if all the real nodes in this nst are on the top surface, then # only store the top plyblk nodes, DO NOT include the other plies elif all(n in fnmparts[0].toprnds for n in nst.rnodes): pstart = nplyblk-1 pend = nplyblk # otherwise, store corresponding nodes of all plyblks else: pstart = 0 pend = nplyblk # find the bcd edges for jpb in range(pstart,pend): for jeg in nst.edges: jedge = jeg + jpb * nedge_p fnm_edges.write(' call update(edge_list('+str(jedge)+'), tie_bcd=.true.) \n') fnm_edges.write('\n') fnm_edges.write('end subroutine set_fnm_edges\n') #*************************************************************** # write elems #*************************************************************** # find no. of elems in a ply mesh nelem_p = len(fnmparts[0].elems) # find total no. of elems in the laminate # it is the same as nelem_p, as a fnm elem contains all plies&interfs nelemtt = nelem_p # find the no. of r+f nodes in an elem of a single plyblk elnndrf_p = len(fnmparts[0].elems[0].nodes) # find the no. of edges in an elem of a single plyblk elnedge_p = len(fnmparts[0].elems[0].edges) # find the no. of r+f nodes in an elem of the laminate elnndrf_l = elnndrf_p * nplyblk # find the no. 
of interface internal nodes in an elem of the laminate elnndin_l = elnedge_p * (nplyblk-1) # find the total no. of nodes in an elem of the laminate elnndtt_l = elnndrf_l + elnndin_l # find the no. of edges in an elem of the laminate elnedge_l = elnedge_p * nplyblk fnm_elems.write('subroutine set_fnm_elems() \n') fnm_elems.write('use parameter_module, only: DP \n') fnm_elems.write('use elem_list_module, only: layup, elem_list,& \n') fnm_elems.write(' & elem_node_connec, elem_edge_connec \n') fnm_elems.write('use fBrickLam_elem_module, only: plyblock_layup, set \n') fnm_elems.write(' \n') fnm_elems.write(' integer :: nelem = 0 \n') fnm_elems.write(' integer :: elnnode = 0 \n') fnm_elems.write(' integer :: elnedge = 0 \n') fnm_elems.write(' integer :: nplyblk = 0 \n') fnm_elems.write(' integer, allocatable :: nodecnc(:), edgecnc(:) \n') fnm_elems.write(' \n') fnm_elems.write(' nelem ='+str(nelemtt)+' \n') fnm_elems.write(' elnnode ='+str(elnndtt_l)+' \n') fnm_elems.write(' elnedge ='+str(elnedge_l)+' \n') fnm_elems.write(' nplyblk ='+str(nplyblk)+' \n') fnm_elems.write(' allocate(elem_list(nelem)) \n') fnm_elems.write(' allocate(elem_node_connec(elnnode,nelem)) \n') fnm_elems.write(' allocate(elem_edge_connec(elnedge,nelem)) \n') fnm_elems.write(' allocate(nodecnc(elnnode)) \n') fnm_elems.write(' allocate(edgecnc(elnedge)) \n') fnm_elems.write(' allocate(layup(nplyblk)) \n') fnm_elems.write(' nodecnc = 0 \n') fnm_elems.write(' edgecnc = 0 \n') fnm_elems.write(' \n') # write layup array for jpb in range(nplyblk): angle = str(blklayup[jpb].angle) nplies = str(blklayup[jpb].nplies) if '.' 
in angle: angle = angle+'_DP' else: angle = angle+'._DP' fnm_elems.write(' layup('+str(jpb+1)+')=plyblock_layup(angle='+angle+',nplies='+nplies+') \n') fnm_elems.write(' \n') for jel in range(nelemtt): elnds_p = [] elegs_p = [] elnds_l = [] elegs_l = [] # find the r+f nodes in this elem ply elnds_p = fnmparts[0].elems[jel].nodes # find the edges in this elem ply elegs_p = fnmparts[0].elems[jel].edges # find the r+f nodes & edges in this elem laminate; # abs is needed to remove the negative sign on some of the edge no. for jpb in range(nplyblk): elnds_l.extend( [ x + nnode_p * jpb for x in elnds_p ] ) elegs_l.extend( [ abs(x) + nedge_p * jpb for x in elegs_p ] ) # find the internal nodes of interfs in this elem laminate # intern. nodes are listed after r and f nodes in the node list # they are assigned to each edges of the interfaces # so the elem's edge connec is used for assignment of intern. nodes if nplyblk > 1: for jit in range(nplyblk-1): elnds_l.extend( [ abs(x) + nnode_p * nplyblk + nedge_p * jit for x in elegs_p ] ) #**** write elem's nodal and edge connec to uel&fnm_elems **** #** node cnc # start the line with elem index jel+1 eline = [str(jel+1)+','] # node cnc dataline for uel_elems fline = [''] # node cnc dataline for fnm_elems cntr = 0 # line entry count for uel_elems # add the node no. to the line one by one for k in elnds_l: # if the uel line gets too long, continue on next line if (len(eline[-1]+str(k)) >= uellinelength) or \ (cntr >= uellinecount): eline.append('') cntr = 0 # add the node no. to the line and update line count eline[-1] = eline[-1]+str(k)+',' cntr = cntr + 1 # if the fnm line gets too long, continue on the next line if (len(fline[-1]+str(k)) >= fnmlinelength): fline.append('') # add the node no. 
to the line fline[-1] = fline[-1]+str(k)+',' # remove the last comma from the eline eline[-1] = eline[-1][:-1] # remove the last comma from the fline fline[-1] = fline[-1][:-1] #** edge cnc gline = [''] # edge cnc dataline for fnm_elems # add the node no. to the line one by one for k in elegs_l: # if the fnm line gets too long, continue on the next line if (len(gline[-1]+str(k)) >= fnmlinelength): gline.append('') # add the node no. to the line gline[-1] = gline[-1]+str(k)+',' # remove the last comma from the fline gline[-1] = gline[-1][:-1] # write the line of elem node connec for l in eline: uel_elems.write(l+'\n') # set this elem in fnm_elems subroutine fnm_elems.write('\n') # write nodecnc array fnm_elems.write(' nodecnc=[ &\n') for l in fline: fnm_elems.write('& '+l+' &\n') fnm_elems.write('& ]\n') # write edgecnc array fnm_elems.write(' edgecnc=[ &\n') for l in gline: fnm_elems.write('& '+l+' &\n') fnm_elems.write('& ]\n') fnm_elems.write(' call set(elem_list('+str(jel+1)+'), NPLYBLKS='+str(nplyblk)+')\n') # write elem_node_connec array fnm_elems.write(' elem_node_connec(:,'+str(jel+1)+')=nodecnc(:)\n') # write elem_edge_connec array fnm_elems.write(' elem_edge_connec(:,'+str(jel+1)+')=edgecnc(:)\n') fnm_elems.write('\n') fnm_elems.write('end subroutine set_fnm_elems\n') #*************************************************************** # write predelam #*************************************************************** # write fnm_predelam.f90 header fnm_predelam.write('subroutine set_fnm_predelam() \n') fnm_predelam.write('use predelam_list_module, only: predelam_elems, predelam_interf \n') fnm_predelam.write(' \n') fnm_predelam.write(' integer :: npdelem \n') fnm_predelam.write(' \n') # check if there is only ONE predelam if (len(predelam) > 1): print("ERROR: more than one predelam is not yet supported!") sys.exit() # write the elem indices in the predelam elset for pd in predelam: npdelem = len(pd.elems) fnm_predelam.write(' npdelem='+str(npdelem)+' \n') 
fnm_predelam.write('\n') fnm_predelam.write(' allocate(predelam_elems(npdelem)) \n') fnm_predelam.write(' allocate(predelam_interf) \n') fnm_predelam.write('\n') # write all elems in the predelam elset for j,jel in enumerate(pd.elems): fnm_predelam.write(' predelam_elems('+str(j+1)+')='+str(jel)+'\n') # ask for the predelam interface no. pdinterf = \ input('Enter the pre-delamination interface, \ 1 means the first interface from the bottom (positive integer number):') while ( not ( isinstance(pdinterf, int) and pdinterf > 0 ) ): pdinterf = \ input('Enter the pre-delamination interface, \ 1 means the first interface from the bottom (positive integer number):') fnm_predelam.write('\n') fnm_predelam.write(' predelam_interf='+str(pdinterf)+'\n') fnm_predelam.write('\n') fnm_predelam.write('end subroutine set_fnm_predelam\n') #*************************************************************** # write uel input file #*************************************************************** #**** write HEADER **** for hline in header: uel_input.write(str(hline[0:])) #**** write ordinary part **** for op in ordparts: for line in op.lines: uel_input.write(str(line[0:])) #**** write FNM PART **** # part name uel_input.write('*Part, name='+fnmparts[0].name+'\n') # part nodes uel_input.write('*NODE,INPUT=uel_nodes.inp \n') # user element definition uel_input.write('*USER ELEMENT, TYPE=U'+str(uelcode)+\ ', NODES='+str(elnndtt_l)+', COORDINATES='+str(ndim)+\ ', PROPERTIES='+str(nprops)+', VARIABLES='+str(nsvars)+'\n') uel_input.write('1,2,3\n') # elements and fnm elset uel_input.write('*ELEMENT, TYPE=U'+str(uelcode)+', ELSET=fnm, INPUT=uel_elems.inp \n') # write the mandatory uel property line (not needed for calculation) uel_input.write('*UEL PROPERTY, ELSET=fnm\n') uel_input.write('1\n') # elset (predelam) does not need to be written here in uel input # nsets nsets = fnmparts[0].nsets for nst in nsets: # write the nset name uel_input.write(nst.name+'\n') # nst dataline for uel_input, to be 
filled, and line count initiated nstline = [''] cntr = 0 # if all the real nodes in this nst are on the bot surface, then # only store the bot plyblk nodes, DO NOT include the other plies if all(n in fnmparts[0].botrnds for n in nst.rnodes): pstart = 0 pend = 1 # if all the real nodes in this nst are on the top surface, then # only store the top plyblk nodes, DO NOT include the other plies elif all(n in fnmparts[0].toprnds for n in nst.rnodes): pstart = nplyblk-1 pend = nplyblk # otherwise, store corresponding nodes of all plyblks else: pstart = 0 pend = nplyblk # find nst nodes in plyblks for jpb in range(pstart,pend): # add the real nodes to the list one by one for n in nst.rnodes: # find the corresponding node on the jpb-th plyblk k = n + jpb * nnode_p # if the uel line gets too long, continue on the next line if (len(nstline[-1]+str(k)) >= uellinelength) or \ (cntr >= uellinecount): nstline.append('') cntr = 0 # add the node no. to the line and update line count nstline[-1] = nstline[-1]+str(k)+',' cntr = cntr + 1 # add the fl. 
nodes to the list one by one, if it is not a tie nst if not ('tie' in nst.name): for eg in nst.edges: k1 = fnmparts[0].edges[eg-1].nodes[2] + jpb * nnode_p k2 = fnmparts[0].edges[eg-1].nodes[3] + jpb * nnode_p # if the uel line gets too long, continue on the next line if (len(nstline[-1]+str(k1)+str(k2)) >= uellinelength) or \ (cntr >= uellinecount): nstline.append('') cntr = 0 # add the nodes to the line nstline[-1] = nstline[-1]+str(k1)+','+str(k2)+',' cntr = cntr + 2 # remove the last comma from the line nstline[-1] = nstline[-1][:-1] # write all original nodes of the nset for nstl in nstline: uel_input.write(nstl+'\n') # end writing fnm part in uel input uel_input.write('*End Part\n') #**** write ASSEMBLY **** for aline in assembly: uel_input.write(str(aline[0:])) #**** write Material **** for mline in materials: uel_input.write(str(mline[0:])) #**** write fixed BCDs **** for bline in bcds: uel_input.write(str(bline[0:])) #**** write step **** for sline in step: uel_input.write(str(sline[0:])) #**** write output **** for oline in output: uel_input.write(str(oline[0:])) #*************************************************************** # close all open files #*************************************************************** # close input files abq_input.close() uel_input.close() # close nodes files fnm_nodes.close() uel_nodes.close() # close edges file fnm_edges.close() # close elems files fnm_elems.close() uel_elems.close() # close predelam file fnm_predelam.close() #*************************************************************** # copy fnm input file to main directory #*************************************************************** # get current working directory cwd = os.getcwd() # parent directory of cwd pwd = os.path.dirname(cwd) # copy fnm input file to parent directory of preprocessing directory (which is assumed to be the working directory) shutil.copy (uelinputfile,pwd) shutil.copy (uelnodesfile,pwd) shutil.copy (uelelemsfile,pwd)
998,640
702b6afc44fe20c3bbeae700fdf3aec662dd923d
""" CP1404/CP5632 Practical File renaming and os examples """ import shutil import os import string # TODO: Fix this def main(): """Demo file renaming with the os module.""" print("Current directory is", os.getcwd()) os.chdir('Lyrics/Lyrics') for dir_name, dir_list, file_list in os.walk("."): for filename in file_list: file_path = dir_name + "\\" + filename new_name = get_fixed_filename(file_path) os.rename(file_path, new_name) def get_fixed_filename(filename): filename = filename.replace(" ", "_").replace(".TXT", ".txt") new_name = "" for i, letter in enumerate(filename): if filename[i -1] == "_": letter = letter.upper() if letter.isupper() and filename[i - 1] != "_" and i != 0 and filename[i -1] not in string.punctuation: letter = "_{}".format(letter) new_name += letter return new_name main()
998,641
b2f3d65c6e6bb1c2f8a7f5c3da4f378e32a2a392
# I'm Yang. It's my first 2D game based on pygame.
"""
@author: Yang Zhou
"""
# Role is a sibling module expected to provide Player, Enemy, pygame,
# the K_*/KEYDOWN/QUIT constants and SCREEN_WIDTH/SCREEN_HEIGHT.
from Role import *

pygame.init()
# set screen
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
filename = 'image/shoot.png'
plane_img = pygame.image.load(filename)
# Custom event fired every 250 ms to spawn a new enemy.
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
player = Player()
background = pygame.image.load('background.bmp').convert()
game_over = pygame.image.load('gameover.bmp')
# load music
pygame.mixer.music.load('sound/game_music.wav')
game_over_sound = pygame.mixer.Sound('sound/game_over.wav')
game_over_sound.set_volume(0.3)
pygame.mixer.music.play(-1, 0.0)  # -1: loop the background track forever
pygame.mixer.music.set_volume(0.25)
# make sprite group
enemies = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
# score at first; incremented by 1/60 per frame, i.e. +1 per second at 60 fps
score = 0
# keep run
running = True
clock = pygame.time.Clock()
while running:
    clock.tick(60)  # cap the main loop at 60 frames per second
    # detect events
    for event in pygame.event.get():
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                running = False
        elif event.type == QUIT:
            running = False
        elif event.type == ADDENEMY:
            new_enemy = Enemy()
            enemies.add(new_enemy)
            all_sprites.add(new_enemy)
    # fill screen backgroound
    screen.fill(0)
    screen.blit(background, (0, 0))
    # NOTE(review): the font object is re-created every frame; hoisting it
    # above the loop would avoid the repeated allocation.
    score_font = pygame.font.Font(None, 36)
    score_text = score_font.render(str(int(score)), True, (128, 128, 128))
    text_rect = score_text.get_rect()
    text_rect.topleft = [10, 10]
    score += 1/60
    screen.blit(score_text, text_rect)
    # once collide happened: kill the player and break straight to the
    # game-over screen below
    if pygame.sprite.spritecollideany(player, enemies):
        player.kill()
        player.is_hit = True
        break
    # if not keep going and draw all things
    pressed_keys = pygame.key.get_pressed()
    player.update(pressed_keys)
    enemies.update()
    for entity in all_sprites:
        screen.blit(entity.image, entity.rect)
    pygame.display.update()
# record the score and show the game-over screen
font = pygame.font.Font(None, 48)
text = font.render('Score: ' + str(int(score)), True, (255, 0, 0))
text_rect = text.get_rect()
text_rect.centerx = screen.get_rect().centerx
text_rect.centery = screen.get_rect().centery + 24
game_over_sound.play()
screen.blit(game_over, (0, 0))
screen.blit(text, text_rect)
# Idle loop: keep the window responsive until the user closes it.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    pygame.display.update()
998,642
afcab117479b6d646285986541cd1217bf980e05
WHITE = "WHITE" BLACK = "BLACK" ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" SIZE = 8 IMAGE_SIZE = 100 from enum import Enum class Color(Enum): RED = (255,0,0) BLUE = (0,0,255) GRAY = (100,100,100) GREEN = (0,255,0) WHITE = (255,255,255) BLACK = (0,0,0) BROWN = (150, 75, 0) DARK_BROWN = (66, 33, 0)
998,643
9ef7f1b4d8e9b71136a65e9f50c51f2ef80ed14d
num1 = input("Enter a number: ") num2= input("Enter another decimal: ") result = int(num1) + float (num2) print (result) num3 = input("Enter a number: ") num4 = input("Enter a number: ") result= int(num3) + int(num4) print (result) num5 = input("Enter a decimal: ") num6 = input("Enter a decimal: ") result = float(num5) + float(num6) print (result)
998,644
17e7aa04f4524509c516413de01b6013716a2fab
#!/usr/bin/env python
"""Command-line entry point: batch-convert UC spectra to IR spectra."""

import os
import argparse as ap

import pandas as pd

from uc.batch import batch_process


def main():
    """Parse CLI options, run the batch conversion and print a summary."""
    cli = ap.ArgumentParser(
        prog='ucmod',
        description='Batch tool for converting UC spectra to IR spectra.')

    # Path-valued options
    cli.add_argument('-s', '--spectra', type=str,
                     help="Path to folder containing UC spectra.")
    cli.add_argument('-d', '--dark', type=str,
                     help="Path to dark spectrum.")
    cli.add_argument('-r', '--reference', type=str,
                     help="Path to reference spectrum.")
    cli.add_argument('-o', '--out', type=str,
                     help="output file")

    # Flag and choice options
    cli.add_argument('--header', action="store_true",
                     help='Spectrum files contain header metadata.')
    cli.add_argument('-t', '--type', type=str,
                     choices=['A', 'a', 'R', 'r'], default='A',
                     help='Desired spectrum type e.g lg(Reflectance) [A, a] or Reflectance [R, r].')

    opts = cli.parse_args()

    # Run the conversion (keyword names follow batch_process's signature,
    # including the original "measurments" spelling).
    batch_process(
        dark=opts.dark,
        reference=opts.reference,
        measurments=opts.spectra,
        output=opts.out,
        header=opts.header,
        spec_type=opts.type)

    # Confirmation message
    n_spectra = len(os.listdir(opts.spectra))
    label_for = {'a': 'Log(R)', 'r': 'Reflectance'}
    print(f'Processed {n_spectra} UC spectra to IR-{label_for[opts.type.lower()]}'
          f' and saved to "{opts.out}".')


if __name__ == '__main__':
    main()
998,645
738c1a0d893fda11408cf750e1ca24ef8d4a99e3
"""Pytest suite for dazzler's Component/Aspect system (serialisation,
defaults, docstring generation)."""
import os
import re

import pytest

from dazzler.system import Component, Aspect

from . import spec_components as spec


def test_simple_component():
    # Sanity test.
    class SimpleComponent(Component):
        simple = Aspect()

    component = SimpleComponent({'simple': 'foo'})

    assert component.simple == 'foo'


def test_missing_required_aspect():
    # Missing required props should raise an informative Error.
    with pytest.raises(TypeError) as context:
        # pylint: disable=no-value-for-parameter
        spec.TestComponent()

    assert context.value.args[0] == "__init__() missing 1 required positional argument: 'required_string'"  # noqa: E501


def test_undefined_aspects():
    # Aspects never set should not appear in the prepared payload.
    t = spec.TestComponent('')
    output = t._prepare()
    assert 'string_prop' not in output['aspects']


def test_null_aspects():
    # Test aspects can be set to None and included in prepare if not required.
    t = spec.TestComponent('', string_prop=None)
    output = t._prepare()
    assert output['aspects']['string_prop'] is None


def test_default_aspects_not_in_prepare():
    # Default aspects should not be in output
    # as they are handled on the frontend
    component = spec.TestComponent('')
    output = component._prepare()
    assert 'string_with_default' not in output['aspects']


def test_default_aspect_can_be_changed():
    # Default aspects can be changed
    component = spec.TestComponent('')
    component.string_with_default = 'Non default'
    assert component._prepare()['aspects']['string_with_default'] == 'Non default'  # noqa: E501


@pytest.mark.skip('No initial bind trigger (yet) & logic for this was slow.')
def test_default_aspect_set_default():
    # Set the default value should be included.
    component = spec.TestComponent('', string_with_default='Foo')
    output = component._prepare()
    assert 'string_with_default' in output['aspects']


def test_default_aspect_set_default_after_init():
    # Set after init with default should be included
    component = spec.TestComponent('')
    component.string_with_default = 'Foo'
    output3 = component._prepare()
    assert output3['aspects']['string_with_default'] == 'Foo'


@pytest.mark.parametrize('prop_name, prop_default', [
    ('string_default', "'Default string'"),
    ('string_default_empty', "''"),
    ('number_default', 0.2666),
    ('number_default_empty', 0),
    ('array_default', r'\[1, 2, 3\]'),
    ('array_default_empty', r'\[\]'),
    ('object_default', "{foo: 'bar'}"),
    ('object_default_empty', '{}'),
])
def test_default_props_docstring(prop_name, prop_default):
    # Test default props are formatted in the docstring.
    pattern = r':param {}:.*\(default={}\)'.format(prop_name, prop_default)
    assert re.search(pattern, str(spec.DefaultProps.__init__.__doc__))


def test_enum_docstring():
    # Enum aspects should document their allowed values.
    assert ":param enum_prop: (Possible values: 'News', 'Photos')" \
        in spec.TestComponent.__init__.__doc__


@pytest.mark.parametrize('aspect, value', [
    ('string_prop', 'string value'),
    ('number_prop', 1),
    ('number_prop', 1.5),
    ('bool_prop', True),
    ('object_prop', {'foo': 'bar'}),
    ('children', spec.TestComponent('', id='tc')),
])
def test_set_aspect(aspect, value):
    component = spec.TestComponent('')
    setattr(component, aspect, value)
    output = component._prepare()
    # NOTE(review): the conditional expression binds looser than ==, so for a
    # Component value this asserts the truthiness of value._prepare() rather
    # than comparing it to the output — confirm this is intended.
    assert output['aspects'][aspect] == value \
        if not isinstance(value, Component) else value._prepare()


def test_docstring_length():
    # The length of each line of the generated docstring should be < 80
    docstring = spec.TestComponent.__init__.__doc__.split(os.linesep)

    for line in docstring:
        assert len(line) < 80, f'len({line}) > 79'
        assert '(default=UNDEFINED)' not in line
998,646
732ad8d1edc0bc4f591e96f2586472e3f05c53a7
"""Flask application configuration classes."""
import os

# Directory containing this file; anchors the SQLite database paths.
basedir = os.path.abspath(os.path.dirname(__file__))


class Config:
    """Base configuration shared by every environment."""
    WTF_CSRF_ENABLED = False
    # Prefer a key from the environment; the hard-coded value is only a
    # development fallback and must not be used in production.
    SECRET_KEY = os.environ.get(
        'SECRET_KEY', 'this-place-we-should-put-some-random-key')
    # NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is deprecated in recent
    # Flask-SQLAlchemy releases — confirm against the pinned version.
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False


class DevelopmentConfig(Config):
    """Development environment: local SQLite database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')


class TestConfig(Config):
    """Test environment: separate SQLite database and migration repo."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'test.db')
    SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'test_db_repository')


# Name -> config class lookup used by the app factory.
config = {
    'dev': DevelopmentConfig,
    'default': DevelopmentConfig,
    'test': TestConfig
}
998,647
a584319b9fb02458acf2c0a44fae247f7121e0a4
def BaseM2Num(N):
    """Return the base -2 (negabinary) digits of integer N, most
    significant digit first.

    Works for positive, negative and zero N; every digit is 0 or 1.
    """
    if N == 0:
        return [0]
    digits = []
    # abs(N) > 0 in the original is just N != 0.
    while N != 0:
        if N % 2 == 0:
            N //= (-2)
            digits.append(0)
        else:
            # Remove the remainder 1 first so the division by -2 is exact.
            N = (N - 1) // (-2)
            digits.append(1)
    # Digits were produced least-significant first; reverse them.
    return digits[::-1]


if __name__ == "__main__":
    # Guarded so the function can be imported and tested without blocking
    # on stdin (the original called input() at module level).
    result = BaseM2Num(int(input()))
    print(''.join(str(T) for T in result))
998,648
09434acb9fb42bc333181db80e092acbe740a897
from pymongo import MongoClient # passwo = "123" # mail = "hyderdanyal@gmail.com" def change(passwo, mail): print(passwo) client = MongoClient() client = MongoClient('localhost', 27017) db = client.vickytailor authentication = db.authentication # distinctemail = db['authentication'].distinct("email") for data in authentication.find(): print(data) print("mail", mail) print("email", data['email']) print(data['password']) if (mail == data['email']): print("same") authentication.update_one( {"email": mail}, {"$set": {"password": passwo}}) print("new password", data["password"]) # data['password'] == passwo # print("changed", mail) # return ("Password Change Successful") # else: # print("didnt", mail) # return ("Password Didnt change", data['email']) else: print("lalala")
998,649
8fe2cd3cdfcbba4c6ff57a79c586d772c2017749
import turtle
import math

pen = turtle.Turtle()
pen.speed(0)  # 0 = fastest drawing speed


def starfish(n=None):
    """Recursively draw a spiral: step forward 40, turn by
    sin(n)*cos(n) + n degrees, recurse with n + 30.

    Stops when n is a non-zero multiple of 360 (0 % 360 is falsy too, but
    the n-is-None default maps to 0 below).
    """
    if n is not None and not n % 360:
        return
    if n is None:
        n = 0
    pen.forward(40)
    pen.left(math.sin(n) * math.cos(n) + n)
    starfish(n + 30)


# NOTE(review): starting at 179 and stepping by 30, n is never a multiple of
# 360, so the recursion actually ends via RecursionError rather than the base
# case — confirm this is the intended way to bound the drawing.
starfish(179)
turtle.done()
998,650
4c001ae1e9747c7d160982d1b391a6ea9ebe147d
# Buffer pH adjuster
# www.scienceexposure.com
# Ismail Uddin, 2015
# This has sections adapted from the sample code provided by Atlas Scientific
# provided here: http://atlas-scientific.com/_files/code/pi_sample_code.pdf

from rrb2 import *
import serial
import time  # bug fix: time.sleep is used below but time was never imported

rr = RRB2()

# pH value to set buffer at.
# Bug fix: the original assigned the prompt *string* itself instead of
# calling input(), so the while-loop comparison could never succeed.
ph_set = float(input("Input the value of pH you would like set as a digit:"))
print("Press Ctrl-C to stop adjusting the pH")

# Initialising EZO pH circuit
usbport = '/dev/ttyAMA0'
ser = serial.Serial(usbport, 9600)


def ph_reading():
    """Read characters from the EZO circuit until the carriage-return
    terminator and return the parsed pH value as a float.

    Bug fixes vs. the original: the terminator is "\r" (it compared against
    the literal "/r", which never matches), and the function now loops until
    a full line is read instead of returning an unbound value after one byte.
    """
    line = ""
    while True:
        data = ser.read()
        # NOTE(review): under Python 3 pyserial returns bytes; decode so the
        # comparison below works on both Python 2 and 3.
        if isinstance(data, bytes):
            data = data.decode("ascii")
        if data == "\r":
            return float(line)
        line = line + data


# Bug fix: the original did `curr_ph = ph_reading` (no call), binding the
# function object instead of a reading.
curr_ph = ph_reading()
# NOTE(review): exact float equality as the stop condition is fragile; a
# tolerance band would be more robust, but the original semantics are kept.
while ph_set != curr_ph:
    curr_ph = ph_reading()
    print("Current pH: %s" % curr_ph)
    if curr_ph < ph_set:
        # Turn on pump supplying alkali
        rr.set_motors(0.25, 0, 0, 0)
        print("Dispensing alkali...")
        time.sleep(0.35)
        rr.stop()
    elif curr_ph > ph_set:
        # Turn on pump supplying acid
        rr.set_motors(0, 0, 0.25, 0)
        print("Dispensing acid...")
        time.sleep(0.35)
        rr.stop()
    time.sleep(0.5)
998,651
c548f8e9988c9f3517aa9f74c636841f2dfe6935
#!/usr/bin/env python import RPi.GPIO as GPIO import time import signal import atexit #atexit.register(GPIO.cleanup) SERVO=5 GPIO.setmode(GPIO.BCM) GPIO.setup(SERVO, GPIO.OUT, initial=False) p = GPIO.PWM(SERVO,50) #50HZ p.start(0) time.sleep(1) DURATION=0.15 SLEEP=0.02 #MAX=181 BEGIN=70 MAX=171 while(True): for i in range(BEGIN,MAX,10): p.ChangeDutyCycle(2.5 + 10 * i / 180) time.sleep(SLEEP) p.ChangeDutyCycle(0) time.sleep(DURATION) for i in range(MAX,BEGIN,-10): p.ChangeDutyCycle(2.5 + 10 * i / 180) time.sleep(SLEEP) p.ChangeDutyCycle(0) time.sleep(DURATION)
998,652
64eea34e525256a46d67d3b54d7fa15d366a9956
# -*- coding: utf-8 -*- """ Created on Thu Jun 07 16:44:20 2018 @author: sdenaro """ # this file runs the Willamette operational model following the settings # specified in the file settings.xml and plots/validates the results import matplotlib.pyplot as plt import pandas as pd from pandas.plotting import autocorrelation_plot from pandas import ExcelWriter import numpy as np import scipy as sp from scipy import stats from scipy.interpolate import interp2d from sklearn import linear_model from sklearn.metrics import r2_score import xmltodict as xmld import datetime as dt import Willamette_model as inner #reading in the inner function import os import pylab as py import sys #%% T=365 #Set the simulation horizon (in days) initial_doy=1 #Set the simulation initial_doy sys.argv = ["settings.xml", str(initial_doy), str(T)] #simulation inputs execfile("Willamette_outer.py")
998,653
d468bb0121cf0657c2c67886bceeabf61d2f5e40
#!/usr/bin/env python
"""Drive the LEDs on BCM pins 23 and 24 high for one second, then off."""
import RPi.GPIO as GPIO
import time

# The two BCM pin numbers this script controls, in configuration order.
PINS = (23, 24)


def main():
    """Configure both pins as outputs, pulse them high, then clean up."""
    print("starting")
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    for pin in PINS:
        GPIO.setup(pin, GPIO.OUT)
    for pin in PINS:
        GPIO.output(pin, True)
    time.sleep(1)
    for pin in PINS:
        GPIO.output(pin, False)
    GPIO.cleanup()


if __name__ == "__main__":
    main()
998,654
bfac0d0a6e7d813d9e057c4756c511c67b68397a
from django.db import models # Create your models here. # 用户 class User(models.Model): class Meta: db_table = '用户' # id = models.AutoField(primary_key=True) name = models.CharField(max_length=100, verbose_name='昵称', null=False) accountNumber = models.CharField(max_length=50, verbose_name='账户', null=False) password = models.CharField(max_length=200, verbose_name='密码', null=False) status = models.CharField(max_length=5, verbose_name='状态', null=False) createTime = models.DateTimeField(auto_now_add=True,verbose_name='创建时间')
998,655
92e4ab011abffb9b796f8518c670f0a83fdaf241
# https://gist.github.com/Arachnid/491973
# Levenshtein-automaton fuzzy search: build an NFA accepting all strings
# within edit distance k of a term, determinise it, and walk an ordered
# corpus using next_valid_string() to skip non-matching regions.
import bisect
from abc import ABC, abstractmethod
from typing import List, TypeVar, Optional, Iterable, Generic

from typing_extensions import Protocol


class SupportsStr(Protocol):
    """
    word object that's fed into the automaton
    """

    def __str__(self) -> str:
        """
        :return: the word
        """
        ...


class NFA:
    """Non-deterministic finite automaton with epsilon and wildcard edges."""

    # Sentinel edge labels: EPSILON consumes no input, ANY matches any char.
    EPSILON = object()
    ANY = object()

    def __init__(self, start_state):
        self.transitions = {}       # state -> {input: set(dest states)}
        self.final_states = set()
        self._start_state = start_state

    @property
    def start_state(self):
        # Start set = epsilon-closure of the single start state.
        return frozenset(self._expand({self._start_state}))

    def add_transition(self, src, input, dest):
        self.transitions.setdefault(src, {}).setdefault(input, set()).add(dest)

    def add_final_state(self, state):
        self.final_states.add(state)

    def is_final(self, states):
        # Truthy iff any state in the set is accepting.
        return self.final_states.intersection(states)

    def _expand(self, states):
        # In-place epsilon-closure of `states`.
        frontier = set(states)
        while frontier:
            state = frontier.pop()
            new_states = (
                self.transitions.get(state, {})
                .get(NFA.EPSILON, set())
                .difference(states)
            )
            frontier.update(new_states)
            states.update(new_states)
        return states

    def next_state(self, states, input):
        # Move on `input` (including ANY edges), then epsilon-close.
        dest_states = set()
        for state in states:
            state_transitions = self.transitions.get(state, {})
            dest_states.update(state_transitions.get(input, []))
            dest_states.update(state_transitions.get(NFA.ANY, []))
        return frozenset(self._expand(dest_states))

    def get_inputs(self, states):
        # All edge labels leaving any state in the set.
        inputs = set()
        for state in states:
            inputs.update(self.transitions.get(state, {}).keys())
        return inputs

    def to_dfa(self):
        # Standard subset construction; ANY edges become the DFA's
        # per-state default transition.
        dfa = DFA(self.start_state)
        frontier = [self.start_state]
        seen = set()
        while frontier:
            current = frontier.pop()
            inputs = self.get_inputs(current)
            for input in inputs:
                if input == NFA.EPSILON:
                    continue
                new_state = self.next_state(current, input)
                if new_state not in seen:
                    frontier.append(new_state)
                    seen.add(new_state)
                    if self.is_final(new_state):
                        dfa.add_final_state(new_state)
                if input == NFA.ANY:
                    dfa.set_default_transition(current, new_state)
                else:
                    dfa.add_transition(current, input, new_state)
        return dfa


class DFA:
    """Deterministic automaton supporting lexicographic successor queries."""

    def __init__(self, start_state):
        self.start_state = start_state
        self.transitions = {}   # state -> {input: dest}
        self.defaults = {}      # state -> dest for any otherwise-unmatched input
        self.final_states = set()

    def add_transition(self, src, input, dest):
        self.transitions.setdefault(src, {})[input] = dest

    def set_default_transition(self, src, dest):
        self.defaults[src] = dest

    def add_final_state(self, state):
        self.final_states.add(state)

    def is_final(self, state):
        return state in self.final_states

    def next_state(self, src, input):
        state_transitions = self.transitions.get(src, {})
        return state_transitions.get(input, self.defaults.get(src, None))

    def next_valid_string(self, input):
        """Return the lexicographically smallest accepted string >= input,
        or None if there is none."""
        state = self.start_state
        stack = []
        # Evaluate the DFA as far as possible
        for i, x in enumerate(input):
            stack.append((input[:i], state, x))
            state = self.next_state(state, x)
            if not state:
                break
        else:
            stack.append((input[: i + 1], state, None))
        if self.is_final(state):
            # Input word is already valid
            return input
        # Perform a 'wall following' search for the lexicographically smallest
        # accepting state.
        while stack:
            path, state, x = stack.pop()
            x = self.find_next_edge(state, x)
            if x:
                path += x
                state = self.next_state(state, x)
                if self.is_final(state):
                    return path
                stack.append((path, state, None))
        return None

    def find_next_edge(self, s, x):
        """Smallest outgoing edge label from state s strictly greater than x
        (or the smallest overall when x is None)."""
        if x is None:
            x = u"\0"
        else:
            x = chr(ord(x) + 1)
        state_transitions = self.transitions.get(s, {})
        if x in state_transitions or s in self.defaults:
            return x
        labels = sorted(state_transitions.keys())
        pos = bisect.bisect_left(labels, x)
        if pos < len(labels):
            return labels[pos]
        return None


def levenshtein_automata(term: str, k):
    """Build the NFA accepting every string within edit distance k of term.

    States are (position in term, errors so far)."""
    nfa = NFA((0, 0))
    for i, c in enumerate(term):
        for e in range(k + 1):
            # Correct character
            nfa.add_transition((i, e), c, (i + 1, e))
            if e < k:
                # Deletion
                nfa.add_transition((i, e), NFA.ANY, (i, e + 1))
                # Insertion
                nfa.add_transition((i, e), NFA.EPSILON, (i + 1, e + 1))
                # Substitution
                nfa.add_transition((i, e), NFA.ANY, (i + 1, e + 1))
    for e in range(k + 1):
        term_len = len(term)
        if e < k:
            # Trailing insertions past the end of the term.
            nfa.add_transition((term_len, e), NFA.ANY, (term_len, e + 1))
        nfa.add_final_state((term_len, e))
    return nfa


T = TypeVar("T", covariant=True)


class OrderedIterableCorpus(Generic[T], ABC):
    """
    A corpus where T is the type of component
    """

    @abstractmethod
    def get_next_smaller(self, lookup_string: str) -> Optional[SupportsStr]:
        """
        needed to perform fuzzy search
        """
        ...

    @abstractmethod
    def strings_to_elements(self, results: List[str]) -> Iterable[T]:
        """
        how to convert the result strings to the
        """
        ...


WordType = TypeVar("WordType")


class FuzzySearcher(Generic[WordType]):
    """Fuzzy lookup over an OrderedIterableCorpus via a Levenshtein DFA."""

    def __init__(self, words: OrderedIterableCorpus[WordType]):
        """
        :param words: a list of database entries you want to include
        """
        self._words = words

    def search(self, word_string: str, k: int) -> Iterable[WordType]:
        """Uses lookup_func to find all words within levenshtein distance k
        of word.

        Args:
            word_string: The word to look up
            k: Maximum edit distance

        Yields:
            Every matching word within levenshtein distance k from the
            database.
        """
        lev = levenshtein_automata(word_string, k).to_dfa()
        # Leapfrog between the automaton and the ordered corpus: each side
        # advances the other to its next candidate >= the current string.
        match = lev.next_valid_string(u"\0")
        strings = []
        while match:
            next_word = self._words.get_next_smaller(match)
            if not next_word:
                break
            next_word_text = str(next_word)
            if match == str(next_word):
                strings.append(match)
                # Bump past the exact match so the search continues after it.
                next_word_text += u"\0"
            match = lev.next_valid_string(next_word_text)
        return self._words.strings_to_elements(strings)
998,656
bb56a6238020390962e1934330f07d533927a585
import torch
import torch.nn as nn


class InterpretableClassifier(nn.Module):
    """Classification head mapping per-channel features to class scores.

    Three variants, selected by ``classifier``:
      * ``'fc'`` (default): two Linear+ReLU hidden layers (width 200)
        followed by a Linear output layer.
      * ``'simplefc'``: a single Linear layer.
      * ``'gap'``: parameter-free mean over dim 1. NOTE: the output then has
        no class dimension — it is the per-sample channel average, matching
        the original behavior.

    The duplicated commented-out copy of this class that preceded it in the
    original file has been removed as dead code.
    """

    def __init__(self, num_channels, num_classes, classifier='fc'):
        super(InterpretableClassifier, self).__init__()
        self.classifier = classifier
        if self.classifier == 'simplefc':
            self.fc_simple = nn.Linear(num_channels, num_classes)
        else:
            # Attribute names fc1/relu1/fc2/relu2/fc are kept exactly so
            # existing checkpoints continue to load. Note the 'gap' variant
            # also constructs these layers (as in the original), even though
            # forward() never uses them.
            self.fc1 = nn.Linear(num_channels, 200)
            self.relu1 = nn.ReLU()
            self.fc2 = nn.Linear(200, 200)
            self.relu2 = nn.ReLU()
            self.fc = nn.Linear(200, num_classes)

    def forward(self, x):
        """Return scores for ``x`` of shape (batch, num_channels)."""
        if self.classifier == 'gap':
            return x.mean(1)
        if self.classifier == 'simplefc':
            return self.fc_simple(x)
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        return self.fc(out)
998,657
7614c3c61385796174a8b8b8b9046a13c539dd70
#################################################################################
# PINBOT 2.0 - LAUNCHER MODE
#
# A P-ROC Project by Dan Myers, Copyright 2013-2014
# Built on the PyProcGame Framework from Adam Preble and Gerry Stellenberg
# Thanks to Scott Danesi for his Earthshaker Project, which is my starting point
#################################################################################

import procgame.game
from procgame import *
import pinproc


class Launcher_Mode(game.Mode):
    """Attract/launch mode: flippers enabled, most playfield switches
    swallowed until a game actually starts."""

    def __init__(self, game):
        # Priority 3 within the mode queue.
        super(Launcher_Mode, self).__init__(game=game, priority=3)

    def mode_started(self):
        # Energise the flipper enable relay while this mode is active.
        self.game.coils.flipperEnable.enable()

    def mode_stopped(self):
        pass

    def sw_startButton_active_for_50ms(self, sw):
        # Game start hook; the trough check is not wired up yet.
        #if self.troughIsFull()==True:
        #Game Starting
        #self.game.modes.remove(self)
        return procgame.game.SwitchContinue

    def sw_outhole_active_for_1s(self, sw):
        """Ball drained into the outhole: shut down, kick the ball back and
        show the game-over message."""
        self.game.coils.acSelect.disable()
        self.game.coils.flipperEnable.disable()
        self.game.coils.outholeKicker_Knocker.pulse(50)
        self.game.score_display.set_text("GAME OVER",0)
        self.game.score_display.set_text("Press Start",1)
        return procgame.game.SwitchStop

    # Swallow scoring-switch events while no game is in progress.
    def sw_jetLeft_active(self, sw):
        return procgame.game.SwitchStop

    def sw_jetRight_active(self, sw):
        return procgame.game.SwitchStop

    def sw_jetTop_active(self, sw):
        return procgame.game.SwitchStop

    def sw_slingL_active(self, sw):
        return procgame.game.SwitchStop

    def sw_slingR_active(self, sw):
        return procgame.game.SwitchStop
998,658
ad0ec59e8424b529a5c22a6686696056026056be
__doc__ = '''Comp 305 final project
Group No: 21
Members:
Altay Atalay
Andrew Bond
Project Name: Pokemon
'''
from util import *


def findLargestVertex(road_network: dict, cities_list: list) -> str:
    """Return the city in cities_list with the most road connections.

    Returns None for an empty cities_list (the original fell through with a
    bare ``return``); callers below never pass an empty list.
    """
    if not cities_list:
        return None
    largestVertex = cities_list[0]
    numEdges = len(road_network[largestVertex])
    # Iterate the remaining cities directly instead of indexing by range.
    for city in cities_list[1:]:
        degree = len(road_network[city])
        if degree > numEdges:
            largestVertex = city
            numEdges = degree
    return largestVertex


def helper(road_network: dict, cities_list: list):
    """Greedy dominating-set construction: repeatedly take the
    highest-degree remaining city as a center and remove it together with
    its adjacent cities. Mutates cities_list."""
    centersList = []
    while cities_list:
        largestVertex = findLargestVertex(road_network, cities_list)
        centersList.append(largestVertex)
        cities_list.remove(largestVertex)
        for city in road_network[largestVertex]:
            # Idiomatic membership test (original called __contains__).
            if city in cities_list:
                cities_list.remove(city)
    return centersList


@timer('Greedy algorithm : ')
def can_be_made_tournament_ready(road_network: dict, num_cities: int) -> list:
    '''The Algorithm

    Greedy approximation: returns a list of center cities if at most
    num_cities suffice; returns None otherwise (implicit, preserved from
    the original) and [] when num_cities == 0.
    '''
    if type(num_cities) not in [int, float] or num_cities < 0:
        raise ValueError(f'num cities must be a positive int')
    rv: list = []  # return value, list of cities
    if num_cities == 0:
        return rv
    centersList = helper(road_network, list(road_network.keys()))
    if len(centersList) <= num_cities:
        print("The approximation algorithm found a solution that satisfies the number of cities constraint.")
        return centersList


if __name__ == '__main__':
    num_cities: int = 10

    print('Testing with pokemon-center-test1.txt')
    road_network = network_from_file('pokemon-center-test1.txt')
    #print(road_network)
    result = can_be_made_tournament_ready(road_network, num_cities)
    print(f'result = {result}')

    print('\n\nTesting with pokemon-center-test2.txt')
    road_network = network_from_file('pokemon-center-test2.txt')
    result = can_be_made_tournament_ready(road_network, num_cities)
    print(f'result = {result}')
998,659
153bfafba82c56af6f06cc116c59167f68b4dcd1
# Cryptopals challenge 6 helper (Python 2): guess the repeating-XOR key size
# by normalized Hamming distance, then print the ciphertext transposed into
# key-size blocks.

def s2b(inp):
    """Return the 8-bit binary string representation of each character of
    inp, concatenated."""
    bn = []
    for i in inp:
        b = bin(ord(i)).replace("0b", '')
        if len(b) < 8:
            # left-pad to a full byte
            b = "0"*(8 - len(b)) + b
        bn.append(b)
    res = "".join(bn)
    return res

def Hamming(in1, in2):
    """Bitwise distance between two equal-length bit strings.

    NOTE(review): res collects the *running* count after every bit and ttl
    sums those running counts, so later differing bits weigh more than a
    plain Hamming distance — confirm this weighting is intended.
    """
    cnt = 0
    res = []
    for i,j in zip(in1, in2):
        tmp = int(i) ^ int(j)
        if tmp == 1:
            cnt += 1
        res.append(cnt)
    ttl = sum(res)
    return ttl

def edt(d, k):
    """Return the candidate key size in k with the smallest normalized
    block-to-block distance over data d."""
    tes = {}
    for i in k:
        res = []
        for j in range(0, len(d), i):
            # NOTE(review): this bound reads the module-level global `data`
            # instead of the parameter `d` — almost certainly a bug.
            if j+i < len(data):
                b1 = s2b(d[j:(j+i)])
                b2 = s2b(d[j+i:(j+i)+i])
                tmp = Hamming(b1, b2)
                res.append(tmp)
        # NOTE(review): normalizes by the *last* loop value of j, not the
        # number of comparisons.
        tes[i] = sum(res)/(i*j)
    return min(tes, key=tes.get)

def block_key(df, ky):
    """Split df into ky-sized blocks and print them transposed
    (column i of every block), one key position per line.

    Prints as a side effect and returns None.
    """
    v29 = {}
    blk = {}  # NOTE(review): unused
    cnt = 1
    for i in range(0,len(df)+1, ky):
        if i+ky <= len(df)+1:
            v29[cnt] = df[i:i+ky]
            cnt += 1
        else:
            v29[cnt] = df[i:]
    for i in range(0, ky):
        for j in v29.values():
            print j[i],

file = open("chal6inp.txt", "r")
data = file.read()
strl = []
KEYSIZE = range(2, 41)  # candidate key lengths to test
# strip newlines from the base64 input
for x in data:
    if x != '\n':
        strl.append(x)
strc = "".join(strl)
# NOTE(review): block_key returns None, so this also prints "None" at the end.
print block_key(strc, edt(strc.decode("base64"), KEYSIZE))
file.close()
998,660
afd22bd66083abf964c0d35221b53cd24da63cf6
"""Drive a BrickPi robot around a square: forward ~40cm then a 90-degree
left turn, four times."""
import brickpi
import time

interface=brickpi.Interface()
interface.initialize()

motors = [0,1]  # left and right drive motor ports

interface.motorEnable(motors[0])
interface.motorEnable(motors[1])

# PID motor controller tuning, applied identically to both motors.
motorParams = interface.MotorAngleControllerParameters()
motorParams.maxRotationAcceleration = 6.0
motorParams.maxRotationSpeed = 12.0
motorParams.feedForwardGain = 255/17.0
motorParams.minPWM = 25.0
motorParams.pidParameters.minOutput = -255
motorParams.pidParameters.maxOutput = 255
motorParams.pidParameters.k_p = 600.0
motorParams.pidParameters.k_i = 500.0
motorParams.pidParameters.k_d = 200.0

interface.setMotorAngleControllerParameters(motors[0],motorParams)
interface.setMotorAngleControllerParameters(motors[1],motorParams)


def move(angle):
    """Rotate both wheels forward by `angle` radians, blocking (polling at
    10 Hz) until the reference is reached."""
    interface.increaseMotorAngleReferences(motors,[angle,angle])
    while not interface.motorAngleReferencesReached(motors) :
        time.sleep(0.1)
    return


def left90deg():
    """Turn ~90 degrees left by counter-rotating the wheels; 5.4 rad is the
    calibrated wheel rotation for this chassis."""
    angle = 5.4
    interface.increaseMotorAngleReferences(motors,[-angle,angle])
    while not interface.motorAngleReferencesReached(motors) :
        time.sleep(0.1)
    return


def right90deg():
    """Turn ~90 degrees right (mirror of left90deg)."""
    angle = -5.4
    interface.increaseMotorAngleReferences(motors,[-angle,angle])
    while not interface.motorAngleReferencesReached(motors) :
        time.sleep(0.1)
    return


# Trace a square: four (forward, left turn) legs with 1 s pauses.
move(19.1)
time.sleep(1)
left90deg()
time.sleep(1)
move(19.1)
time.sleep(1)
left90deg()
time.sleep(1)
move(19.1)
time.sleep(1)
left90deg()
time.sleep(1)
move(19.1)
time.sleep(1)
left90deg()
time.sleep(1)

interface.terminate()
998,661
4bee6657d95f3572ef4cc46d02da28db8c35ad68
class Solution(object):
    def lengthOfLastWord(self, s):
        """Return the length of the last space-delimited word in s
        (trailing spaces ignored; 0 if there is no word)."""
        trimmed = s.rstrip(' ')
        # The last word starts right after the final space (rfind returns
        # -1 when there is none, which makes the arithmetic cover the
        # whole-string and empty-string cases).
        return len(trimmed) - trimmed.rfind(' ') - 1
998,662
af8af7bcd1ff056e3ea46c7f2717b6b15b412bd7
# -*- coding: utf-8 -*- import unittest from common.myunit import StartEnd from page.home_page import MePage class Test_Personal_Center(StartEnd): def test_1message_center(self): M=MePage(self.driver) M.message_center() self.assertTrue(M.check_message_center()) def test_2nickname_edit(self): M = MePage(self.driver) M.nickname_editor() self.assertTrue(M.check_nickname_editor()) def test_3real_name(self): M = MePage(self.driver) M.input_real_name() self.assertTrue(M.check_input_real_name()) def test_4choose_sex(self): M = MePage(self.driver) M.choose_sex() self.assertTrue(M.check_choose_sex()) M.click_back() def test_5my_account(self): M = MePage(self.driver) M.my_account() if __name__ == '__main__': unittest.main(verbosity=2) # suite = unittest.TestSuite() # suite.addTest(Test_Personal_Center("test_1message_center")) # suite.addTest(Test_Personal_Center("test_3real_name")) # suite.addTest(Test_Personal_Center("test_2nickname_edit")) # suite.addTest(Test_Personal_Center("test_4choose_sex")) # suite.addTest(Test_Personal_Center("test_5my_account"))
998,663
c734611d0bcbc0bfb169c315180ae97c0bc1edc5
from django import forms from django.utils.translation import ugettext as _ from .models import Schedule # class AddEventForm(forms.ModelForm): # group = forms.CharField() # lesson = forms.CharField() # date_start = forms.CharField() # date_end = forms.CharField() # class Meta: # model = Schedule # fields = ('group', 'lesson', 'date_start', 'date_end')
998,664
f299b8b9b91121d025f32d3c3a430f228efb8ac0
# -*- coding: utf-8 -*-
import importlib
import logging
import os
import time

import click

from optimus.conf.loader import (
    import_pages_module,
    import_settings_module,
    load_settings,
)
from optimus.interfaces.build import builder_interface
from optimus.interfaces.watch import watcher_interface
from optimus.setup_project import setup_project
from optimus.utils import display_settings


@click.command("watch", short_help="Watch for changes in project sources")
@click.option(
    "--basedir",
    metavar="PATH",
    type=click.Path(exists=True),
    help=(
        "Base directory where to search for settings file. "
        "Default value use current directory."
    ),
    default=os.getcwd(),
)
@click.option(
    "--settings-name",
    metavar="NAME",
    help=(
        "Settings file name to use without '.py' extension. "
        "Default value is 'settings'."
    ),
    default="settings",
)
@click.pass_context
def watch_command(context, basedir, settings_name):
    """
    Watch for changes in project sources to automatically build project
    ressources.

    Performs an initial full build, then blocks in a sleep loop while a
    watchdog-style observer rebuilds on change; CTRL+C stops it.
    """
    logger = logging.getLogger("optimus")
    # Set project before to be able to load its modules
    setup_project(basedir, settings_name)
    # Load current project settings and page map
    settings = import_settings_module(settings_name, basedir=basedir)
    # In test environment, force the module reload to avoid previous test cache to be
    # used (since the module have the same path).
    if context.obj["test_env"]:
        settings = importlib.reload(settings)
    settings = load_settings(settings)
    views = import_pages_module(settings.PAGES_MAP, basedir=basedir)
    if context.obj["test_env"]:
        views = importlib.reload(views)
    # Debug output
    display_settings(
        settings,
        ("DEBUG", "PROJECT_DIR", "SOURCES_DIR", "TEMPLATES_DIR", "LOCALES_DIR"),
    )
    logger.debug("Trigger pages build to start")
    # Initial build so watched output starts from a consistent state.
    build_env = builder_interface(settings, views)
    # Init and configure observer with events
    observer = watcher_interface(settings, views, build_env)
    logger.warning("Starting to watch sources, use CTRL+C to stop it")
    # Do not start observer during tests since we cannot manage interruption and
    # watcher threads
    if not context.obj["test_env"]:
        observer.start()
        try:
            # Keep the main thread alive; the observer works on its own thread.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.warning("Stopping watcher..")
            observer.stop()
        # Wait for the observer thread to finish before returning.
        observer.join()
998,665
53c315ff20493b1039d4e02012cfd90faa5eb1ab
import cv2
import numpy as np
import matplotlib.pyplot as plt


def cv2_bgr2rgb(bgr_img):
    """Convert an OpenCV BGR image to RGB channel order."""
    return cv2.cvtColor(bgr_img, 4)  # cv2.COLOR_BGR2RGB = 4


def apply_clahe(gray_img, **clahe_kwargs):
    """Apply Contrast Limited Adaptive Histogram Equalization to *gray_img*.

    Extra keyword arguments are forwarded to cv2.createCLAHE
    (e.g. clipLimit, tileGridSize).
    """
    clahe = cv2.createCLAHE(**clahe_kwargs)
    return clahe.apply(gray_img)


def to_gray(cv2_img):
    """Convert a BGR color image to single-channel grayscale."""
    return cv2.cvtColor(cv2_img, 6)  # cv2.COLOR_BGR2GRAY = 6


def eqlHist(gray_img):
    """Equalize the histogram of a grayscale image."""
    return cv2.equalizeHist(gray_img)


def list_map(*args, **kwargs):
    """map() that eagerly returns a list."""
    return list(map(*args, **kwargs))


def arr_map(*args, dtype=float, **kwargs):
    """map() that returns a numpy array.

    Fix: the default dtype was ``np.float``, a deprecated alias of the
    builtin ``float`` that was removed in NumPy 1.24; using ``float``
    is behaviorally identical on all NumPy versions.
    """
    return np.array(list_map(*args, **kwargs), dtype=dtype)


def draw_rects_in_img(img, rect_presenting_vec, *rect_args, **rect_kwargs):
    """Draw rectangles onto *img* in place.

    *rect_presenting_vec* is a sequence of (x, y, w, h) tuples; extra
    arguments are forwarded to cv2.rectangle (color, thickness, ...).
    """
    # rectangle_presenting_vectors are like [(x, y, w, h)].
    assert len(np.shape(rect_presenting_vec)) == 2 and np.shape(rect_presenting_vec)[-1] == 4
    for (x, y, w, h) in rect_presenting_vec:
        cv2.rectangle(img, (x, y), (x + w, y + h), *rect_args, **rect_kwargs)


def plt_show_cv2_clr_img(cv2_color_img):
    """Display an OpenCV BGR color image with matplotlib (after BGR->RGB)."""
    img = cv2_bgr2rgb(cv2_color_img)
    plt.imshow(img, vmin=0, vmax=255)
    plt.show()
998,666
412ae7da66d7727ed9f54d111ae8cfa7d78c4949
""" Copyright 2018 EPAM Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from time import time from syndicate.commons.log_helper import get_logger, get_user_logger from syndicate.core.helper import unpack_kwargs from syndicate.core.resources.base_resource import BaseResource from syndicate.core.resources.helper import build_description_obj _LOG = get_logger('syndicate.core.resources.dynamo_db_resource') USER_LOG = get_user_logger() class DaxResource(BaseResource): def __init__(self, dax_conn, iam_conn): self.dax_conn = dax_conn self.iam_conn = iam_conn def create_cluster(self, args): return self.create_pool(self._create_cluster_from_meta, args) @unpack_kwargs def _create_cluster_from_meta(self, name, meta): role_name = meta['iam_role_name'] role_arn = self.iam_conn.check_if_role_exists(role_name) if not role_arn: message = f'Role {role_name} does not exist; ' \ f'Dax cluster {name} failed to be created.' _LOG.error(message) raise AssertionError(message) subnet_group_name = meta.get('subnet_group_name') subnet_ids = meta.get('subnet_ids') or [] if subnet_ids: _LOG.info(f'Subnet_ids \'{subnet_ids}\' were given. ' f'Creating Dax subnet group \'{subnet_group_name}\'') self.dax_conn.create_subnet_group( subnet_group_name=subnet_group_name, subnet_ids=subnet_ids ) _LOG.info(f'Dax subnet group with name {subnet_group_name} ' f'was created.') elif subnet_group_name: _LOG.info(f'Subnet_ids were not given. 
Assuming that subnet ' f'group \'{subnet_group_name}\' already exists') response = self.dax_conn.create_cluster( cluster_name=name, node_type=meta['node_type'], replication_factor=meta['replication_factor'], iam_role_arn=role_arn, subnet_group_name=subnet_group_name, cluster_endpoint_encryption_type=meta.get('cluster_endpoint_encryption_type'), security_group_ids=meta.get('security_group_ids') or [], parameter_group_name=meta.get('parameter_group_name'), availability_zones=meta.get('availability_zones') or [] ) if response: _LOG.info(f'Dax cluster \'{name}\' was successfully created') return self.describe_cluster(name, meta, response['Cluster']) else: _LOG.warning(f'Dax cluster \'{name}\' was not created because ' f'it already exists') return self.describe_cluster(name, meta) def describe_cluster(self, name, meta, response=None): if not response: response = self.dax_conn.describe_cluster(name) if response: arn = response['ClusterArn'] del response['ClusterArn'] return { arn: build_description_obj(response, name, meta) } def remove_cluster(self, args): return self.create_pool(self._remove_cluster, args) @unpack_kwargs def _remove_cluster(self, arn, config): cluster_name = config['resource_name'] try: self.dax_conn.delete_cluster(cluster_name) except self.dax_conn.client.exceptions.InvalidClusterStateFault as e: USER_LOG.warning(e.response['Error']['Message'] + ' Remove it manually!')
998,667
3b48506419868f284028b7601dc3cc951ae8f7d4
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the PPR general collateral schema is valid. """
import copy

from registry_schemas import validate
from registry_schemas.example_data.ppr import GENERAL_COLLATERAL


def _validate_and_report(collateral):
    """Validate against the generalCollateral schema, printing any errors."""
    is_valid, errors = validate(collateral, 'generalCollateral', 'ppr')
    if errors:
        for err in errors:
            print(err.message)
        print(errors)
    return is_valid


def test_valid_general_collateral():
    """Assert that the schema is performing as expected."""
    assert _validate_and_report(GENERAL_COLLATERAL)


def test_invalid_general_collateral_description():
    """Assert that an invalid generalCollateral fails - description too short."""
    collateral = copy.deepcopy(GENERAL_COLLATERAL)
    collateral['description'] = 'XX'
    assert not _validate_and_report(collateral)


def test_invalid_general_collateral_added():
    """Assert that an invalid generalCollateral fails - added date time invalid format."""
    collateral = copy.deepcopy(GENERAL_COLLATERAL)
    collateral['addedDateTime'] = 'XXXXXXXXXXXX'
    assert not _validate_and_report(collateral)


def test_invalid_general_collateral_missing_description():
    """Assert that an invalid generalCollateral fails - description is missing."""
    collateral = copy.deepcopy(GENERAL_COLLATERAL)
    del collateral['description']
    assert not _validate_and_report(collateral)
998,668
64eccb6d1cb1b0c09842c7280ea894538db5b0ad
# -*- coding: utf-8 -*-
# Seed-set evaluation: score news/weibo seed documents against three classes
# of 2-D word embeddings (record.txt) with a Gaussian kernel density, then
# evaluate a linear SVM with repeated 10-fold cross-validation.
# NOTE(review): Python 2 code (print statements, xrange, dict.has_key,
# sklearn.cross_validation) -- runs only on Python 2 with scikit-learn < 0.20.
import math
import time
import gzip
import fileinput
import numpy as np
from sklearn import pipeline
from sklearn import linear_model
from sklearn.svm import SVC
from sklearn.cross_validation import KFold
from sklearn.cross_validation import LeaveOneOut

# record.txt is tab-separated; column 1 is the class id ("1"/"2"/"3"),
# column 2 the word, column 4 an "x,y" 2-D embedding.
# vectormap: word -> embedding; vclassN: embeddings of class-N seed words.
vectormap, vclass1, vclass2, vclass3 = {}, [], [], []
for line in fileinput.input("record.txt"):
    part = line.strip().split("\t")
    vectormap[part[2]] = [float(part[4].split(",")[0]), float(part[4].split(",")[1])]
    if part[1] == "1":
        vclass1.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "2":
        vclass2.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "3":
        vclass3.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
fileinput.close()
print vclass1, vclass2, vclass3

# --- News seed set: 6 features per document (title s1-s3, content s4-s6) ---
i, X, y = 0, [], []
for line in fileinput.input("../../data/testset/news_seed.txt"):
    i += 1
    print i
    # Keep only content words (adjective/noun/verb POS tags) from the
    # "word/POS" tokens of the title (field 0 of the tab-split line).
    words_title = [item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split("\t")[0].split(" ")])]
    # Gaussian kernel (sigma=1) similarity of each title word to every class-1
    # seed vector; words missing from vectormap get squared distance 100 per
    # dimension, i.e. effectively zero similarity.
    s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_title])
    s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_title])
    s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_title])
    # Same three scores over the content words (field 1 of the line).
    words_content = [item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split("\t")[1].split(" ")])]
    s4 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_content])
    s5 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_content])
    s6 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_content])
    # Length-normalized features scaled by 1000.
    X.append([float(s1*1000)/(len(words_title)+1), float(s2*1000)/(len(words_title)+1), float(s3*1000)/(len(words_title)+1), float(s4*1000)/(len(words_content)+1), float(s5*1000)/(len(words_content)+1), float(s6*1000)/(len(words_content)+1)])
    # Seed layout: first ~20 docs class 1, next ~20 class 2, next ~20 class 3,
    # remainder class 0 (negative).
    # NOTE(review): i starts at 1, so "0<=i<20" covers 19 docs -- confirm intended.
    y.append(1 if 0<=i<20 else 2 if 20<=i<40 else 3 if 40<=i<60 else 0)
X, y = np.array(X), np.array(y)

# (kept) resubstitution-accuracy variant:
# total, right = 0, 0
# clf = SVC(kernel='linear', class_weight='auto')
# clf.fit(X, y)
# for i in xrange(len(X)):
#     total += 1
#     if clf.predict(X[i]) == y[i]:
#         right += 1
#     else:
#         print i, X[i], clf.predict(X[i])
# print float(right)/total

# 10 repetitions of 10-fold cross-validation with a linear SVM.
total, right = 0, 0
for t in xrange(10):
    print t
    kf = KFold(len(X), n_folds=10)
    for train, test in kf:
        clf = SVC(kernel='linear', class_weight='auto')
        clf.fit(X[train], y[train])
        for item in test:
            total += 1
            if clf.predict(X[item]) == y[item]:
                right += 1
print float(right)/total
# # 0.79

# --- Weibo seed set: 3 features per post (whole text s1-s3) ---
vectormap, vclass1, vclass2, vclass3 = {}, [], [], []
for line in fileinput.input("record.txt"):
    part = line.strip().split("\t")
    vectormap[part[2]] = [float(part[4].split(",")[0]), float(part[4].split(",")[1])]
    if part[1] == "1":
        vclass1.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "2":
        vclass2.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "3":
        vclass3.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
fileinput.close()
print vclass1, vclass2, vclass3

i, X, y = 0, [], []
# l1..l4 collect the per-class feature vectors for inspection.
l1, l2, l3, l4 = [], [], [], []
for line in fileinput.input("../../data/testset/weibo_seed.txt"):
    i += 1
    words = [item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split(" ")])]
    s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words])
    s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words])
    s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words])
    # print i, float(s1)/(len(words)+1), float(s2)/(len(words)+1), float(s3)/(len(words)+1)
    X.append([float(s1*1000)/(len(words)+1), float(s2*1000)/(len(words)+1), float(s3*1000)/(len(words)+1)])
    # NOTE(review): label boundaries here (0<=i<20) differ by one from the
    # l1..l4 bucket boundaries below (0<i<=20) -- confirm which is intended.
    y.append(1 if 0<=i<20 else 2 if 20<=i<40 else 3 if 40<=i<60 else 0)
    if 0<i<=20:
        l1.append([float(s1*1000)/(len(words)+1), float(s2*1000)/(len(words)+1), float(s3*1000)/(len(words)+1)])
    elif 20<i<=40:
        l2.append([float(s1*1000)/(len(words)+1), float(s2*1000)/(len(words)+1), float(s3*1000)/(len(words)+1)])
    elif 40<i<=60:
        l3.append([float(s1*1000)/(len(words)+1), float(s2*1000)/(len(words)+1), float(s3*1000)/(len(words)+1)])
    else:
        l4.append([float(s1*1000)/(len(words)+1), float(s2*1000)/(len(words)+1), float(s3*1000)/(len(words)+1)])
    print i
X, y = np.array(X), np.array(y)

# (kept) resubstitution-accuracy variant:
# total, right = 0, 0
# clf = SVC(kernel='linear', class_weight='auto')
# clf.fit(X, y)
# for i in xrange(len(X)):
#     total += 1
#     if clf.predict(X[i]) == y[i]:
#         right += 1
#     else:
#         print i, X[i], clf.predict(X[i])
# print float(right)/total

# 10 repetitions of 10-fold cross-validation with a linear SVM.
total, right = 0, 0
for t in xrange(10):
    kf = KFold(len(X), n_folds=10)
    for train, test in kf:
        clf = SVC(kernel='linear', class_weight='auto')
        clf.fit(X[train], y[train])
        for item in test:
            total += 1
            if clf.predict(X[item]) == y[item]:
                right += 1
print float(right)/total
# # 0.85
# News classification (新闻分类): retrain the linear SVM on the news seed set,
# then tag every document of the gzip corpus comp_1 as class 1/2/3 (written to
# the "pos" file with extracted person/place names) or class 0 ("neg" file).
vectormap, exist, vclass1, vclass2, vclass3 = {}, {}, [], [], []
for line in fileinput.input("record.txt"):
    part = line.strip().split("\t")
    vectormap[part[2]] = [float(part[4].split(",")[0]), float(part[4].split(",")[1])]
    # exist marks words that belong to any seed class (fast membership filter).
    if part[1] == "1":
        exist[part[2]] = True
        vclass1.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "2":
        exist[part[2]] = True
        vclass2.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "3":
        exist[part[2]] = True
        vclass3.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
fileinput.close()
print vclass1, vclass2, vclass3

# Build training features from the news seed set (same kernel scores as the
# evaluation script above).
i, X, y = 0, [], []
for line in fileinput.input("../../data/testset/news_seed.txt"):
    i += 1
    print i
    # NOTE(review): words/length are computed but never used below.
    words = [item[0] for item in filter(lambda x:exist.has_key(x[0]) and x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split(" ")])]
    length = len([item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split(" ")])])
    words_title = [item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split("\t")[0].split(" ")])]
    s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_title])
    s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_title])
    s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_title])
    words_content = [item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split("\t")[1].split(" ")])]
    s4 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_content])
    s5 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_content])
    s6 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_content])
    # NOTE(review): s4-s6 are content scores but are normalized by the TITLE
    # length here (the evaluation script uses len(words_content)+1, and the
    # prediction below also uses the content length) -- looks like a
    # copy-paste bug; confirm before changing, as the model was trained this way.
    X.append([float(s1*1000)/(len(words_title)+1), float(s2*1000)/(len(words_title)+1), float(s3*1000)/(len(words_title)+1), float(s4*1000)/(len(words_title)+1), float(s5*1000)/(len(words_title)+1), float(s6*1000)/(len(words_title)+1)])
    y.append(1 if 0<=i<20 else 2 if 20<=i<40 else 3 if 40<=i<60 else 0)
X, y = np.array(X), np.array(y)
clf = SVC(kernel='linear', class_weight='auto')
clf.fit(X, y)

# Classify the full news corpus; positives (pred != 0) are written with the
# extracted person names (nr*) and place names (ns*) from title and content.
file1 = open("../../data/classify/tag_pos_t_lable_group_comp_1.seg.txt","w")
file2 = open("../../data/classify/tag_neg_t_lable_group_comp_1.seg.txt","w")
i = 0
for line in gzip.open("../../data/original/t_lable_group_comp_1.sort.seg.txt.gz"):
    i += 1
    print i
    # NOTE(review): the bare except silently drops any malformed line
    # (bad utf-8, missing fields, bad timestamp) -- deliberate best-effort.
    try:
        line.decode("utf-8")
        # Fields are quoted, hence the [1:-1] slicing.
        cid = line.strip().split("\t")[0][1:-1]
        # Days elapsed since 2011-04-01.
        day = (int(time.mktime(time.strptime(line.strip().split("\t")[1][1:-1],'%Y-%m-%d %H:%M:%S')))-int(time.mktime(time.strptime("2011-04-01 00:00:00",'%Y-%m-%d %H:%M:%S'))))/(24*3600)
        title = line.strip().split("\t")[4][1:-1].split(" ")
        content = line.strip().split("\t")[7][1:-1].split(" ")
        # Count person-name (nr*) and place-name (ns*) tokens of length >= 2
        # characters (len/3 >= 2 assumes 3-byte utf-8 Chinese characters).
        nr_map_t, ns_map_t = {}, {}
        for item in title:
            if len(item.split("/")) == 2 and item.split("/")[1] in ["nr","nr1","nr2","nrj","nrf"] and len(item.split("/")[0])/3>=2:
                nr_map_t[item.split("/")[0]] = 1 if not nr_map_t.has_key(item.split("/")[0]) else nr_map_t[item.split("/")[0]]+1
            if len(item.split("/")) == 2 and item.split("/")[1] in ["ns","nsf"] and len(item.split("/")[0])/3>=2:
                ns_map_t[item.split("/")[0]] = 1 if not ns_map_t.has_key(item.split("/")[0]) else ns_map_t[item.split("/")[0]]+1
        nr_map_c, ns_map_c = {}, {}
        for item in content:
            if len(item.split("/")) == 2 and item.split("/")[1] in ["nr","nr1","nr2","nrj","nrf"] and len(item.split("/")[0])/3>=2:
                nr_map_c[item.split("/")[0]] = 1 if not nr_map_c.has_key(item.split("/")[0]) else nr_map_c[item.split("/")[0]]+1
            if len(item.split("/")) == 2 and item.split("/")[1] in ["ns","nsf"] and len(item.split("/")[0])/3>=2:
                ns_map_c[item.split("/")[0]] = 1 if not ns_map_c.has_key(item.split("/")[0]) else ns_map_c[item.split("/")[0]]+1
        title_orig = "".join([item.split("/")[0] for item in title])
        # Cap the content at 500 tokens to bound the kernel-score cost.
        content = content[0:500]
        # NOTE(review): length_title/length_content are computed but unused.
        length_title = len([item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in title])])
        words_title = [item[0] for item in filter(lambda x:exist.has_key(x[0]) and x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in title])]
        s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_title])
        s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_title])
        s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_title])
        length_content = len([item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in content])])
        words_content = [item[0] for item in filter(lambda x:exist.has_key(x[0]) and x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in content])]
        s4 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words_content])
        s5 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words_content])
        s6 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words_content])
        pred = clf.predict([float(s1*1000)/(len(words_title)+1), float(s2*1000)/(len(words_title)+1), float(s3*1000)/(len(words_title)+1), float(s4*1000)/(len(words_content)+1), float(s5*1000)/(len(words_content)+1), float(s6*1000)/(len(words_content)+1)])
        if pred != 0:
            # Positive: cid, class, day offset, quoted "name:count" maps, raw title.
            file1.write(cid+"\t"+str(pred[0])+"\t"+str(day)+"\t"+"\""+" ".join([k+":"+str(v) for k,v in nr_map_t.iteritems()])+"\""+"\t"+"\""+" ".join([k+":"+str(v) for k,v in ns_map_t.iteritems()])+"\""+"\t"+"\""+" ".join([k+":"+str(v) for k,v in nr_map_c.iteritems()])+"\""+"\t"+"\""+" ".join([k+":"+str(v) for k,v in ns_map_c.iteritems()])+"\""+"\t"+title_orig+"\n")
        else:
            file2.write(line)
    except:
        continue
file1.close()
file2.close()

# Weibo classification (微博分类): same pipeline for the weibo corpus comp_4
# (3 features over the whole post instead of title/content pairs).
vectormap, exist, vclass1, vclass2, vclass3 = {}, {}, [], [], []
for line in fileinput.input("record.txt"):
    part = line.strip().split("\t")
    vectormap[part[2]] = [float(part[4].split(",")[0]), float(part[4].split(",")[1])]
    if part[1] == "1":
        exist[part[2]] = True
        vclass1.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "2":
        exist[part[2]] = True
        vclass2.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
    if part[1] == "3":
        exist[part[2]] = True
        vclass3.append([float(part[4].split(",")[0]), float(part[4].split(",")[1])])
fileinput.close()
print vclass1, vclass2, vclass3

# Train on the weibo seed set (only seed-class words contribute to scores here).
i, X, y = 0, [], []
for line in fileinput.input("../../data/testset/weibo_seed.txt"):
    i += 1
    print i
    words = [item[0] for item in filter(lambda x:exist.has_key(x[0]) and x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split(" ")])]
    length = len([item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in line.strip().split(" ")])])
    s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words])
    s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words])
    s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words])
    X.append([float(s1*1000)/(length+1), float(s2*1000)/(length+1), float(s3*1000)/(length+1)])
    y.append(1 if 0<=i<20 else 2 if 20<=i<40 else 3 if 40<=i<60 else 0)
X, y = np.array(X), np.array(y)
clf = SVC(kernel='linear', class_weight='auto')
clf.fit(X, y)

file1 = open("../../data/classify/tag_pos_t_lable_group_comp_4.seg.txt","w")
file2 = open("../../data/classify/tag_neg_t_lable_group_comp_4.seg.txt","w")
i = 0
for line in gzip.open("../../data/original/t_lable_group_comp_4.sort.seg.txt.gz"):
    i += 1
    print i
    # Bare except: best-effort skip of malformed lines (same as above).
    try:
        line.decode("utf-8")
        cid = line.strip().split("\t")[0][1:-1]
        day = (int(time.mktime(time.strptime(line.strip().split("\t")[1][1:-1],'%Y-%m-%d %H:%M:%S')))-int(time.mktime(time.strptime("2011-04-01 00:00:00",'%Y-%m-%d %H:%M:%S'))))/(24*3600)
        content = line.strip().split("\t")[5][1:-1].split(" ")
        # Person/place name counts; no minimum-length filter here (unlike news).
        nr_map_t, ns_map_t = {}, {}
        for item in content:
            if len(item.split("/")) == 2 and item.split("/")[1] in ["nr","nr1","nr2","nrj","nrf"]:
                nr_map_t[item.split("/")[0]] = 1 if not nr_map_t.has_key(item.split("/")[0]) else nr_map_t[item.split("/")[0]]+1
            if len(item.split("/")) == 2 and item.split("/")[1] in ["ns","nsf"]:
                ns_map_t[item.split("/")[0]] = 1 if not ns_map_t.has_key(item.split("/")[0]) else ns_map_t[item.split("/")[0]]+1
        # NOTE(review): text is built but never used afterwards.
        text = []
        for k,v in nr_map_t.iteritems():
            try:
                text.extend([k.decode("utf-8")]*v)
            except:
                continue
        for k,v in ns_map_t.iteritems():
            try:
                text.extend([k.decode("utf-8")]*v)
            except:
                continue
        content_orig = "".join([item.split("/")[0] for item in content])
        words = [item[0] for item in filter(lambda x:exist.has_key(x[0]) and x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in content])]
        length = len([item[0] for item in filter(lambda x:x[1] in ["a","an","b","n","v","vn","vi"], [item.split("/") for item in content])])
        s1 = sum([sum([math.exp(-sum([(vectormap[word][k]-c1[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c1 in vclass1])/math.sqrt(2*math.pi) for word in words])
        s2 = sum([sum([math.exp(-sum([(vectormap[word][k]-c2[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c2 in vclass2])/math.sqrt(2*math.pi) for word in words])
        s3 = sum([sum([math.exp(-sum([(vectormap[word][k]-c3[k])**2 if vectormap.has_key(word) else 100 for k in xrange(2)])/(2*1**2)) for c3 in vclass3])/math.sqrt(2*math.pi) for word in words])
        pred = clf.predict([float(s1*1000)/(length+1), float(s2*1000)/(length+1), float(s3*1000)/(length+1)])
        if pred != 0:
            file1.write(cid+"\t"+str(pred[0])+"\t"+str(day)+"\t"+"\""+" ".join([k+":"+str(v) for k,v in nr_map_t.iteritems()])+"\""+"\t"+"\""+" ".join([k+":"+str(v) for k,v in ns_map_t.iteritems()])+"\""+"\t"+content_orig+"\n")
        else:
            file2.write(line)
    except:
        continue
file1.close()
file2.close()
998,669
5c4edf78948855410393d99b72cff21125c824ee
import pandas as pd
from pymongo import MongoClient


def getQueryData():
    """Fetch every document of AutoRedGreen.QUERY as a pandas DataFrame.

    Fix: the MongoClient is now closed after use -- the original leaked the
    connection on every call.
    """
    client = MongoClient('mongodb://onega:27017')
    try:
        collection = client.AutoRedGreen.QUERY
        return pd.DataFrame(list(collection.find()))
    finally:
        client.close()


if __name__ == "__main__":
    # Demo kept from the original script, guarded so importing this module
    # no longer triggers a database round-trip.
    a = getQueryData()
    print(a)
998,670
b10f8ee802f96d4e9286d31c524189053d17e4fb
import time


def signs_counter(sign, string):
    """Count occurrences of the single character *sign* in *string*.

    Returns (count, sign) on success. Returns the string "Błąd" ("Error")
    when *sign* is longer than one character -- this odd mixed return type
    is preserved from the original interface.
    """
    if len(sign) > 1:
        return "Błąd"
    counter = string.count(sign)
    return counter, sign


if __name__ == "__main__":
    # Interactive part guarded so the module can be imported without prompting.
    string = input("Podaj łańcuch znaków: ")
    sign = input("Podaj znak do znalezienia: ")
    start = time.time()
    result = signs_counter(sign, string)
    end = time.time()
    # Fix: the original assigned the elapsed time to a variable named `time`,
    # shadowing the time module for the rest of the program.
    elapsed = end - start
    print("Jest", result, "w tym łańcuchu")
    print("Czas wykonania tego programu to", elapsed)
998,671
1c5af629fc50aeefbf1b54e1314ec5413035b4d3
#1) TMDN_Dataset
#1.1) get genres & overview from movies
#1.2) tokenize data using BERT_Tokenizer
#1.3) make input_form : [CLS] + genres + [SEP] + overview + tagline + [SEP]
#1.4) make token_type_ids
#1.5) make attention_mask
#1.6) make input be tensor
#1.6.1) input_ids
#1.6.2) token_type_ids
#1.6.3) attention_mask
#1.7) normalize label(make mean 1)
#1.7.1) popularity
#1.7.2) vote_average
#1.7.3) vote_count
#1.7.4) revenue
#1.8) make input_target : popularity + vote_average * vote_count

device = 'cuda'

import math
import torch
import numpy as np
import pandas as pd  # fix: pd.isnull is used in check_null but pandas was never imported
from transformers import BertTokenizer, BertModel, BertConfig

BERT_Tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)


class TMDM_Dataset(torch.utils.data.Dataset):
    """BERT-ready dataset over a TMDB movies DataFrame.

    Each item is a dict with 'input_ids', 'token_type_ids', 'attention_mask'
    (all on *device*) and a binary 'input_target' derived from popularity
    and vote statistics.
    """

    def __init__(self, movie_data, max_len, tokenizer):
        # movie_data: pandas DataFrame with columns genres/overview/tagline/
        # popularity/vote_average/vote_count. max_len: BERT sequence length.
        self.movie_data = movie_data
        self.max_len = max_len
        self.tokenizer = tokenizer
        self.encoded_cls = self.tokenizer.convert_tokens_to_ids('[CLS]')
        self.encoded_sep = self.tokenizer.convert_tokens_to_ids('[SEP]')
        self.encoded_pad = self.tokenizer.convert_tokens_to_ids('[PAD]')
        # Dataset-wide means used to normalize the labels to mean 1.
        # NOTE(review): hard-coded from a specific TMDB snapshot -- confirm
        # they match the DataFrame actually passed in.
        self.popularity_mean = 21.49230058817409
        self.vote_average_mean = 6.092171559442011
        self.vote_count_mean = 690.2179887570269
        self.target_threshold = 0.95

    def __len__(self):
        return len(self.movie_data)

    def __getitem__(self, index):
        """Build the BERT inputs and the binary target for row *index*."""
        #1.1) get genres & overview from movies
        # genres
        genres = self.movie_data.loc[index]['genres']
        if genres == '[]':
            genres = '[{"id": -1, "name": "None"}]'
        genres = self.get_genres(genres)
        genres = ' '.join(genres)
        # overview
        overview = self.movie_data.loc[index]['overview']
        # tagline
        tagline = self.movie_data.loc[index]['tagline']
        #1.2) tokenize data using BERT_Tokenizer
        # genres_token & genres_encoded ([1:-1] strips the CLS/SEP the
        # tokenizer adds; we add our own below)
        if self.check_null(genres) == True:
            genres = 'None'
        genres = genres.lower()
        genres_token = self.tokenizer.tokenize(genres)
        genres_encoded = self.tokenizer.encode(genres_token)[1:-1]
        # overview_token & overview_encoded
        if self.check_null(overview) == True or len(overview) == 1:
            overview = 'None'
        overview = overview.lower()
        overview_token = self.tokenizer.tokenize(overview)
        overview_encoded = self.tokenizer.encode(overview_token)[1:-1]
        # tagline_token & tagline_encoded
        if self.check_null(tagline) == True:
            tagline = 'None'
        tagline = tagline.lower()
        tagline_token = self.tokenizer.tokenize(tagline)
        tagline_encoded = self.tokenizer.encode(tagline_token)[1:-1]
        # content_encoded = overview_encoded + tagline_encoded
        content_encoded = overview_encoded + tagline_encoded
        #1.3) make input_form : [CLS] + genres + [SEP] + content + [SEP]
        # Truncate the content so the full sequence fits in max_len
        # (3 slots reserved for CLS and the two SEPs).
        if len(genres_encoded) + len(content_encoded) >= (self.max_len - 3):
            content_encoded = content_encoded[:(self.max_len - 3 - len(genres_encoded))]
        input_ids = [self.encoded_cls] + genres_encoded + [self.encoded_sep] + \
                    content_encoded + [self.encoded_sep]
        #1.4) make token_type_ids (segment A: CLS+genres+SEP; segment B: content+SEP)
        sent1_length = len(genres_encoded)
        sent2_length = len(content_encoded)
        token_type_ids = [0] * (sent1_length + 2) + [1] * (sent2_length + 1)
        #1.5) make attention_mask (1 for real tokens, 0 for padding added below)
        attention_mask = [1] * len(input_ids)
        #1.6) padding input to max_len & make tensor form
        # input_ids
        padding_length = self.max_len - len(input_ids)
        input_ids = self.padding(input_ids, self.encoded_pad, padding_length)
        input_ids = torch.tensor(input_ids)
        input_ids = input_ids.long()
        input_ids = input_ids.to(device)
        # token_type_ids
        token_type_ids = self.padding(token_type_ids, self.encoded_pad, padding_length)
        token_type_ids = torch.Tensor(token_type_ids)
        token_type_ids = token_type_ids.long()
        token_type_ids = token_type_ids.to(device)
        # attention_mask
        attention_mask = self.padding(attention_mask, self.encoded_pad, padding_length)
        attention_mask = torch.Tensor(attention_mask)
        attention_mask = attention_mask.long()
        attention_mask = attention_mask.to(device)
        #1.7) normalize label(make mean 1)
        #1.7.1) popularity
        popularity = self.movie_data.loc[index]['popularity']
        popularity = np.divide(popularity, self.popularity_mean)
        #1.7.2) vote_average
        vote_average = self.movie_data.loc[index]['vote_average']
        vote_average = np.divide(vote_average, self.vote_average_mean)
        #1.7.3) vote_count
        vote_count = self.movie_data.loc[index]['vote_count']
        vote_count = np.divide(vote_count, self.vote_count_mean)
        #1.8) make input_target : popularity + vote_average * vote_count,
        # binarized at target_threshold
        input_target = popularity + vote_average * vote_count
        if input_target >= self.target_threshold:
            input_target = torch.Tensor([1])
        else:
            input_target = torch.Tensor([0])
        input_target = input_target.to(device)
        # dictionary
        dictionary = {}
        dictionary['input_ids'] = input_ids
        dictionary['token_type_ids'] = token_type_ids
        dictionary['attention_mask'] = attention_mask
        dictionary['input_target'] = input_target
        return dictionary

    def get_genres(self, data):
        """Extract the "name" values from the genres JSON-ish string by hand.

        Scans for ': "' and collects characters up to the closing quote.
        """
        genres = []
        for i in range(len(data)):
            if data[i] == ':' and data[i + 1] == ' ' and data[i + 2] == '"':
                j = i + 3
                while data[j] != '"':
                    genres.append(data[j])
                    j = j + 1
                genres.append('/')
        genres = ''.join(genres)
        genres = genres.split('/')[:-1]
        return genres

    def padding(self, input, value, length):
        """Right-pad *input* with *value*, *length* times."""
        return input + [value] * length

    def check_null(self, value):
        """Return True when *value* is a null/NaN field.

        Fix: the original called pd.isnull without importing pandas (NameError
        on every str field) and raised UnboundLocalError for any type other
        than str/float; non-str/float values now count as not-null.
        """
        if isinstance(value, str):
            return bool(pd.isnull(value))
        if isinstance(value, float):
            return math.isnan(value)
        return False
998,672
986ec9cfcc90946821a777854cd7c4f68b220088
import jwt def generate_jwt(playload,private_key): token = jwt.encode( playload, private_key, algorithm='RS256') return token
998,673
5d8e23a7c87730bb3b2b5412917a6f82fd936b40
import asyncio import time import wx from wxasync import WxAsyncApp, StartCoroutine from pynput.keyboard import Key, Controller from bleak import BleakScanner, BleakClient # Key assignments KEY_JUMP = 'a' KEY_LEAN_FORWARD = Key.right KEY_LEAN_BACKWARD = Key.left KEY_RED_TILE = 'b' KEY_GREEN_TILE = Key.down # Timing BUTTON_TIME_DEFAULT = 0.1 BUTTON_TIME_JUMP = 1.5 # BLE stuff LEGO_CHARACTERISTIC_UUID = "00001624-1212-efde-1623-785feabcd123" LEGO_SERVICE_UUID = "00001623-1212-efde-1623-785feabcd123" SUBSCRIBE_IMU_COMMAND = bytearray([0x0A, 0x00, 0x41, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01]) SUBSCRIBE_RGB_COMMAND = bytearray([0x0A, 0x00, 0x41, 0x01, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01]) # GUI class class MarioFrame(wx.Frame): def __init__(self, parent=None, id=-1, title="Lego Mario Keys"): wx.Frame.__init__(self, parent, id, title, size=(450, 100)) self.initGUI() self.controller = MarioController(self) StartCoroutine(self.controller.run(), self) def initGUI(self): panel = wx.Panel(self) font = wx.Font(15, wx.DEFAULT, wx.NORMAL, wx.DEFAULT) self.status_field = wx.StaticText(self, label="", style=wx.ALIGN_CENTER) self.status_field.SetFont(font) self.cam_field = wx.StaticText(self, label="", style=wx.ALIGN_LEFT, size=wx.Size(50, wx.DefaultCoord)) self.cam_field.SetFont(font) self.accel_field = wx.StaticText(self, label="", style=wx.ALIGN_LEFT, size=wx.Size(200, wx.DefaultCoord)) self.accel_field.SetFont(font) self.key_switch_label = wx.StaticText(self, label="Send keys: ", style=wx.ALIGN_RIGHT, size=wx.Size(100, wx.DefaultCoord)) self.key_switch_label.SetFont(font) self.key_switch = wx.CheckBox(self) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(self.status_field, flag=wx.ALL, border=5, ) hbox = wx.BoxSizer(wx.HORIZONTAL) hbox.Add(self.cam_field, flag=wx.ALL|wx.FIXED_MINSIZE, border=5) hbox.Add(self.accel_field, flag=wx.ALL|wx.FIXED_MINSIZE, border=5) hbox.Add(self.key_switch_label, flag=wx.ALL|wx.FIXED_MINSIZE, border=5) hbox.Add(self.key_switch, flag=wx.ALL, border=5) 
vbox.Add(hbox, flag=wx.ALL, border=5) self.SetSizer(vbox) # Class for the controller class MarioController: def __init__(self, gui): self.gui = gui self.keyboard = Controller() self.current_tile = 0 self.current_x = 0 self.current_y = 0 self.current_z = 0 self.is_connected = False def signed(char): return char - 256 if char > 127 else char async def process_keys(self): if self.is_connected and self.gui.key_switch.GetValue(): if self.current_tile == 1: self.keyboard.press(KEY_RED_TILE) await asyncio.sleep(BUTTON_TIME_DEFAULT) self.keyboard.release(KEY_RED_TILE) self.current_tile = 0 elif self.current_tile == 2: self.keyboard.press(KEY_GREEN_TILE) await asyncio.sleep(BUTTON_TIME_DEFAULT) self.keyboard.release(KEY_GREEN_TILE) self.current_tile = 0 if self.current_z > 10: self.keyboard.press(KEY_LEAN_BACKWARD) elif self.current_z < -10: self.keyboard.press(KEY_LEAN_FORWARD) else: self.keyboard.release(KEY_LEAN_BACKWARD) self.keyboard.release(KEY_LEAN_FORWARD) if self.current_x > 5: self.keyboard.press(KEY_JUMP) await asyncio.sleep(BUTTON_TIME_JUMP) self.keyboard.release(KEY_JUMP) await asyncio.sleep(0.05) def notification_handler(self, sender, data): # Camera sensor data if data[0] == 8: # RGB code if data[5] == 0x0: if data[4] == 0xb8: self.gui.cam_field.SetLabel("Start tile") self.current_tile = 3 if data[4] == 0xb7: self.gui.cam_field.SetLabel("Goal tile") self.current_tile = 4 print("Barcode: " + " ".join(hex(n) for n in data)) # Red tile elif data[6] == 0x15: self.gui.cam_field.SetLabel("Red tile") self.current_tile = 1 # Green tile elif data[6] == 0x25: self.gui.cam_field.SetLabel("Green tile") self.current_tile = 2 # No tile elif data[6] == 0x1a: self.gui.cam_field.SetLabel("No tile") self.current_tile = 0 # Accelerometer data elif data[0] == 7: self.current_x = int((self.current_x*0.5) + (MarioController.signed(data[4])*0.5)) self.current_y = int((self.current_y*0.5) + (MarioController.signed(data[5])*0.5)) self.current_z = int((self.current_z*0.5) + 
(MarioController.signed(data[6])*0.5)) self.gui.accel_field.SetLabel("X: %i | Y: %i | Z: %i" % (self.current_x, self.current_y, self.current_z)) async def run(self): while True: self.is_connected = False self.gui.status_field.SetLabel("Looking for Mario. Switch on and press Bluetooth key.") self.gui.cam_field.SetLabel("") self.gui.accel_field.SetLabel("") devices = await BleakScanner.discover() for d in devices: if d.name.lower().startswith("lego mario") or LEGO_SERVICE_UUID in d.metadata['uuids']: self.gui.status_field.SetLabel("Found Mario!") try: async with BleakClient(d.address) as client: await client.is_connected() self.gui.status_field.SetLabel("Mario is connected") self.is_connected = True await client.start_notify(LEGO_CHARACTERISTIC_UUID, self.notification_handler) await asyncio.sleep(0.1) await client.write_gatt_char(LEGO_CHARACTERISTIC_UUID, SUBSCRIBE_IMU_COMMAND) await asyncio.sleep(0.1) await client.write_gatt_char(LEGO_CHARACTERISTIC_UUID, SUBSCRIBE_RGB_COMMAND) while await client.is_connected(): await self.process_keys() except: pass # Run it if __name__ == "__main__": # The application object. app = WxAsyncApp() # The app frame frm = MarioFrame() # Drawing it frm.Show() # Start the main loop loop = asyncio.get_event_loop() loop.run_until_complete(app.MainLoop())
998,674
21b7bd39ea2fe136bb35619d87ea213ef5e119fc
ad = 6 # print the numbers till ad from 0 i = 0 while i < ad: i+=1 print(i)
998,675
362977621b8c476325e012195c95d02a8e0c6802
# -*- coding: utf-8 -*- """ Created on Wed Dec 25 12:30:08 2019 @author: ja2 """ # check version import tensorflow print(tensorflow.__version__)
998,676
b6284a6d13790e391ce2c525aaac4cd57b56e75d
import numpy as np import torch from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter import torchaudio.transforms as tfa_transforms import timm import data import models import yaml import nussl from ignite.engine import Events from yaml import Loader import os import logging from nussl import STFTParams import funcs # Hyperparameters seed = 0 dataset = 'musdb' toy_dataset = False sources = 'bdvo' # only valid for musdb imagenet_pretrained = False closure_key = 'hrnet_w18_small_v2' task = 'separation' window_length = 512 hop_length = 128 window_type = 'sqrt_hann' sample_rate = 22_050 stem = False skip = False spec_norm = None waveform_norm = None binary_mask = False resume = None batch_size = 8 minibatch_size = 8 learning_rate = .01 momentum = .9 weight_decay = .0005 autoclip = 0 epochs = 100 epoch_length = 100 valid_epoch_length = None poly_power = .9 num_workers = 8 device = 'cuda:0' optimizer = 'sgd' # Asserts assert batch_size >= minibatch_size # multiclass uses Cross Entropy loss, and this loss is not defined if a bucket does not belong to any source. 
assert not (binary_mask and len(sources) < 4) # Seeding torch.manual_seed(seed) np.random.seed(seed) # Setup Logging logger = logging.getLogger('train') logger.info(f"The code is being run from {os.getcwd()}") os.mkdir('tensorboard') writer = SummaryWriter(log_dir='tensorboard') # Dataset if dataset == 'musdb': with open('.guild/sourcecode/data_conf/musdb_args.yml') as s: kwargs = yaml.load(s) if toy_dataset: kwargs['num_tracks'] = 1 train_dataset, val_dataset = data.build_musdb(False, **kwargs, sources=sources) if toy_dataset: val_dataset = train_dataset elif dataset == 'openmic': with open('.guild/sourcecode/data_conf/openmic_args.yml') as s: kwargs = yaml.load(s) train_dataset, val_dataset = data.build_openmic(False, **kwargs) elif dataset == 'mtg_jamendo': with open('.guild/sourcecode/data_conf/mtg_jamendo_args.yml') as s: kwargs = yaml.load(s) train_dataset, val_dataset = data.build_mtg_jamendo(False, **kwargs) # Model and Optimizer model = models.HRNet( closure_key, train_dataset.num_classes, pretrained=imagenet_pretrained, stft_params=STFTParams(window_length=window_length, hop_length=hop_length, window_type=window_type), head=task, stem=stem, audio_channels=1, skip=skip, spec_norm=spec_norm, waveform_norm=waveform_norm, binary_mask=binary_mask ).to(device) if optimizer == 'sgd': optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay) elif optimizer == 'adam': optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) if resume: resume_model_path = os.path.join(resume, 'checkpoints/best.model.pth') model_state_dict = torch.load(resume_model_path) models.state_dict_back_compat(model_state_dict) model.load_state_dict(model_state_dict) resume_optimizer_path = os.path.join(resume, 'checkpoints/best.optimizer.pth') optimizer_state_dict = torch.load(resume_optimizer_path) optimizer.load_state_dict(optimizer_state_dict) resampler = tfa_transforms.Resample( 
train_dataset.sample_rate, sample_rate ).to(device) if task == 'separation': sisdr = funcs.SISDR() recon_loss = funcs.ReconstructionLoss() # Training Setup train_dataloader = DataLoader(train_dataset, num_workers=num_workers, batch_size=batch_size, shuffle=True) val_dataloader = DataLoader(val_dataset, num_workers=num_workers, batch_size=minibatch_size, shuffle=True) def dict_to_item(d : dict): for k, v in d.items(): d[k] = v.item() return d def to_minibatch(batch: dict[torch.Tensor]): minibatches = [{}] * int(batch['mix_audio'].shape[0] / minibatch_size + 1) for k, v in batch.items(): if isinstance(v, torch.Tensor): tensors = torch.split(v, minibatch_size, dim=0) for i, tensor in enumerate(tensors): minibatches[i][k] = tensor else: for minibatch in minibatches: minibatch[k] = v return minibatches def minibatches_loss(minibatches): batch_loss_dict = {} for batch in minibatches: funcs.resample_batch(batch, resampler) output = model(batch['mix_audio']) if task == 'separation': model.stft.direction = 'transform' loss_dict = { 'loss': recon_loss(output, batch, model.stft), 'si-sdr': sisdr(output, batch) } elif task == 'classification': loss_dict = { 'loss': funcs.classification_loss(output, batch) } elif task == 'segmentation': raise NotImplementedError("segmentation task not supported.") loss_dict['loss'].backward() for k, v in loss_dict.items(): batch_loss_dict[k] = (v * batch['mix_audio'].shape[0] / batch_size + batch_loss_dict.get(k, 0)) return batch_loss_dict def train_step(engine, batch): model.train() model.zero_grad(set_to_none=True) torch.cuda.empty_cache() minibatches = to_minibatch(batch) loss_dict = minibatches_loss(minibatches) optimizer.step() return dict_to_item(loss_dict) def val_step(engine, batch): with torch.no_grad(): model.eval() funcs.resample_batch(batch, resampler) output = model(batch['mix_audio']) if task == 'separation': model.stft.direction = 'transform' loss_dict = { 'loss': recon_loss(output, batch, model.stft), 'si-sdr': sisdr(output, 
batch) } elif task == 'classification': loss_dict = { 'loss': funcs.classification_loss(output, batch) } elif task == 'segmentation': raise NotImplementedError("segmentation task not supported.") return dict_to_item(loss_dict) trainer, validator = nussl.ml.train.create_train_and_validation_engines( train_step, val_step, device=device ) # Ignite Handlers @trainer.on(Events.ITERATION_COMPLETED) def on_iteration_completed(engine): # Log iteration metrics for key in engine.state.iter_history: if engine.state.iteration % 1 == 0: writer.add_scalar( 'iter/' + key, engine.state.iter_history[key][-1], engine.state.iteration ) # Poly-Learning Rate poly_lr = (learning_rate * (1 - engine.state.iteration / max_iterations) ** poly_power) for g in optimizer.param_groups: g['lr'] = poly_lr if valid_epoch_length is not None: @trainer.on(Events.EPOCH_STARTED) def load_validator_state_dict(_): # Load validation epoch length validator.load_state_dict({ 'iteration': 0, 'max_epochs': 1, 'epoch_length': valid_epoch_length }) @trainer.on(nussl.ml.train.ValidationEvents.VALIDATION_COMPLETED) def on_epoch_completed(engine): # Log validation metrics for key in engine.state.epoch_history: writer.add_scalar( key, engine.state.epoch_history[key][-1], engine.state.epoch ) if 0 < autoclip < 100: funcs.add_autoclip_gradient_handler(trainer, model, autoclip) max_iterations = epochs * epoch_length nussl.ml.train.add_stdout_handler(trainer, validator) nussl.ml.train.add_validate_and_checkpoint(os.getcwd(), model, optimizer, train_dataset, trainer, val_dataloader, validator) nussl.ml.train.add_progress_bar_handler(validator) nussl.ml.train.add_progress_bar_handler(trainer) trainer.run(train_dataloader, epoch_length=epoch_length, max_epochs=epochs)
998,677
b728afe0a8d6a1a213ece66a8a83f50345540b15
#/usr/bin/env python #import numpy as np #import matplotlib.pyplot as plt from matplotlib.colors import LinearSegmentedColormap #http://stackoverflow.com/questions/3279560/invert-colormap-in-matplotlib def reverse_colourmap(cmap, name = 'my_cmap_r'): """ In: cmap, name Out: my_cmap_r Explanation: t[0] goes from 0 to 1 row i: x y0 y1 -> t[0] t[1] t[2] / / row i+1: x y0 y1 -> t[n] t[1] t[2] so the inverse should do the same: row i+1: x y1 y0 -> 1-t[0] t[2] t[1] / / row i: x y1 y0 -> 1-t[n] t[2] t[1] """ reverse = [] k = [] for key in cmap._segmentdata: k.append(key) channel = cmap._segmentdata[key] data = [] for t in channel: data.append((1-t[0],t[2],t[1])) reverse.append(sorted(data)) LinearL = dict(zip(k,reverse)) my_cmap_r = LinearSegmentedColormap(name, LinearL) return my_cmap_r #http://cresspahl.blogspot.com/2012/03/expanded-control-of-octaves-colormap.html #http://nbviewer.jupyter.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb # MR=[0,0; # 0.02,0.3; %this is the important extra point # 0.3,1; # 1,1]; # MG=[0,0; # 0.3,0; # 0.7,1; # 1,1]; # MB=[0,0; # 0.7,0; # 1,1]; def cool(): cdict1 = {'blue': ((0.0, 0.0, 0.0), (0.02, 0.3, 0.3), (0.3, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (0.3, 0.0, 0.0), (0.7, 1.0, 1.0), (1.0, 1.0, 1.0)), 'red': ((0.0, 0.0, 0.0), (0.7, 0.0, 0.0), (1.0, 1.0, 1.0)) } blue_red1 = LinearSegmentedColormap('cool', cdict1) return blue_red1 def cool_r(): cdict1 = {'blue': ((0.0, 1.0, 1.0), (0.7, 1.0, 1.0), (0.98, 0.3, 0.3), (1.0, 0.0, 0.0)), 'green': ((0.0, 1.0, 1.0), (0.3, 1.0, 1.0), (0.7, 0.0, 0.0), (1.0, 0.0, 0.0)), 'red': ((0.0, 1.0, 1.0), (0.3, 0.0, 0.0), (1.0, 0.0, 0.0)) } blue_red1 = LinearSegmentedColormap('cool_r', cdict1) return blue_red1 # from Callies def blues(): cdict = { 'red': ((0.0, 0.0, 0.0), (0.5, 0.216, 0.216), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (0.5, 0.494, 0.494), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.0), (0.5, 0.722, 0.722), (1.0, 1.0, 1.0))} cm_blues = 
LinearSegmentedColormap('blues', cdict, 256) return cm_blues def blues_r(): cdict = { 'red': ((0.0, 1.0, 1.0), (0.5, 0.216, 0.216), (1.0, 0.0, 0.0)), 'green': ((0.0, 1.0, 1.0), (0.5, 0.494, 0.494), (1.0, 0.0, 0.0)), 'blue': ((0.0, 1.0, 1.0), (0.5, 0.722, 0.722), (1.0, 0.0, 0.0))} cm_blues = LinearSegmentedColormap('blues_r', cdict, 256) return cm_blues # def cool_r(): # return reverse_colourmap(cool) #x = np.arange(0, np.pi, 0.1) #y = np.arange(0, 2*np.pi, 0.1) #X, Y = np.meshgrid(x, y) #Z = np.cos(X) * np.sin(Y) * 10 #plt.imshow(Z, interpolation='nearest', cmap=blue_red1) #plt.colorbar()
998,678
8d4c88dfd49d32dd20c35bebd6a865eb0801e646
# -*- coding: utf-8 -*- import cookielib import urllib2 import urllib import requests import json import base64 from poster.encode import multipart_encode from poster.streaminghttp import register_openers import inspect from odoo.exceptions import ValidationError class NodeAction: DELETE_NODE = 0 UPDATE_NODE = 1 GET_NODE = 2 LIST_NODE_CHILDREN = 3 CREATE_NODE = 4 UPDATE_NODE_CONTENT = 5 GET_NODE_CONTENT = 6 class Client: BASE_URL = 'alfresco/api/-default-/public' # Login LOGIN_URL = 'authentication/versions/1/tickets' LOGOUT_URL = 'authentication/versions/1/tickets/-me-' VALIDATE_TICKET = 'authentication/versions/1/tickets/-me-' # Sites GET_ALL_SITES = 'alfresco/versions/1/sites' CREATE_SITE = 'alfresco/versions/1/sites' UPDATE_SITE = 'alfresco/versions/1/sites' DELETE_SITE = 'alfresco/versions/1/sites' CREATE_SITE_MEMBERSHIP = 'alfresco/versions/1/sites' # Groups ADD_GROUP_TO_SITE = 'alfresco/service/api/sites' REMOVE_GROUP_FROM_SITE = 'alfresco/service/api/sites' # Users CREATE_USER = 'alfresco/versions/1/people' UPDATE_USER = 'alfresco/versions/1/people' DELETE_USER = '/alfresco/service/api/people' GET_USER_GROUPS = 'alfresco/service/api/people' # Nodes CREATE_NODE = 'alfresco/versions/1/nodes/-my-/children' NODE_SHARE_URL = 'alfresco/service/api/sites/shareUrl' NODE_CONTENT = 'alfresco/versions/1/nodes' CREATE_USER_FIELDS = {'id':'required', 'firstName':'required', 'lastName':False, 'description':False, 'email':'required', 'jobTitle':False, 'location':False, 'mobile':False, 'userStatus':False, 'enabled':False, 'password':'required'} UPDATE_USER_FIELDS = {'id':'required', 'firstName':False,'lastName':False, 'description':False, 'email':False, 'jobTitle':False, 'location':False, 'mobile':False, 'userStatus':False, 'enabled':False, 'password':False, 'oldPassword':False} def __init__(self, url='http://localhost:8080',debug=0): self.cookies = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPHandler(),urllib2.HTTPCookieProcessor(self.cookies)) 
self.headers = {'Content-Type':'application/json', 'accept':'application/json'} self.rootUrl = url self.url = '%s/%s' % (url.rstrip('/'),Client.BASE_URL) self.login = None self.password = None self.ticket = None self.opener = opener self.instance = AlfrescoInstance(self.url) self.debug = debug def loginUser(self, login, password): loginUrl = '%s/%s' % (self.url, Client.LOGIN_URL) data = json.dumps({ "userId": login,"password": password}) request = AlfrescoRequest(loginUrl, data=data, headers=self.headers, method='POST') serverResponse = None response = {'success':False,'error':[]} try: if self.debug == 1: print 'Try to connect as %s' % login serverResponse = self.opener.open(request) if self.debug == 1: print 'Connection success !! Code : %s'% serverResponse.getcode() except urllib2.HTTPError ,e: if e.code == 400: if self.debug == 1: print 'Login or password is not provided' response['error'].append('Login or password is not provided') elif e.code == 403: if self.debug == 1: print 'Login failed' response['error'].append('Login failed') elif e.code == 110: if self.debug == 1: print 'Request time out' response['error'].append('Request time out') elif e.code == 111: if self.debug == 1: print 'Connection refused' response['error'].append('Connection refused') else: printException(e) except urllib2.URLError, e: if self.debug == 1: print 'Unable to open url, Connection refused' response['error'].append('Unable to open url, Connection refused') if serverResponse is not None: jsonData = serverResponse.read() if self.debug == 1: print jsonData data = json.loads(jsonData) self.ticket = data['entry']['id'] self.login = data['entry']['userId'] self.password = password # Insert ticket in header for any later request for the same session self.headers['Authorization'] = "Basic %s"% base64.b64encode(self.ticket) serverResponse.close() response['success'] = True return response else: return response def reconnect(self): if not self.login or not self.password: raise 
ValidationError('Unable to reconnect to alfresco server') loginUrl = '%s/%s' % (self.url, Client.LOGIN_URL) data = json.dumps({ "userId": self.login,"password": self.password}) request = AlfrescoRequest(loginUrl, data=data, headers=self.headers, method='POST') serverResponse = None response = {'success':False,'error':[]} try: if self.debug == 1: print 'Try to connect as %s' % login serverResponse = self.opener.open(request) if self.debug == 1: print 'Connection success !! Code : %s'% serverResponse.getcode() except urllib2.HTTPError ,e: if e.code == 400: if self.debug == 1: print 'Login or password is not provided' response['error'].append('Login or password is not provided') elif e.code == 403: if self.debug == 1: print 'Login failed' response['error'].append('Login failed') elif e.code == 110: if self.debug == 1: print 'Request time out' response['error'].append('Request time out') elif e.code == 111: if self.debug == 1: print 'Connection refused' response['error'].append('Connection refused') else: printException(e) except urllib2.URLError, e: if self.debug == 1: print 'Unable to open url, Connection refused' response['error'].append('Unable to open url, Connection refused') if serverResponse is not None: jsonData = serverResponse.read() if self.debug == 1: print jsonData data = json.loads(jsonData) self.ticket = data['entry']['id'] # Insert ticket in header for any later request for the same session self.headers['Authorization'] = "Basic %s"% base64.b64encode(self.ticket) serverResponse.close() response['success'] = True return response else: return response def logoutUser(self): logoutUrl = '%s/%s' % (self.url, Client.LOGOUT_URL) request = AlfrescoRequest(logoutUrl, headers=self.headers, method='DELETE') serverResponse = None response = {'success':False,'error':[]} try: if self.debug == 1: print 'Try to disconnect user ...' 
serverResponse = self.opener.open(request) except urllib2.HTTPError ,e: if e.code == 400: if self.debug == 1: print 'URL path does not include -me- or the ticket is not provided by the Authorization header' response['error'].append('URL path does not include -me- or the ticket is not provided by the Authorization header') elif e.code == 404: if self.debug == 1: print 'Status of the user has changed (for example, the user is locked or the account is disabled) or the ticket has expired' response['error'].append('Status of the user has changed (for example, the user is locked or the account is disabled) or the ticket has expired') else: printException(e) except urllib2.URLError, e: if self.debug == 1: print 'Unable to open url, Connection refused' response['error'].append('Unable to open url, Connection refused') if serverResponse is not None: self.ticket = None self.login = None self.password = None # Insert ticket in header for any later request for the same session #self.headers['Authorization'] = "Basic %s"% base64.b64encode(self.ticket) #serverResponse.close() response['success'] = True return response else: return response def validateSession(self): validateUrl = '%s/%s' % (self.url, Client.VALIDATE_TICKET) request = AlfrescoRequest(validateUrl, headers=self.headers, method='GET') serverResponse = None response = {'success':False,'error':[]} try: serverResponse = self.opener.open(request) except urllib2.HTTPError ,e: if e.code == 400: if self.debug == 1: print 'URL path does not include -me- or the ticket is not provided by the Authorization header' response['error'].append('URL path does not include -me- or the ticket is not provided by the Authorization header') elif e.code == 401: if self.debug == 1: print 'Authentication failed' response['error'].append('Authentication failed') elif e.code == 404: if self.debug == 1: print 'The request is authorized correctly but the status of the user (of the supplied ticket) has changed (for example, the user is locked or 
the account is disabled) or the ticket has expired' response['error'].append('Request time out') else: printException(e) except urllib2.URLError: print 'validateSession ------ > Unable to reach server' if serverResponse is not None: jsonData = serverResponse.read() serverResponse.close() response['success'] = True return response else: return response # ---------------------------------------------------- USER ----------------------------------------------------------------------- def createUser(self,userData): """ @param: dict() contains keys recognized by the alfresco REST API items : 'id', 'firstName', 'email', 'password' are required @return: response dict() with two items 'success': equals True when everything goes right, False otherwise 'error': contains dict() containing the errors returned from alfresco @Exceptions: ValidationError if one of the required items is missing """ createUserUrl = '%s/%s' % (self.url, Client.CREATE_USER) user = {} response = {'success':True, 'error':None} for key, value in userData.items(): # accept only recognized fields if key in self.CREATE_USER_FIELDS.keys(): user[key] = value for key, value in {k:v for k, v in self.CREATE_USER_FIELDS.items() if v == 'required'}.items(): if key not in user: raise ValidationError('field %s is required to create a new user in alfresco'%key) request = AlfrescoRequest(createUserUrl,data=json.dumps(user), headers=self.headers,method='POST') serverResponse = None try: print 'Creating a user ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: response['error'] = 'Invalid parameter: personBodyCreate is invalid' print 'Invalid parameter: personBodyCreate is invalid' elif e.code == 401: response['error'] = 'Authentication failed' print 'Authentication failed' elif e.code == 403: response['error'] = 'Current user does not have permission to create a person' print 'Current user does not have permission to create a person' elif e.code == 409: response['error'] = 'Person within given id already exists' print 'Person within given id already exists' elif e.code == 422: response['error'] = 'Model integrity exception' print 'Model integrity exception' else: printException(e) except urllib2.URLError: print 'createUser ------ > Unable to reach server' if serverResponse is not None: serverResponse.close() return response else: response['success'] = False() return response def updateUser(self,userData): """ @param: dict() contains keys recognized by the alfresco REST API items : 'id' is required @return: response dict() with two items 'success': equals True when everything goes right, False otherwise 'error': contains dict() containing the errors returned from alfresco @Exceptions: ValidationError if 'id' item is missing """ user = {} response = {'success':True, 'error':None} for key, value in userData.items(): # accept only recognized fields if key in self.UPDATE_USER_FIELDS.keys(): if isinstance(value,bool): if value: user[key] = 'true' else: user[key] = 'false' else: user[key] = str(value) for key, value in {k:v for k, v in self.UPDATE_USER_FIELDS.items() if v == 'required'}.items(): if key not in user: raise ValidationError('field %s is required to update a user in alfresco'%key) updateUrl = '%s/%s/%s' % (self.url, Client.UPDATE_USER,user['id']) # user id is supplied in the url i must be deleted from json object 
del(user['id']) print 'CONTENT OF USER TO UPDATE: ',user print 'CONTENT OF URL TO UPDATE: ',updateUrl request = AlfrescoRequest(updateUrl,data=json.dumps(user), headers=self.headers,method='PUT') serverResponse = None try: print 'Updating a user ...' serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: response['error'] = 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' print 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' elif e.code == 401: response['error'] = 'Updating user: Authentication failed' print 'Updating user: Authentication failed' elif e.code == 403: response['error'] = 'Updating user: Current user does not have permission to update a person' print 'Updating user: Current user does not have permission to update a person' elif e.code == 404: response['error'] = 'Updating user: personId does not exist' print 'Updating user: personId does not exist' elif e.code == 422: response['error'] = 'Updating user: Model integrity exception' print 'Updating user: Model integrity exception' else: printException(e) except urllib2.URLError: print 'updateUser ------ > Unable to reach server' if serverResponse is not None: serverResponse.close() return response else: response['success'] =False return response def deleteUser(self, userId): """ @param: userId of the user to delete (required) and must be an str() object @return: response dict() with two items 'success': equals True when everything goes right, False otherwise 'error': contains dict() containing the errors returned from alfresco @Exceptions: ValidationError if 'userId' param is missing """ response = {'success':True, 'error':None} if not isinstance(userId, str) or not len(userId): raise ValidationError('Deleting User: userId cannot be Null') 
deleteUrl = '%s/%s/%s' % (self.rootUrl, Client.DELETE_USER, str(userId)) request = AlfrescoRequest(deleteUrl, headers=self.headers, method='DELETE') serverResponse = None try: print 'Deleting a user ...' serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: response['error'] = 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' print 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' elif e.code == 401: response['error'] = 'Deleting user: Current user does not have permission to delete a person' print 'Deleting user: Current user does not have permission to delete a person' elif e.code == 404: response['error'] = 'Deleting user: personId does not exist' print 'Deleting user: personId does not exist' else: printException(e) except urllib2.URLError: print 'updateUser ------ > Unable to reach server' if serverResponse is not None: serverResponse.close() return response else: response['success'] =False return response def getUserGroups(self, userId): # alfresco/service/api/people response = {'success':True, 'error':None, 'groups':None} getGroupsUrl = '%s/%s/%s?groups=true' % (self.rootUrl, Client.GET_USER_GROUPS, str(userId)) request = AlfrescoRequest(getGroupsUrl, headers=self.headers, method='GET') serverResponse = None try: print 'obtaining users groups ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: response['error'] = 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' print 'Updating user: the update request is invalid or personId is not a valid format or personBodyUpdate is invalid' elif e.code == 401: response['error'] = 'Deleting user: Current user does not have permission to delete a person' print 'Deleting user: Current user does not have permission to delete a person' elif e.code == 404: response['error'] = 'Deleting user: personId does not exist' print 'Deleting user: personId does not exist' else: printException(e) except urllib2.URLError: print 'updateUser ------ > Unable to reach server' if serverResponse is not None and serverResponse.getcode() == 200: response['groups'] = jsonResponse['groups'] serverResponse.close() return response else: response['success'] = False return response # ---------------------------------------------------- SITE ----------------------------------------------------------------------- def deleteSite(self, siteId=None, permanent=False): if siteId is None: raise ValueError('site ID is required !') if permanent == True: permanent = 'true' else: permanent = 'false' deleteSiteUrl = '%s/%s/%s?permanent=%s' % (self.url, Client.DELETE_SITE, siteId, permanent) request = AlfrescoRequest(deleteSiteUrl, headers=self.headers, method='DELETE') serverResponse = None response = {'success':False, 'response':{}, 'error':[]} try: print 'Deleting a site ...' 
serverResponse = self.opener.open(request) except urllib2.HTTPError, e: if e.code == 204: if self.debug == 1: print 'Successful response Site deleted' response['error'].append('Invalid parameter: id, title, or description exceed the maximum length; or id contains invalid characters; or siteBodyCreate invalid') elif e.code == 401: if self.debug == 1: print 'Authentication failed' response['error'].append('Authentication failed') elif e.code == 403: if self.debug == 1: print 'Current user does not have permission to delete the site that is visible to them' response['error'].append('Site with the given identifier already exists') elif e.code == 404: if self.debug == 1: print 'siteId does not exist' response['error'].append('siteId does not exist') else: printException(e) def createSite(self, siteData=None, skipConfiguration=False, skipAddToFavorites=False): self.validateSession() """ @param siteData: {"id": "SiteId","title": "SiteTitle","description": "description of site","visibility": "PRIVATE"} """ if skipConfiguration == False: skipConfiguration = 'false' else: skipConfiguration = 'true' if skipAddToFavorites == False: skipAddToFavorites = 'false' else: skipAddToFavorites = 'true' createSiteUrl = '%s/%s?skipConfiguration=%s&skipAddToFavorites=%s' % (self.url,Client.CREATE_SITE, skipConfiguration, skipAddToFavorites) if 'id' not in siteData: raise ValueError('site ID is required !') elif 'title' not in siteData: raise ValueError('site Title is required !') elif 'visibility' not in siteData: raise ValueError('site Visibility is required !') request = AlfrescoRequest(createSiteUrl,data=json.dumps(siteData), headers=self.headers,method='POST') serverResponse = None response = {'success':False, 'response':{}, 'error':[]} try: print 'Creating a site ...' 
serverResponse = self.opener.open(request) except urllib2.HTTPError, e: if e.code == 400: if self.debug == 1: print 'Invalid parameter: id, title, or description exceed the maximum length; or id contains invalid characters; or siteBodyCreate invalid' response['error'].append('Invalid parameter: id, title, or description exceed the maximum length; or id contains invalid characters; or siteBodyCreate invalid') elif e.code == 401: if self.debug == 1: print 'Authentication failed' response['error'].append('Authentication failed') elif e.code == 409: if self.debug == 1: print 'Site with the given identifier already exists' response['error'].append('Site with the given identifier already exists') else: printException(e) if serverResponse is not None: data = json.loads(serverResponse.read()) if self.debug == 1: print json.dumps(data, indent=4, sort_keys=True) response['response']['guid'] = data['entry']['guid'] response['response']['id'] = data['entry']['id'] response['response']['title'] = data['entry']['title'] response['response']['visibility'] = data['entry']['visibility'] response['response']['preset'] = data['entry']['preset'] response['response']['role'] = data['entry']['role'] response['success'] = True serverResponse.close() return response else: return response def updateSite(self, siteId=None, siteData=None): self.validateSession() """ @param siteData: {"title": "SiteTitle","description": "description of site","visibility": "PRIVATE"} """ if siteId is None: raise ValueError('Site ID is required !') createSiteUrl = '%s/%s/%s' % (self.url, Client.CREATE_SITE, siteId) print '------------- siteID %s'% createSiteUrl request = AlfrescoRequest(createSiteUrl, data=json.dumps(siteData), headers=self.headers, method='PUT') response = None try: print 'Updating a site ...' 
response = self.opener.open(request) jsonResponse = json.loads(response.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: id, title, or description exceed the maximum length; or id contains invalid characters; or siteBodyCreate invalid' elif e.code == 401: print 'Authentication failed' elif e.code == 409: print 'Site with the given identifier already exists' elif e.code == 403: print 'Current user does not have permission to update the site that is visible to them.' elif e.code == 404: print 'siteId does not exist' else: printException(e) def getAllSites(self): getAllSitesUrl = '%s/%s' % (self.url, Client.GET_ALL_SITES) response = {'success':True, 'error':None, 'sites':None} serverResponse = None request = AlfrescoRequest(getAllSitesUrl, headers=self.headers, method='GET') try: print 'get site list ...' serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) #print json.dumps(jsonResponse, indent=4, sort_keys=True) response['sites'] = jsonResponse['list']['entries'] except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of maxItems, skipCount, orderBy, or where is invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 200: return response else: response['success'] = False return response # ---------------------------------------------------- GROUP ----------------------------------------------------------------------- def addGroupToSite(self, siteId, groupFullName, role='SiteConsumer'): """ groupFullName must begin with GROUP_ exp: GROUP_students_managers """ addGroupUrl = '%s/%s/%s/memberships'%(self.rootUrl, Client.ADD_GROUP_TO_SITE,siteId) data = { 'group':{ 
'fullName':groupFullName }, 'role':role } print '##URL ',addGroupUrl print '## data ',data response = {'success':True, 'error':None} serverResponse = None request = AlfrescoRequest(addGroupUrl, data=json.dumps(data), headers=self.headers, method='POST') try: print 'adding group to site ...' serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' elif e.code == 409: print 'Person with this id is already a member' response['error'] = 'Person with this id is already a member' elif e.code == 403: print 'User does not have permission to invite a person' response['error'] = 'User does not have permission to invite a person' elif e.code == 404: print 'siteId or personId does not exist' response['error'] = 'siteId or personId does not exist' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 201: return response else: response['success'] = False return response def removeGroupFromSite(self, siteId, groupFullName): removeGroupFromSiteUrl = '%s/%s/%s/memberships/%s'%(self.rootUrl,Client.REMOVE_GROUP_FROM_SITE,siteId,groupFullName) response = {'success':True, 'error':None} serverResponse = None request = AlfrescoRequest(createSiteMembershipUrl, data=json.dumps(data), headers=self.headers, method='DELETE') try: print 'removing group from site ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' elif e.code == 409: print 'Person with this id is already a member' response['error'] = 'Person with this id is already a member' elif e.code == 403: print 'User does not have permission to invite a person' response['error'] = 'User does not have permission to invite a person' elif e.code == 404: print 'siteId or personId does not exist' response['error'] = 'siteId or personId does not exist' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 200: return response else: response['success'] = False return response def addUserToGroup(self, groupShortName, userId): # /alfresco/service/api/groups/{shortName}/children/{fullAuthorityName} addUserUrl = '%s/alfresco/service/api/groups/%s/children/%s'%(self.rootUrl, groupShortName, userId) print addUserUrl response = {'success':True, 'error':None} serverResponse = None data = {} # needs to post an empty json object because of the header Content-Type: application/json request = AlfrescoRequest(addUserUrl, data=json.dumps(data), headers=self.headers, method='POST') try: print 'ading user to group ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' elif e.code == 409: print 'Person with this id is already a member' response['error'] = 'Person with this id is already a member' elif e.code == 403: print 'User does not have permission to invite a person' response['error'] = 'User does not have permission to invite a person' elif e.code == 404: print 'siteId or personId does not exist' response['error'] = 'siteId or personId does not exist' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 200: return response else: response['success'] = False return response def removeUserFromGroup(self, groupShortName, userId): # '/alfresco/service/api/groups/{shortGroupName}/children/{fullAuthorityName}' removeUserUrl = '%s/alfresco/service/api/groups/%s/children/%s'%(self.rootUrl, groupShortName, userId) print removeUserUrl response = {'success':True, 'error':None} serverResponse = None request = AlfrescoRequest(removeUserUrl, headers=self.headers, method='DELETE') try: print 'removing user form group ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' elif e.code == 409: print 'Person with this id is already a member' response['error'] = 'Person with this id is already a member' elif e.code == 403: print 'User does not have permission to invite a person' response['error'] = 'User does not have permission to invite a person' elif e.code == 404: print 'siteId or personId does not exist' response['error'] = 'siteId or personId does not exist' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 200: return response else: response['success'] = False return response def createSiteMembership(self, siteId, userId, role='SiteConsumer'): createSiteMembershipUrl = '%s/%s/%s/members' % (self.url, Client.CREATE_SITE_MEMBERSHIP, siteId) print '------------- siteID %s'% createSiteMembershipUrl data = {'role':role, 'id':userId} response = {'success':True, 'error':None} serverResponse = None request = AlfrescoRequest(createSiteMembershipUrl, data=json.dumps(data), headers=self.headers, method='POST') try: print 'Create site membership ...' 
serverResponse = self.opener.open(request) jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' response['error'] = 'Invalid parameter: value of role or id is invalid or siteMembershipBodyCreate invalid' elif e.code == 401: print 'Authentication failed' response['error'] = 'Iuthentication failed' elif e.code == 409: print 'Person with this id is already a member' response['error'] = 'Person with this id is already a member' elif e.code == 403: print 'User does not have permission to invite a person' response['error'] = 'User does not have permission to invite a person' elif e.code == 404: print 'siteId or personId does not exist' response['error'] = 'siteId or personId does not exist' else: printException(e) if serverResponse is not None and serverResponse.getcode() == 201: return response else: response['success'] = False return response def updateDocumentVersion(self,nodeRef,file, majorVersion=False, comment='',params=None): if isinstance(majorVersion, bool): if majorVersion == True: majorVersion = 'true' else: majorVersion = 'false' params = {'majorVersion':majorVersion,'comment':comment} encodedParams = urllib.urlencode(params) url = '%s/alfresco/versions/1/nodes/%s/content'% (str(self.url), str(nodeRef)) url = '%s?%s'%(url,encodedParams) print '#### url: ',url try: len(file) except TypeError: file = file.read() request = AlfrescoRequest(url, data=file, headers=self.headers, method='PUT') try: response = self.opener.open(request) except urllib2.HTTPError, e: print e jsonResponse = json.loads(response.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) def multipartUploadDocument(self, parentNodeRef=None, nodeData=None): register_openers() datagen, headers = multipart_encode(nodeData) self.headers.update(headers) createNodeUrl = 
'%s/alfresco/versions/1/nodes/%s/children' % (self.url,parentNodeRef) print 'URL : ',createNodeUrl request = AlfrescoRequest(createNodeUrl, data=datagen, headers=self.headers, method='POST') serverResponse = None response = {'success':False, 'result':[], 'errorMsg':'', 'errorCode':''} try: serverResponse = urllib2.urlopen(request) except urllib2.HTTPError, e: if e.code == 400: if self.debug == 1: print 'Invalid parameter: nodeId is not a valid format or nodeBodyCreate is invalid' response['errorMsg']= 'Invalid parameter: nodeId is not a valid format or nodeBodyCreate is invalid' response['errorCode']= e.code if e.code == 401: if self.debug == 1: print 'Authentication failed' response['errorMsg'] ='Authentication failed' response['errorCode']= e.code if e.code == 403: if self.debug == 1: print 'Current user does not have permission to create children of nodeId' response['errorMsg'] = 'Current user does not have permission to create children of nodeId' response['errorCode']= e.code if e.code == 404: if self.debug == 1: print 'nodeId does not exist' response['errorMsg'] = 'nodeId does not exist' response['errorCode']= e.code if e.code == 409: if self.debug == 1: print 'New name clashes with an existing node in the current parent folder' response['errorMsg'] = 'New name clashes with an existing node in the current parent folder' response['errorCode']= e.code if e.code == 413: if self.debug == 1: print 'Content exceeds individual file size limit configured for the network or system' response['errorMsg'] = 'Content exceeds individual file size limit configured for the network or system' response['errorCode']= e.code if e.code == 422: if self.debug == 1: print 'Model integrity exception including a file name containing invalid characters' response['errorMsg'] = 'Model integrity exception including a file name containing invalid characters' response['errorCode']= e.code if e.code == 507: if self.debug == 1: print 'Content exceeds overall storage quota limit configured for 
the network or system' response['errorMsg'] = 'Content exceeds overall storage quota limit configured for the network or system' response['errorCode']= e.code else: printException(e) if serverResponse is not None: #print ' ----- data --- %s'%serverResponse jsonResponse = json.loads(serverResponse.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) response['result'] = jsonResponse response['success'] = True return response else: return response def createNode(self,parentNodeRef=None, nodeData=None): self.validateSession() #createNodeUrl = '%s/alfresco/versions/1/nodes/%s/children' % (self.url, parentNodeRef) createNodeUrl = '%s/alfresco/versions/1/nodes/a945184f-42db-43cd-8416-f8bf65973286/children' % (self.url) #nodeData = {"name": "UploadingFolder","nodeType": "cm:folder"} #files = {"filedata": open("file.txt", "rb")} #request = AlfrescoRequest(createNodeUrl, data=json.dumps(nodeData), headers=self.headers, method='POST') request = AlfrescoRequest(createNodeUrl, data=open("file.txt", "rb").read(), headers=self.headers, method='POST') response = None try: print 'Creating a node ...' 
response = self.opener.open(request) jsonResponse = json.loads(response.read()) print json.dumps(jsonResponse, indent=4, sort_keys=True) node = {'id':'','parentId':''} node['id'] = jsonResponse['entry']['id'] node['parentId'] = jsonResponse['entry']['parentId'] return node except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: nodeId is not a valid format or nodeBodyCreate is invalid' elif e.code == 401: print 'Authentication failed' elif e.code == 409: print 'New name clashes with an existing node in the current parent folder' elif e.code == 403: print 'Current user does not have permission to create children of nodeId' elif e.code == 404: print 'nodeId does not exist' else: printException(e) def getNode(self, nodeRef=None): self.validateSession() getNodeUrl = '%s/alfresco/versions/1/nodes/%s' % (self.url, nodeRef) request = AlfrescoRequest(getNodeUrl, headers=self.headers, method='GET') response = None try: print 'Obtaining a node ...' response = self.opener.open(request) jsonResponse = json.loads(response.read()) print json.dumps(jsonResponse['entry'], indent=4, sort_keys=True) node = jsonResponse['entry'] except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: nodeId is not a valid format' elif e.code == 401: print 'Authentication failed' elif e.code == 403: print 'Current user does not have permission to create children of nodeId' elif e.code == 404: print 'nodeId does not exist' else: printException(e) if response is not None: return node else: return None def getNodeChildren(self, parentNodeRef=None,isFolder=None,include=[]): """ TO-DO : support pagination """ self.validateSession() createNodeUrl = '%s/alfresco/versions/1/nodes/%s/children' % (self.url, parentNodeRef) if isFolder == True: createNodeUrl = '%s?where=(isFolder=true)'%createNodeUrl elif isFolder == False: createNodeUrl = '%s?where=(isFolder=false)'%createNodeUrl if len(include): print '--------include is not empty ',include params = '&include=' for param in 
include: print '----- content of one param : ',param params = '%s%s,'%(params,param) # delete the last comma params = params[:-1] createNodeUrl = '%s%s'%(createNodeUrl,params) else: print '--------include is empty ',include print '------------- getNodeChildren URL :',createNodeUrl request = AlfrescoRequest(createNodeUrl, headers=self.headers, method='GET') response = None try: print 'Obtaining a node ...' response = self.opener.open(request) jsonResponse = json.loads(response.read()) print json.dumps(jsonResponse['list']['entries'], indent=4, sort_keys=True) nodes = jsonResponse['list']['entries'] if isFolder == False: for node in nodes: downloadUrl = '%s/%s/%s/content?attachment=true'%(self.url, Client.NODE_CONTENT, node['entry']['id']) node['entry']['downloadUrl'] = downloadUrl previewUrl = '%s/%s/%s/content?attachment=false'%(self.url, Client.NODE_CONTENT, node['entry']['id']) node['entry']['previewUrl'] = previewUrl print '########!!!!!!!!!!!!! ',json.dumps(nodes, indent=4, sort_keys=True) except urllib2.HTTPError, e: if e.code == 400: print 'Invalid parameter: nodeId is not a valid format or nodeBodyCreate is invalid' elif e.code == 401: print 'Authentication failed' elif e.code == 409: print 'New name clashes with an existing node in the current parent folder' elif e.code == 403: print 'Current user does not have permission to create children of nodeId' elif e.code == 404: print 'nodeId does not exist' else: printException(e) if response is not None: return nodes else: return None def getNodeShareUrl(self, nodeRef): self.validateSession() getShareUrl = '%s/%s?nodeRef=workspace://SpacesStore/%s'%(self.rootUrl, Client.NODE_SHARE_URL, nodeRef) serverResponse = None response = {'success':False, 'result':[], 'error':[]} request = AlfrescoRequest(getShareUrl, headers=self.headers, method='GET') try: serverResponse = self.opener.open(request) except urllib2.HTTPError, e: printException(e) if serverResponse is not None: jsonResponse = 
json.loads(serverResponse.read()) print '-------- ROOT URL %s'%self.rootUrl print json.dumps(jsonResponse, indent=4, sort_keys=True) serverResponse.close() response['result'] = jsonResponse response['success'] = True return response else: return response def getNodeContent(self,nodeRef): self.validateSession() getNodeContentUrl = '%s/%s/%s/content?attachment=true'%(self.url, Client.NODE_CONTENT, nodeRef) request = AlfrescoRequest(getNodeContentUrl, headers=self.headers, method='GET') response = None try: print 'Obtaining a node content ...' response = self.opener.open(request) except urllib2.HTTPError, e: if e.code == 304: print 'Content has not been modified since the date provided in the If-Modified-Since header' elif e.code == 400: print 'Invalid parameter: nodeId is not a valid format, or is not a file' elif e.code == 401: print 'Authentication failed' elif e.code == 403: print 'Current user does not have permission to retrieve content of nodeId' elif e.code == 404: print 'nodeId does not exist' else: printException(e) if response is not None: return response else: return None def getDocumentLibraryNodeRef(self, siteNodeRef): nodes = self.getNodeChildren(siteNodeRef) docLibNodeRef = False for node in nodes: if node['entry']['name'] == 'documentLibrary': docLibNodeRef = node['entry']['id'] break return docLibNodeRef class AlfrescoInstance: def __init__(self, url): self.url = url.rstrip('/') def getBaseUrl(self): """ return the base url for the running alfresco instance""" return self.url def getUrl(self, path): """ concat the base url with the path pointing to the needed function @param path : path to the function Exp: 'sites' """ return '%s/%s' % (self.getBaseUrl(),path) class AlfrescoRequest(urllib2.Request): """ Extending the urllib2.Request class in order to add additional request method to GET and POST like PUT and DELETE """ def __init__(self, url, data=None, headers={}, origin_req_host=None, unverifiable=False, method=None): urllib2.Request.__init__(self, 
url, data, headers, origin_req_host, unverifiable) self.httpMethod = method def get_method(self): if self.httpMethod is not None: return self.httpMethod else: return urllib2.Request.get_method(self) def set_method(self, method): self.httpMethod = method def printException(exception): print '---- > Unkown Error occured : \n\tURL : %s \n\tCODE : %s \n\tMSG : %s' % (exception.url, exception.code, exception.msg) jsonData = json.loads(exception.fp.read()) if jsonData is not None: print json.dumps(jsonData, indent=4, sort_keys=True) else: print 'Response is not of type JSON\n' if __name__ == '__main__': client = Client(url='http://localhost:8080',debug=1) response = client.loginUser('admin','Nevermind') #siteData = {'id':'FileUploaderA','title':'FileUploaderA','visibility':'PUBLIC','description':'Description'} #response = client.createSite(siteData) #print '--------- response : %s'% response #siteID = response['response']['guid'] #response = client.getNodeChildren(siteID) #docLibID = '' #for node in response: # if node['entry']['name'] == 'documentLibrary': # print 'documentLibrary NodeRef : %s'% node['entry']['id'] # docLibID = node['entry']['id'] # break #nodeData = {"name": "Quotation","nodeType": "cm:folder"} #print client.getNodeShareUrl('6417d086-f937-43c8-874d-e4a16efa7126') #client.uploadDocument() ##### ----------------------------------------------- #Url = '%s/alfresco/versions/1/nodes/8272786f-1fe2-488a-ae53-3a25a37a7c9e/content?majorVersion=true'% (client.url) #print '# %s'%Url #url = 'http://vps385039.ovh.net:8080/alfresco/api/-default-/public/alfresco/versions/1/nodes/8272786f-1fe2-488a-ae53-3a25a37a7c9e/content?majorVersion=true' #files = open('SO035.pdf', "rb").read() #print '## HEADERS %s'% client.headers #request = AlfrescoRequest(Url, data=files, headers=client.headers, method='PUT') #try: # response = client.opener.open(request) #except urllib2.HTTPError, e: # printException(e) #jsonResponse = json.loads(response.read()) #print 
json.dumps(jsonResponse, indent=4, sort_keys=True) #client.getNodeChildren('618d5146-059f-41bd-a95f-12b8df097cac'); #print client.addUserToGroup('students_managers', '1_user') #file = open('file.txt', 'rb') #nodeData = {"filedata": file, "name":"whatat","cm:title":"whaats", "nodeType":"cm:content"} #response = client.multipartUploadDocument('8207d52c-f9fc-492b-9f45-6ad8825e3a0b',nodeData) #print response file = open('file.txt', 'rb') print client.updateDocumentVersion('3b995a10-418f-4209-91b1-afe8091a2256', file, 'true', 'comments of the new version from terminal') #curl -uadmin:Nevermind -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"name":"My text file.txt","nodeType":"cm:content"}' 'http://localhost:8080/alfresco/api/-default-/public/alfresco/versions/1/nodes/3210b291-3bb7-4675-a959-429a5caf56a7/children' -F filedata=@file.txt
998,679
1d50ab61b77d71231af030807b744e7e6175b80c
import Pixiv_Crawler


def on_PC(illust_id):
    """Download every page of an illustration via the pixiv web (PC) API."""
    pixiv = Pixiv_Crawler.Pixiv()
    res = pixiv.illust_detail(illust_id, is_pc=True)
    # NOTE(review): 'details' is never used below; kept because the lookup
    # doubles as a sanity check that the illust exists -- confirm intent.
    details = res['body']['illust_details']
    html = pixiv.illust_pages(illust_id)['body']
    for data in html:
        url = data['urls']['original']
        pixiv.download(url)


def on_APP(illust_id):
    """Download an illustration via the pixiv mobile (APP) API.

    Requires a login; the credentials below are placeholders.
    """
    pixiv = Pixiv_Crawler.Pixiv()
    # TODO: placeholder credentials -- supply a real account, ideally from
    # environment/config rather than hard-coded literals.
    pixiv.login('pixiv_id', 'password')
    res = pixiv.illust_detail(illust_id)
    # BUG FIX: the original used bare 'except:' clauses here, which also
    # swallowed unrelated failures (network errors, typos, KeyboardInterrupt)
    # and could silently retry the multi-page path after a failed download.
    # The fallbacks are now explicit key checks instead of exception control flow.
    details = res['illust'] if 'illust' in res else res['illusts']
    single = details.get('meta_single_page') or {}
    url = single.get('original_image_url')
    if url:
        # Single-page illustration.
        pixiv.download(url)
    else:
        # Multi-page illustration (manga-style): download every page.
        for page in details['meta_pages']:
            pixiv.download(page['image_urls']['original'])


def main():
    illust_id = '84278666'  # illust
    #illust_id = '66808665' # manga
    on_PC(illust_id)
    #on_APP(illust_id)


if __name__ == '__main__':
    main()
998,680
9be37e4bb8915964bd9a80caf7c9bc00fda723c5
# -*- coding: utf-8 -*- """ Copyright [2009-2018] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pathlib import Path import click from rnacentral_pipeline.rnacentral import attempted, r2dt @click.group("r2dt") def cli(): """ A group of commands for parsing data from secondary structures into an importable format. """ pass @cli.command("process-svgs") @click.option("--allow-missing", is_flag=True, default=False) @click.argument("model_info", type=click.File("r")) @click.argument("directory", type=click.Path()) @click.argument("output", type=click.File("w")) def process_svgs(model_info, directory, output, allow_missing=False): """ Process all SVG secondary structures in the given directory and produce a single data file that can be imported into the database. """ r2dt.write(model_info, directory, output, allow_missing=allow_missing) @cli.group("should-show") def should_show(): """ Some commands relating to building a model for should show as well as running it. """ @should_show.command("convert-sheet") @click.argument("filename", type=click.File("r")) @click.argument("output", type=click.File("w")) def convert_sheet(filename, output): """ This command is to convert a downloaded google sheet csv into a csv that can be used for training data. Often we will build a spreadsheet of example URS and then use that to build a training set. It is nice since you can embedd an SVG in google sheets so it is fast for us to compare several of them. 
In order to move that back into the training data you can download that sheet as a CSV and then run this command on it to build the CSV that is used in training. It requires there be a 'urs' and 'Labeled Should show' column to build the CSV. The values in labeled should show must be true/false (ignoring case). """ r2dt.write_converted_sheet(filename, output) @should_show.command("fetch-data") @click.option("--db-url", envvar="PGDATABASE") @click.argument("filename", type=click.File("r")) @click.argument("output", type=click.File("w")) def fetch_training_data(filename, output, db_url=None): """ This builds a CSV file of training data to use for the model building. I keep it separate so I can build a training csv and play with it interactivly before committing the final modeling building logic to the pipeline. """ r2dt.write_training_data(filename, db_url, output) @should_show.command("inspect-data") @click.option("--db-url", envvar="PGDATABASE") @click.argument("filename", type=click.File("r")) @click.argument("output", type=click.File("w")) def fetch_inspect_data(filename, output, db_url=None): """ This is the command to use when trying to fetch more examples to add to the training set. This will fetch some information that is useful for a person to evaluate a diagram and decide if it should be true/false in the training set. """ r2dt.write_training_data(filename, db_url, output) @should_show.command("build-model") @click.option("--db-url", envvar="PGDATABASE") @click.argument("training-info", type=click.File("r")) @click.argument("model", type=click.Path()) def build_model(training_info, model, db_url=None): """ This builds a model given then training information. The training information should be a csv file of: URS,flag The flag must be 1 or 0 to indicate if the URS should be shown or not. THis will fetch the data like the fetch-data command but will then build a model and write it out the the output file directly. 
""" r2dt.build_model(training_info, db_url, Path(model)) @should_show.command("compute") @click.option("--db-url", envvar="PGDATABASE") @click.argument("model", type=click.Path()) @click.argument("filename", type=click.File("r")) @click.argument("output", type=click.File("w")) def write_should_show(model, filename, output, db_url=None): """ This computes the should show values for the data in the given file and a file listing urs ids to use. The data needed for the URS will be fetched from the database. This is meant to operate on large batches, like relabeling the entire database. """ r2dt.write_should_show(model, filename, db_url, output) @cli.group("model-info") def model_info(): """ Commands for parsing and generating data files we can import into the database as model info files. """ pass @model_info.command("crw") @click.argument("filename", type=click.File("r")) @click.argument("metadata_url", type=str) @click.argument("output", default="-", type=click.File("w")) def crw_model_info(filename, metadata_url, output): """ Parse the CRW metadata file and produce """ r2dt.write_crw(filename, metadata_url, output) @model_info.command("ribovision") @click.argument("filename", type=click.File("r")) @click.argument("output", default="-", type=click.File("w")) def ribovision_model_info(filename, output): """ Parse the metadata.tsv file from R2DT for Ribovision models to produce something we can put in our database. """ r2dt.write_ribovision(filename, output) @model_info.command("gtrnadb") @click.argument("filename", type=click.File()) @click.argument("output", default="-", type=click.File("w")) def gtrnadb_model_info(filename, output): """ Parse the metadata.tsv file from R2DT for gtrnadb models to produce something we can put in our database. 
""" r2dt.write_gtrnadb(filename, output) @model_info.command("rnase-p") @click.argument("filename", type=click.File("r")) @click.argument("output", default="-", type=click.File("w")) def rnase_p_model_info(filename, output): """ Parse the metadata.tsv file from R2DT for Ribovision models to produce something we can put in our database. """ r2dt.write_rnase_p(filename, output) @model_info.command("rfam") @click.argument("filename", type=click.File("r")) @click.argument("db_url", type=str) @click.argument("output", default="-", type=click.File("w")) def rnase_p_model_info(filename, db_url, output): """ Parse the metadata.tsv file from R2DT for Ribovision models to produce something we can put in our database. """ r2dt.write_rfam(filename, db_url, output) @cli.command("create-attempted") @click.argument("filename", type=click.File("r")) @click.argument("version", type=click.File("r")) @click.argument("output", default="-", type=click.File("w")) def r2dt_create_attempted(filename, version, output): version_string = version.read().strip() attempted.r2dt(filename, version_string, output) @cli.command("publish") @click.option("--suffix", default="") @click.option("--allow-missing", is_flag=True, default=False) @click.argument("model_info", type=click.File("r")) @click.argument( "directory", type=click.Path( writable=False, dir_okay=True, file_okay=False, ), ) @click.argument( "output", type=click.Path( writable=True, dir_okay=True, file_okay=False, ), ) def r2dt_publish(model_info, directory, output, allow_missing, suffix=""): r2dt.publish( model_info, directory, output, allow_missing=allow_missing, suffix=suffix ) @cli.command("prepare-s3") @click.option("--allow-missing", is_flag=True, default=False) @click.argument("model_info", type=click.File("r")) @click.argument( "directory", type=click.Path( writable=False, dir_okay=True, file_okay=False, ), ) @click.argument( "output", type=click.Path( writable=True, dir_okay=True, file_okay=False, ), ) 
@click.argument("file_list", type=click.Path()) def r2dt_prepare_s3(model_info, directory, output, file_list, allow_missing): file_list = Path(file_list) output = Path(output) r2dt.prepare_s3( model_info, directory, output, file_list, allow_missing=allow_missing )
998,681
c4b8c35f22d2b294594a578f92e7c9684810f682
# Train a VGG13-style CNN on 20 QuickDraw-like classes (5000 28x28 samples
# per .npy file under train/) and write test-set predictions to vgg13_1.csv.
import numpy as np
import pandas as pd
import os
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.initializers import he_normal
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
import pickle

print("Reading Data...")
f = os.listdir('train/')
f = [j for j in f if '.npy' in j]
cats = [j.split('.')[0] for j in f]
# Map class name <-> integer label.
class2id = dict(list(zip(cats, np.arange(len(cats)))))
id2class = dict(list(zip(np.arange(len(cats)), cats)))
X = np.zeros((100000, 784))
y = np.zeros(100000)
for i in range(len(f)):
    file = f[i]
    a = np.load('train/' + file)
    # Each class file contributes a contiguous block of 5000 rows.
    X[5000*i:5000*(i+1), :] = a
    y[5000*i:5000*(i+1)] = class2id[cats[i]]
print("Read and formatted data!")

# Pre-processing: center then L2-normalize each sample.
mean = np.mean(X, axis=0)
X = np.subtract(X, mean)
X = normalize(X)
# one_hot_labels = keras.utils.to_categorical(y, num_classes=20)

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state=42)
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1))
y_train = y_train.reshape((len(y_train), 1))
X_val = X_val.reshape((X_val.shape[0], 28, 28, 1))
y_val = y_val.reshape((len(y_val), 1))
y_train_one_hot = keras.utils.to_categorical(y_train, num_classes=20)
y_val_one_hot = keras.utils.to_categorical(y_val, num_classes=20)

# NOTE(review): the test set is centered with its own mean, not the
# training mean — confirm this mismatch is intentional.
X_test = np.load('test/test.npy')
mean = np.mean(X_test, axis=0)
X_test = np.subtract(X_test, mean)
X_test = normalize(X_test)
X_test = X_test.reshape((100000, 28, 28, 1))

print('Starting training')
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1), padding='same', kernel_initializer=he_normal()))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer=he_normal()))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(units=4096, activation='relu'))
model.add(Dense(20, activation='softmax'))

rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
nadam = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
x = model.fit(X_train, y_train_one_hot, epochs=40, batch_size=512, verbose=2, validation_data=(X_val, y_val_one_hot))
# BUG FIX: was the Python 2 statement `print x`, a SyntaxError in Python 3.
print(x)

loss_and_metrics = model.evaluate(X_train, y_train_one_hot)
print(str(loss_and_metrics))
train_acc = loss_and_metrics[1]
loss_and_metrics = model.evaluate(X_val, y_val_one_hot)
val_acc = loss_and_metrics[1]
print(str(loss_and_metrics))

df_sub = pd.read_csv('sampleSubmission.csv')
print('Calculating predictions on test set...')
y_pred = model.predict(X_test)
print(y_pred)
y_pred = np.argmax(y_pred, axis=1)
print(y_pred)
y_pred = y_pred.reshape((len(y_pred), 1))
df_sub['CATEGORY'] = y_pred
df_sub['CATEGORY'] = pd.Series(list(map(lambda x: id2class[x], df_sub['CATEGORY'])))
df_sub.to_csv('vgg13_1.csv', index=False)

filename = 'vgg13_1.pkl'
model.save('vgg13_1.h5')
# pickle.dump(model, filename)
# BUG FIX: the original ended with `filename.close()`, which raised
# AttributeError because `filename` is a plain string; there is no open
# file handle here, so nothing needs closing.
998,682
a07d9baf74d288dbc3ff5c059233299ee2d2f87b
#view for page three from django.contrib.sessions.models import Session from django.shortcuts import render import requests import time import datetime def bugThree(request): request.session['sVar']['weight'] = "weight created from bug 3 m" #request.session.modfied = True ts = time.time() timeS3 = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') #sVar = request.session['sVar'] #sVar ['weight'] = "weight created from bug 3 " #request.session['sVar'] = sVar return render ( request, 'myfirstapp/bug3.html', { 'bug3Out' : request.session['sVar'].values(), 'bug3Id' : request.session._session_key, 'bug3TS': timeS3 } )
998,683
b98459ab9b2d888821b1a0e9b046f6721621034f
# flake8: noqa import os.path as osp import numpy as np import yaml here = osp.dirname(osp.abspath(__file__)) def arc2017(): data_file = osp.join(here, "data.npz") data = dict(np.load(data_file)) # compose masks to class label image class_label = np.full(data["rgb"].shape[:2], 0, dtype=np.int32) for l, mask in zip(data["labels"], data["masks"]): class_label[mask == 1] = l data["class_label"] = class_label names_file = osp.join(here, "class_names.txt") with open(names_file) as f: class_names = [name.strip() for name in f] data["class_names"] = class_names data["res4"] = np.load(osp.join(here, "res4.npz"), allow_pickle=True)[ "res4" ] with open(osp.join(here, "camera_info.yaml")) as f: data["camera_info"] = yaml.safe_load(f) return data
998,684
85fc2ede22a2e0de25092d8cdb17a04edbe44b76
''' Знайти суму додатніх елементів лінійного масиву цілих чисел. Розмірність масиву - 10. Заповнення масиву здійснити з клавіатури. ''' import numpy as np A = np.zeros(10) s = 0 for i in range(len(A)): A[i] = int(input('Enter element: ')) for i in A: if i > 0: s += i print(A) print(s)
998,685
1ab302b5c45b22e9a4b706b6377071d474d75140
#!/usr/bin/env python POWS = [1 << POS for POS in range(33)] t = int(raw_input().strip()) for _ in xrange(t): a, b = map(int, raw_input().strip().split()) c = b - a + 1 if c == 1 or c == 2: print a & b else: pos = 0 while c > POWS[pos]: pos += 1 a //= POWS[pos] b //= POWS[pos] r = 2 ** 32 - 1 while a <= b: r &= a a += 1 print r * POWS[pos]
998,686
2f7bc50f55c30d851e6e1af9b9ccaeb399af936b
import sys sys.path.append("..") from guerraterritorios.tests.testsFunctions import * from guerraterritorios.models.provincias import * def esUnaProvinciaPuedeDetectarSiUnaListaCumpleLaEstructura(): provincia = [ "Alajuela", [ [ "Upala", [ [ [10, 20, 30], [10,50, 53] ], [ [10, 20, 30], [10,50, 53] ] ] ] ] ] esVerdadero(esUnaProvincia(provincia)) def esUnaProvinciaPuedeDetectarSiUnaListaNoCumpleLaEstructura(): provincia = [ "Alajuela", [ [ "Upala", [ [ [10, 20, 30], [10,50, 53] ] ] ] ] ] esFalso(esUnaProvincia(provincia)) def correrTests(): printTitulo("Testeando guerraterritorios/models/cantones.py:") print("EsUnaProvincia puede detectar si una lista cumple la estructura:") esUnaProvinciaPuedeDetectarSiUnaListaCumpleLaEstructura() print("EsUnaProvincia puede detectar si una lista no cumple la estructura:") esUnaProvinciaPuedeDetectarSiUnaListaNoCumpleLaEstructura() correrTests()
998,687
0a56355d7b704646c4c9c1dff4057d085214501e
# Generator script: prints x86-64 AVX2 assembly (AT&T syntax) for
# {NAMESPACE}poly_Rq_mul.  The multiplication is built as Toom-4 on top of
# two Karatsuba layers ("Toom4 / K2 / K2") over 52-coefficient limbs; the
# polynomial has 832 coefficient slots of which 821 are used (see the
# masking below).  Statement order here IS the emitted instruction order —
# do not reorder calls to p().
from params import *
from K2_K2_64x52 import K2_K2_transpose_64x52

p = print


def karatsuba_eval(dst, dst_off, coeff, src, t0, t1):
    """ t1 can overlap with any source register, but not t0 """
    # Emits the Karatsuba evaluation of the four 52-coeff limbs in src
    # (ymm register numbers) into memory at dst, producing the limbs
    # themselves plus the pairwise sums s0..s3 used by the K2/K2 layers.
    p("vmovdqa %ymm{}, {}({})".format(src[0], (dst_off+4*0+coeff)*32, dst))  # a[0:]
    p("vmovdqa %ymm{}, {}({})".format(src[1], (dst_off+4*1+coeff)*32, dst))  # a[44:]
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[0], src[1], t0))
    p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+4*2+coeff)*32, dst))  # s1[0:]
    p("vmovdqa %ymm{}, {}({})".format(src[2], (dst_off+4*3+coeff)*32, dst))  # a[88:]
    p("vmovdqa %ymm{}, {}({})".format(src[3], (dst_off+4*4+coeff)*32, dst))  # a[132:]
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[2], src[3], t0))
    p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+4*5+coeff)*32, dst))  # s2[0:]
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[0], src[2], t0))
    p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+4*6+coeff)*32, dst))  # s0[0:]
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(src[1], src[3], t1))
    p("vmovdqa %ymm{}, {}({})".format(t1, (dst_off+4*7+coeff)*32, dst))  # s0[44:]
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(t0, t1, t0))
    p("vmovdqa %ymm{}, {}({})".format(t0, (dst_off+4*8+coeff)*32, dst))  # s3[0:]


def karatsuba_interpolate(dst, dst_off, src, src_off, coeff):
    """ Destroys all ymm regs and does not leave useful values.
    In practice we're doing 7 of these sequentially, so there is no
    reasonable way to save any high-coefficients results. """
    # Emits the inverse of the two Karatsuba layers: reads the nine partial
    # products at src/src_off and writes the four recombined out0..out3
    # limb pairs to dst/dst_off.  Variables named rN_52 / outN_0 / outN_52
    # track which ymm register currently holds which logical value.
    def addr(i, off):
        # Memory operand for partial product i, low (off=0) or high
        # (off=52) half.
        return '{}({})'.format((src_off+4*(2*i+off//52)+coeff)*32, src)
    r0_52 = 0
    p("vmovdqa {}, %ymm{}".format(addr(0, 52), r0_52))
    out0_52 = r0_52
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(1, 0), r0_52, out0_52))
    r2_52 = 1
    p("vmovdqa {}, %ymm{}".format(addr(2, 52), r2_52))
    out1_0 = r2_52
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out0_52, r2_52, out1_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(1, 52), out1_0, out1_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(0, 0), out0_52, out0_52))
    p("vpaddw {}, %ymm{}, %ymm{}".format(addr(2, 0), out0_52, out0_52))
    r3_52 = 2
    p("vmovdqa {}, %ymm{}".format(addr(3, 52), r3_52))
    out2_52 = r3_52
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 0), r3_52, out2_52))
    r5_52 = 3
    p("vmovdqa {}, %ymm{}".format(addr(5, 52), r5_52))
    out3_0 = r5_52
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out2_52, r5_52, out3_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 52), out3_0, out3_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(3, 0), out2_52, out2_52))
    p("vpaddw {}, %ymm{}, %ymm{}".format(addr(5, 0), out2_52, out2_52))
    r6_52 = 4
    p("vmovdqa {}, %ymm{}".format(addr(6, 52), r6_52))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(7, 0), r6_52, r6_52))
    r8_52 = 5
    p("vmovdqa {}, %ymm{}".format(addr(8, 52), r8_52))
    r7_0 = r8_52
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r6_52, r8_52, r7_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(7, 52), r7_0, r7_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(6, 0), r6_52, r6_52))
    p("vpaddw {}, %ymm{}, %ymm{}".format(addr(8, 0), r6_52, r6_52))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(3, 0), out1_0, out1_0))
    out2_0 = r7_0
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out1_0, r7_0, out2_0))
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out3_0, out2_0, out2_0))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(0, 0), out1_0, out1_0))
    p("vpaddw {}, %ymm{}, %ymm{}".format(addr(6, 0), out1_0, out1_0))
    r1_52 = 6
    p("vmovdqa {}, %ymm{}".format(addr(1, 52), r1_52))
    out1_52 = 7
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out2_52, r1_52, out1_52))
    r7_52 = out2_52
    p("vmovdqa {}, %ymm{}".format(addr(7, 52), r7_52))
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out1_52, r7_52, out2_52))
    p("vpsubw {}, %ymm{}, %ymm{}".format(addr(4, 52), out2_52, out2_52))
    p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(out0_52, out1_52, out1_52))
    p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(r6_52, out1_52, out1_52))
    # TODO can get rid of these by fetching them from the right place during Toom4 eval
    out0_0 = 8
    out3_52 = 9
    p("vmovdqa {}, %ymm{}".format(addr(0, 0), out0_0))
    p("vmovdqa {}, %ymm{}".format(addr(4, 52), out3_52))
    # TODO should move these up in between computations for better pipelining?
    p("vmovdqa %ymm{}, {}({})".format(out0_0, (dst_off+2*0+0)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out0_52, (dst_off+2*0+1)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out1_0, (dst_off+2*1+0)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out1_52, (dst_off+2*1+1)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out2_0, (dst_off+2*2+0)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out2_52, (dst_off+2*2+1)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out3_0, (dst_off+2*3+0)*32, dst))
    p("vmovdqa %ymm{}, {}({})".format(out3_52, (dst_off+2*3+1)*32, dst))


def idx2off(i):
    """ Produces [0, 32, 64, 96, 104, 136, 168, 200, 208, 240, 272, 304, 312, 344, 376, 408]
    These are the byte offsets when dividing into 52-coeff chunks"""
    return i * 32 - (24 * (i//4))


if __name__ == '__main__':
    # --- .data section: word constants and shuffle/mask tables ---
    p(".data")
    p(".p2align 5")
    p("mask_low9words:")
    for i in [65535]*9 + [0]*7:
        p(".word 0x{:x}".format(i))
    p("const3:")
    for i in range(16):
        p(".word 3")
    p("const9:")
    for i in range(16):
        p(".word 9")
    p("const0:")
    for i in range(16):
        p(".word 0")
    p("const729:")
    for i in range(16):
        p(".word 729")
    p("const3_inv:")  # inverse of 3 mod 2**16
    for i in range(16):
        p(".word 43691")
    p("const5_inv:")  # inverse of 5 mod 2**16 (fixed comment: 52429*5 = 1 mod 2**16)
    for i in range(16):
        p(".word 52429")
    p("rol_rol_16:")
    for j in range(2):
        for i in range(16):
            p(".byte {}".format((i + 2) % 16))
    p("mask32_to_16:")
    for a, b in zip([65535]*8, [0]*8):
        p(".word 0x{:x}".format(a))
        p(".word 0x{:x}".format(b))
    p("mask_9_7:")
    for i in range(9):
        p(".word 65535")
    for i in range(7):
        p(".word 0")
    p("mask_7_9:")
    for i in range(7):
        p(".word 65535")
    for i in range(9):
        p(".word 0")
    # --- .text section: function prologue ---
    p(".text")
    p(".global {}poly_Rq_mul".format(NAMESPACE))
    p(".global _{}poly_Rq_mul".format(NAMESPACE))
    p("{}poly_Rq_mul:".format(NAMESPACE))
    p("_{}poly_Rq_mul:".format(NAMESPACE))
    # assume a and b in rsi and rdx respectively
    # assume destination pointer in rdi
    r_real = '%rdi'
    a_real = '%rsi'
    b_real = '%rdx'
    # karatsuba layers use registers rcx, r9 and r10
    # r8 is used to store the stack pointer
    # that leaves rax and r11 for pointers, so we must preserve one more
    p("push %r12")
    r_out = '%r12'
    a_prep = '%rax'
    b_prep = '%r11'
    p("mov %rsp, %r8")  # Use r8 to store the old stack pointer during execution.
    p("andq $-32, %rsp")  # Align rsp to the next 32-byte value, for vmovdqa.
    # allocate destination block for prepared a
    p("subq ${}, %rsp".format((64 * 64 // 16) * 32))
    p("mov %rsp, {}".format(a_prep))
    # allocate destination block for prepared b
    p("subq ${}, %rsp".format((64 * 64 // 16) * 32))
    p("mov %rsp, {}".format(b_prep))
    # allocate destination block for resulting r
    p("subq ${}, %rsp".format((64 * 128 // 16) * 32))
    p("mov %rsp, {}".format(r_out))
    # allocate some space for f0-f3
    p("subq ${}, %rsp".format(16 * 32))

    # Zero the result register
    p("vpxor %ymm3, %ymm3, %ymm3")
    for i in range(2*832//32):
        p("vmovdqa %ymm3, {}({})".format(i*32, r_real))

    # ###### evaluate Toom4 / K2 / K2
    # think of blocks of 52 coefficients, for karatsuba preparation
    # we evaluate for first 16 coefficients of each block, then 16, then 16, then 4
    const_3 = 3
    p("vmovdqa const3(%rip), %ymm{}".format(const_3))
    for (prep, real) in [(a_prep, a_real), (b_prep, b_real)]:
        for coeff in range(4):
            f0 = [0, 1, 2, 12]
            # we already have const_3 in 3 (keeping it saves 5 loads)
            # TODO replace vmovdqu with vmovdqa when possible
            for i, r in enumerate(f0):
                p("vmovdqu {}({}), %ymm{}".format(0*13*32+idx2off(i*4+coeff), real, r))
            f3 = [4, 5, 6, 7]
            for i, r in enumerate(f3):
                p("vmovdqu {}({}), %ymm{}".format(3*13*32+idx2off(i*4+coeff), real, r))
            # there are 821 coefficients, not 832;
            if coeff == 2:
                # Mask out last 7 coeff in load from offset 812
                p("vpand mask_9_7(%rip), %ymm{}, %ymm{}".format(f3[3], f3[3]))
            if coeff == 3:
                # Mask out last 16 coeff in load from offset 828
                p("vpxor %ymm{}, %ymm{}, %ymm{}".format(f3[3], f3[3], f3[3]))
            # retrieve f1 so we can store it in the stack and use for vpadd
            f1 = [8, 9, 10, 11]
            for i, r in enumerate(f1):
                p("vmovdqu {}({}), %ymm{}".format(1*13*32+idx2off(i*4+coeff), real, r))
            t0 = 14
            t1 = 15
            karatsuba_eval(prep, dst_off=0*9*4, src=f0, t0=t0, t1=t1, coeff=coeff)
            karatsuba_eval(prep, dst_off=6*9*4, src=f3, t0=t0, t1=t1, coeff=coeff)
            # store f0 and f1 so we can use those registers (storing guarantees alignment)
            for i, r in enumerate(f0):
                p("vmovdqa %ymm{}, {}(%rsp)".format(r, (0*4+i)*32))
            for i, r in enumerate(f1):
                p("vmovdqa %ymm{}, {}(%rsp)".format(r, (1*4+i)*32))
            # Toom-4 evaluation points +1 / -1.
            x1 = [8, 9, 10, 11]
            x2 = [12, 13, 14, 15]
            for i in range(4):
                f2_i = 0
                p("vmovdqu {}({}), %ymm{}".format(2*13*32+idx2off(i*4+coeff), real, f2_i))
                f0f2_i = 1
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f2_i, f0f2_i))
                f1f3_i = 2
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f3[i], f1f3_i))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(f1f3_i, f0f2_i, x1[i]))
                p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(f1f3_i, f0f2_i, x2[i]))
                # also store the retrieved element of f2 on the stack, makes addition easier later
                p("vmovdqa %ymm{}, {}(%rsp)".format(f2_i, (2*4+i)*32))
            t0 = 0
            t1 = 1
            karatsuba_eval(prep, dst_off=1*9*4, src=x1, t0=t0, t1=t1, coeff=coeff)
            karatsuba_eval(prep, dst_off=2*9*4, src=x2, t0=t0, t1=t1, coeff=coeff)
            # Toom-4 evaluation points +2 / -2 (shifts implement *4, *8).
            x3 = [8, 9, 10, 11]
            x4 = [12, 13, 14, 15]
            for i in range(4):
                f2_i = 0
                p("vmovdqa {}(%rsp), %ymm{}".format((2*4+i)*32, f2_i))
                f2_4_i = 0
                p("vpsllw $2, %ymm{}, %ymm{}".format(f2_i, f2_4_i))
                f0f2_4_i = 0
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f2_4_i, f0f2_4_i))
                f3_4_i = 1
                p("vpsllw $2, %ymm{}, %ymm{}".format(f3[i], f3_4_i))
                f1f3_4_i = 1
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f3_4_i, f1f3_4_i))
                f1_2f3_8_i = 1
                p("vpsllw $1, %ymm{}, %ymm{}".format(f1f3_4_i, f1_2f3_8_i))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(f1_2f3_8_i, f0f2_4_i, x3[i]))
                p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(f1_2f3_8_i, f0f2_4_i, x4[i]))
            t0 = 0
            t1 = 1
            karatsuba_eval(prep, dst_off=3*9*4, src=x3, t0=t0, t1=t1, coeff=coeff)
            karatsuba_eval(prep, dst_off=4*9*4, src=x4, t0=t0, t1=t1, coeff=coeff)
            # Toom-4 evaluation point 3, via Horner with const_3.
            x5 = [12, 13, 14, 15]
            for i in range(4):
                f3_3_i = 0
                p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f3[i], f3_3_i))
                f2f3_3_i = 0
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((2*4+i)*32, f3_3_i, f2f3_3_i))
                f2_3f3_9_i = 0
                p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f2f3_3_i, f2_3f3_9_i))
                f1f2_3f3_9_i = 0
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((1*4+i)*32, f2_3f3_9_i, f1f2_3f3_9_i))
                f1_3f2_9f3_27_i = 0
                p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const_3, f1f2_3f3_9_i, f1_3f2_9f3_27_i))
                p("vpaddw {}(%rsp), %ymm{}, %ymm{}".format((0*4+i)*32, f1_3f2_9f3_27_i, x5[i]))
            karatsuba_eval(prep, dst_off=5*9*4, src=x5, t0=t0, t1=t1, coeff=coeff)

    # Pointwise 64x52 multiplications of the prepared operands.
    K2_K2_transpose_64x52(r_out, a_prep, b_prep)

    # ###### interpolate Toom4 / K2 / K2
    # we could have probably left something in registers after the transpose
    # but that is extremely messy and would've maybe saved ten cycles at most
    # we need 7*8 registers for the 7 interpolations.
    # (2*832/52 = 32, 4-way sequential over coefficients => 8 registers)
    # there are already 16 registers available from f0-f3, so allocate 7*8 - 16 more
    p("subq ${}, %rsp".format((7*8 - 16) * 32))

    # Tiny register allocator for the interpolation phase: registers is a
    # free list of ymm numbers; alloc() pops one, free() pushes back.  The
    # free-then-alloc pattern below deliberately reuses the register just
    # freed for the next logical value.
    registers = list(range(16))

    def free(*regs):
        for x in regs:
            if x in registers:
                raise Exception("This register is already freed")
            registers.append(x)

    def alloc():
        return registers.pop()

    const729 = alloc()
    p("vmovdqa const729(%rip), %ymm{}".format(const729))
    const3_inv = alloc()
    p("vmovdqa const3_inv(%rip), %ymm{}".format(const3_inv))
    const5_inv = alloc()
    p("vmovdqa const5_inv(%rip), %ymm{}".format(const5_inv))
    const9 = alloc()
    p("vmovdqa const9(%rip), %ymm{}".format(const9))

    # consider swapping this around for more closely linked memory access
    # they're somewhat spread around because of how the transpose worked, but
    # staying sane while incrementally writing/testing this is also important
    for coeff in range(4):
        for i in range(7):
            karatsuba_interpolate(dst='%rsp', dst_off=i*4*2, src=r_out, src_off=i*9*8, coeff=coeff)
        # after interpolating, we can even go 24-way sequential;
        # none of the 52-coefficient chunks interact anymore before reduction
        for j in range(8):  # for each 16 (or 4) coefficient chunk
            def limb(i):
                # TODO see above; for case j in {0, 8}, make an exception
                return '{}(%rsp)'.format((i*8+j)*32)

            # Toom-4 interpolation: recover h0..h6 from the 7 evaluations.
            # Word lanes are widened to dwords (vpunpck*wd with const0)
            # where exact division by 2 needs the extra headroom, then
            # repacked with vpackusdw.
            h0 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(0), h0))
            h0lo = alloc()
            h0hi = alloc()
            p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(h0, h0lo))
            p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(h0, h0hi))
            free(h0lo)
            h0_2lo = alloc()
            p("vpslld $1, %ymm{}, %ymm{}".format(h0lo, h0_2lo))
            free(h0hi)
            h0_2hi = alloc()
            p("vpslld $1, %ymm{}, %ymm{}".format(h0hi, h0_2hi))
            t1 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(1), t1))
            t1lo = alloc()
            p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(t1, t1lo))
            free(t1)
            t1hi = alloc()
            p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(t1, t1hi))
            t2 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(2), t2))
            t2lo = alloc()
            p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(t2, t2lo))
            free(t2)
            t2hi = alloc()
            p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(t2, t2hi))
            t11lo = alloc()
            p("vpaddd %ymm{}, %ymm{}, %ymm{}".format(t2lo, t1lo, t11lo))
            t11hi = alloc()
            p("vpaddd %ymm{}, %ymm{}, %ymm{}".format(t2hi, t1hi, t11hi))
            free(h0_2lo, t11lo)
            t11c1lo = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h0_2lo, t11lo, t11c1lo))
            free(h0_2hi, t11hi)
            t11c1hi = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h0_2hi, t11hi, t11c1hi))
            free(t1lo, t2lo)
            t12lo = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(t2lo, t1lo, t12lo))
            free(t1hi, t2hi)
            t12hi = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(t2hi, t1hi, t12hi))
            p("vpsrld $1, %ymm{}, %ymm{}".format(t12lo, t12lo))
            p("vpsrld $1, %ymm{}, %ymm{}".format(t12hi, t12hi))
            p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t12lo, t12lo))
            p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t12hi, t12hi))
            free(t12lo, t12hi)
            r11s = alloc()
            p("vpackusdw %ymm{}, %ymm{}, %ymm{}".format(t12hi, t12lo, r11s))
            h6 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(6), h6))
            h6lo = alloc()
            p("vpunpcklwd const0(%rip), %ymm{}, %ymm{}".format(h6, h6lo))
            h6hi = alloc()
            p("vpunpckhwd const0(%rip), %ymm{}, %ymm{}".format(h6, h6hi))
            free(h6lo)
            h6_2lo = alloc()
            p("vpslld $1, %ymm{}, %ymm{}".format(h6lo, h6_2lo))
            free(h6hi)
            h6_2hi = alloc()
            p("vpslld $1, %ymm{}, %ymm{}".format(h6hi, h6_2hi))
            free(h6_2lo, t11c1lo)
            t11c2lo = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h6_2lo, t11c1lo, t11c2lo))
            free(h6_2hi, t11c1hi)
            t11c2hi = alloc()
            p("vpsubd %ymm{}, %ymm{}, %ymm{}".format(h6_2hi, t11c1hi, t11c2hi))
            p("vpsrld $1, %ymm{}, %ymm{}".format(t11c2lo, t11c2lo))
            p("vpsrld $1, %ymm{}, %ymm{}".format(t11c2hi, t11c2hi))
            p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t11c2lo, t11c2lo))
            p("vpand mask32_to_16(%rip), %ymm{}, %ymm{}".format(t11c2hi, t11c2hi))
            free(t11c2lo, t11c2hi)
            r11 = alloc()
            p("vpackusdw %ymm{}, %ymm{}, %ymm{}".format(t11c2hi, t11c2lo, r11))
            t3 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(3), t3))
            t13 = alloc()
            p("vpaddw {}, %ymm{}, %ymm{}".format(limb(4), t3, t13))
            free(t3)
            t14 = alloc()
            p("vpsubw {}, %ymm{}, %ymm{}".format(limb(4), t3, t14))
            free(t14)
            r12s = alloc()
            p("vpsrlw $2, %ymm{}, %ymm{}".format(t14, r12s))
            free(r12s)
            e12s = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11s, r12s, e12s))
            free(e12s)
            r22 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, e12s, r22))
            h0_2 = alloc()
            p("vpsllw $1, %ymm{}, %ymm{}".format(h0, h0_2))
            free(t13, h0_2)
            t13c1 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h0_2, t13, t13c1))
            h6_128 = alloc()
            p("vpsllw $7, %ymm{}, %ymm{}".format(h6, h6_128))
            free(t13c1, h6_128)
            t13c2 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h6_128, t13c1, t13c2))
            free(t13c2)
            r12 = alloc()
            p("vpsrlw $3, %ymm{}, %ymm{}".format(t13c2, r12))
            free(r12)
            e12 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11, r12, e12))
            # currently alive: h0, r11, e12, r11s, r22, h6
            t5 = alloc()
            p("vmovdqa {}, %ymm{}".format(limb(5), t5))
            free(t5)
            t5c1 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h0, t5, t5c1))
            h6_729 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const729, h6, h6_729))
            free(t5c1, h6_729)
            t5c2 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h6_729, t5c1, t5c2))
            free(e12)
            h4 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, e12, h4))
            free(r11)
            h2 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h4, r11, h2))
            h4_9 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const9, h4, h4_9))
            # currently alive: h0, h2, h4, h6, h4_9, r22, t5c2, r11s
            free(h4_9)
            h2h4_9 = alloc()
            p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(h4_9, h2, h2h4_9))
            free(h2h4_9)
            h2_9h4_81 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const9, h2h4_9, h2_9h4_81))
            free(t5c2, h2_9h4_81)
            t16 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h2_9h4_81, t5c2, t16))
            free(t16)
            r13 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const3_inv, t16, r13))
            free(r13)
            e13 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r11s, r13, e13))
            free(e13)
            r23 = alloc()
            p("vpsrlw $3, %ymm{}, %ymm{}".format(e13, r23))
            free(r23)
            e23 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(r22, r23, e23))
            free(r22)
            h3 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(e23, r22, h3))
            free(r11s)
            im1 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h3, r11s, im1))
            free(e23)
            h5 = alloc()
            p("vpmullw %ymm{}, %ymm{}, %ymm{}".format(const5_inv, e23, h5))
            free(im1)
            h1 = alloc()
            p("vpsubw %ymm{}, %ymm{}, %ymm{}".format(h5, im1, h1))
            # currently alive: h0, h1, h2, h3, h4, h5, h6
            h = [h0, h1, h2, h3, h4, h5, h6]

            # Accumulate h0..h6 into the result buffer, handling the
            # wrap-around of coefficients past index 820 back to index 0.
            # TODO replace vmovdqu with vmovdqa when possible (calculate alignment?)
            def get_limb(limbreg, i, j, off=0):
                p("vmovdqu {}({}), %ymm{}".format((off + i*208 + j * 52 + coeff*16) * 2, r_real, limbreg))

            def store_limb(limbreg, i, j, off=0):
                if coeff == 3:
                    if i == 3 and j >= 4:  # this part exceeds 832
                        return
                    p("vmovq %xmm{}, {}({})".format(limbreg, (off + i*208 + j * 52 + coeff*16) * 2, r_real))
                else:
                    if i == 3 and j >= 4:  # this part exceeds 832
                        return
                    p("vmovdqu %ymm{}, {}({})".format(limbreg, (off + i*208 + j * 52 + coeff*16) * 2, r_real))

            tmp = alloc()
            get_limb(tmp, 0, j, off=0)
            p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[0], tmp))
            store_limb(tmp, 0, j, off=0)
            get_limb(tmp, 1, j, off=0)
            p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[1], tmp))
            store_limb(tmp, 1, j, off=0)
            # Handle portions of h[2] that do not wrap around
            if j < 7 or (j == 7 and coeff < 2):
                get_limb(tmp, 2, j, off=0)
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[2], tmp))
                store_limb(tmp, 2, j, off=0)
            # Handle portions of h[2] that wrap
            if j == 7 and coeff == 2:
                # Add 9 words at result[812:]
                tmp2 = alloc()
                get_limb(tmp, 2, j, off=0)
                p("vpand mask_9_7(%rip), %ymm{}, %ymm{}".format(h[2], tmp2))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, tmp2, tmp))
                store_limb(tmp, 2, j, off=0)
                free(tmp2)
                # Add the high 7 words to result[0:]
                # - rotate left by 1 word in each lane, swap lanes, mask
                p("vpshufb rol_rol_16(%rip), %ymm{}, %ymm{}".format(h[2], h[2]))
                p("vextracti128 $1, %ymm{}, %xmm{}".format(h[2], h[2]))
                p("vpand mask_7_9(%rip), %ymm{}, %ymm{}".format(h[2], h[2]))
                get_limb(tmp, 0, 0, off=(0-16*coeff))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[2], tmp))
                store_limb(tmp, 0, 0, off=(0-16*coeff))
            # Handle portions of h[2] after limb wrap
            if j == 7 and coeff == 3:
                get_limb(tmp, 0, 0, off=(7-16*coeff))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[2], tmp))
                store_limb(tmp, 0, 0, off=(7-16*coeff))
            # Handle portions of h[3] that do not wrap around
            if j < 3 or (j == 3 and coeff < 2):
                get_limb(tmp, 3, j, off=0)
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[3], tmp))
                store_limb(tmp, 3, j, off=0)
            # Handle portions of h[3] where limb wraps
            # h[3] holds a segment aligned to result 624:831
            # starting at 624 + 52*j + (0,16,16,16)[coeff]
            # and of length (16,16,16,12)[coeff]
            # 821 = 3*208 + 3*52 + 2*16 + 9
            # wrap when j=3, coeff=2
            if j == 3 and coeff == 2:
                # Add 9 words at result[812:]
                tmp2 = alloc()
                get_limb(tmp, 3, j, off=0)
                p("vpand mask_9_7(%rip), %ymm{}, %ymm{}".format(h[3], tmp2))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, tmp2, tmp))
                store_limb(tmp, 3, j, off=0)
                free(tmp2)
                # Add the high 7 words to result[0:]
                # - rotate left by 1 word in each lane, swap lanes, mask
                p("vpshufb rol_rol_16(%rip), %ymm{}, %ymm{}".format(h[3], h[3]))
                p("vextracti128 $1, %ymm{}, %xmm{}".format(h[3], h[3]))
                p("vpand mask_7_9(%rip), %ymm{}, %ymm{}".format(h[3], h[3]))
                get_limb(tmp, 0, 0, off=(0-16*coeff))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[3], tmp))
                store_limb(tmp, 0, 0, off=(0-16*coeff))
            # Handle portions of h[3] after limb wrap
            if j == 3 and coeff == 3:
                get_limb(tmp, 0, 0, off=(7-16*coeff))
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[3], tmp))
                store_limb(tmp, 0, 0, off=(7-16*coeff))
            if j >= 4:
                get_limb(tmp, 0, j-4, off=11)
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[3], tmp))
                store_limb(tmp, 0, j-4, off=11)
            get_limb(tmp, 0, j, off=11)
            p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[4], tmp))
            store_limb(tmp, 0, j, off=11)
            get_limb(tmp, 1, j, off=11)
            p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[5], tmp))
            store_limb(tmp, 1, j, off=11)
            if j < 7 or (j == 7 and coeff < 2):
                get_limb(tmp, 2, j, off=11)
                p("vpaddw %ymm{}, %ymm{}, %ymm{}".format(tmp, h[6], tmp))
                store_limb(tmp, 2, j, off=11)
            free(tmp)
            free(h0, h1, h2, h3, h4, h5, h6)

    # Function epilogue: restore the caller's stack pointer and r12.
    p("mov %r8, %rsp")
    p("pop %r12")  # restore callee-saved r12
    p("ret")
998,688
357e7f305eef49676e072ca8a0c667e1f18b492e
#!/usr/bin/python import sys gfafile = sys.argv[1] goodnodefile = sys.argv[2] startnode = sys.argv[3] # format 123 without <> node_lens = {} one_outedge = {} with open(gfafile) as f: for l in f: parts = l.strip().split('\t') if parts[0] == 'S': node_lens[parts[1]] = len(parts[2]) elif parts[0] == 'L': if parts[2] == "-": assert parts[4] == "-" (parts[1], parts[3]) = (parts[3], parts[1]) (parts[2], parts[4]) = (parts[4], parts[2]) parts[2] = "+" parts[4] = "+" assert parts[2] == "+" assert parts[4] == "+" fromnode = parts[1] tonode = parts[3] if fromnode not in one_outedge: one_outedge[fromnode] = tonode if tonode[0:4] == "end_": one_outedge[fromnode] = tonode goodnodes = set() with open(goodnodefile) as f: for l in f: goodnodes.add(l.strip()) pos = startnode path = [] distance = 0 while pos in one_outedge: if pos in goodnodes: path.append((pos, distance)) distance = 0 else: distance += node_lens[pos] pos = one_outedge[pos] for n in path: print(n[0] + "\t" + str(n[1]))
998,689
7680ac3bf4ab0144841638563c74eb379f69a829
import re import os import numpy as np import pandas as pd import multiprocessing match = 3 mismatch = -3 gap = -2 #空位罚分 空位权值恒定模型 # 蛋白质替换记分矩阵用BLOSUM-62 S_matrix = [[9,-1,-1,-3,0,-3,-3,-3,-4,-3,-3,-3,-3,-1,-1,-1,-1,-2,-2,-2], [-1,4,1,-1,1,0,1,0,0,0,-1,-1,0,-1,-2,-2,-2,-2,-2,-3], [-1,1,4,1,-1,1,0,1,0,0,0,-1,0,-1,-2,-2,-2,-2,-2,-3], [-3,-1,1,7,-1,-2,-1,-1,-1,-1,-2,-2,-1,-2,-3,-3,-2,-4,-3,-4], [0,1,-1,-1,4,0,-1,-2,-1,-1,-2,-1,-1,-1,-1,-1,-2,-2,-2,-3], [-3,0,1,-2,0,6,-2,-1,-2,-2,-2,-2,-2,-3,-4,-4,0,-3,-3,-2], [-3,1,0,-2,-2,0,6,1,0,0,-1,0,0,-2,-3,-3,-3,-3,-2,-4], [-3,0,1,-1,-2,-1,1,6,2,0,-1,-2,-1,-3,-3,-4,-3,-3,-3,-4], [-4,0,0,-1,-1,-2,0,2,5,2,0,0,1,-2,-3,-3,-3,-3,-2,-3], [-3,0,0,-1,-1,-2,0,0,2,5,0,1,1,0,-3,-2,-2,-3,-1,-2], [-3,-1,0,-2,-2,-2,1,1,0,0,8,0,-1,-2,-3,-3,-2,-1,2,-2], [-3,-1,-1,-2,-1,-2,0,-2,0,1,0,5,2,-1,-3,-2,-3,-3,-2,-3], [-3,0,0,-1,-1,-2,0,-1,1,1,-1,2,5,-1,-3,-2,-3,-3,-2,-3], [-1,-1,-1,-2,-1,-3,-2,-3,-2,0,-2,-1,-1,5,1,2,-2,0,-1,-1], [-1,-2,-2,-3,-1,-4,-3,-3,-3,-3,-3,-3,-3,1,4,2,1,0,-1,-3], [-1,-2,-2,-3,-1,-4,-3,-4,-3,-2,-3,-2,-2,2,2,4,3,0,-1,-2], [-1,-2,-2,-2,0,-3,-3,-3,-2,-2,-3,-3,-2,1,3,1,4,-1,-1,-3], [-2,-2,-2,-4,-2,-3,-3,-3,-3,-3,-1,-3,-3,0,0,0,-1,6,3,1], [-2,-2,-2,-3,-2,-3,-2,-3,-2,-1,2,-2,-2,-1,-1,-1,-1,3,7,2], [-2,-3,-3,-4,-3,-2,-4,-4,-3,-2,-2,-3,-3,-1,-3,-2,-3,1,2,11]] amino_acid = ['C','S','T','P','A', 'G', 'N','D','E','Q','H','R','K', 'M','I','L','V','F','Y','W'] def read_fasta_file(fasta_file): ''' 读取protein sequence的fasta格式数据 :param fasta_file: fasta数据所在地址 :return: key为蛋白质名称,value为sequence的字典 ''' fp = open(fasta_file, 'r') allsequences = [] sequence = '' i = 0 for line in fp: # let's discard the newline at the end (if any) line = line.rstrip().strip('*') if line == '': pass # distinguish header from sequence elif line[0] == '>': # or line.startswith('>') if i == 0: i += 1 else: allsequences.append(sequence) sequence = '' else: # it is sequence sequence += line allsequences.append(sequence) fp.close() print(len(allsequences)) return allsequences 
def s_w(seqA, allseq, savepath, num):
    """Score seqA against every sequence in allseq with a Smith-Waterman
    local alignment and write the score list to savepath.

    :param seqA: query protein sequence (string of amino-acid letters)
    :param allseq: sequences to compare against (seqA and its successors)
    :param savepath: text file receiving one score per line (np.savetxt)
    :param num: index of seqA in the full sequence list; the first `num`
                scores are padded with 0 because those pairs are handled
                by earlier workers
    """
    # Pairs before seqA were compared by earlier calls, so pad with zeros.
    scorelist = [0] * num
    print('Comparing the %d sequence' % (num + 1))
    cols = len(seqA)
    for seqB in allseq:
        rows = len(seqB)
        # (cols+1) x (rows+1) DP score matrix and traceback matrix.
        matrix = [[0 for _ in range(rows + 1)] for _ in range(cols + 1)]
        paths = [[0 for _ in range(rows + 1)] for _ in range(cols + 1)]
        max_score = 0
        finalscore = 0
        # FIX: start_pos used to be unbound (first pair) or stale (later
        # pairs) whenever no cell scored above 0 — an UnboundLocalError or
        # a silently wrong score. Start at the origin, where paths[0][0]
        # is 0, so the traceback below is simply skipped in that case.
        start_pos = [0, 0]
        for i in range(cols):
            for j in range(rows):
                a1 = amino_acid.index(seqA[i])
                a2 = amino_acid.index(seqB[j])
                # FIX: the old match/mismatch if/else computed the exact
                # same value in both branches — the BLOSUM-62 entry
                # already covers both cases.
                diag = matrix[i][j] + S_matrix[a1][a2]
                up = matrix[i + 1][j] + gap
                left = matrix[i][j + 1] + gap
                score = max(0, diag, up, left)
                matrix[i + 1][j + 1] = score
                if score > max_score:
                    max_score = score
                    start_pos = [i + 1, j + 1]
                # Record which move produced the (non-zero) cell value.
                if matrix[i + 1][j + 1] == diag and matrix[i + 1][j + 1] != 0:
                    paths[i + 1][j + 1] = 'diag'
                elif matrix[i + 1][j + 1] == up and matrix[i + 1][j + 1] != 0:
                    paths[i + 1][j + 1] = 'up'
                elif matrix[i + 1][j + 1] == left and matrix[i + 1][j + 1] != 0:
                    paths[i + 1][j + 1] = 'left'
        # Walk the traceback from the best cell, summing the DP values
        # visited along the way; that sum is the pair's similarity score.
        i, j = start_pos
        start_path = paths[i][j]
        while start_path != 0:
            finalscore += matrix[i][j]
            if start_path == 'diag':
                i, j = i - 1, j - 1
            elif start_path == 'up':
                j = j - 1
            else:
                i = i - 1
            start_path = paths[i][j]
        scorelist.append(finalscore)
    np.savetxt(savepath, scorelist, delimiter=',', fmt='%f')


def generated_SW_matrix(filename, path):
    """Build the pairwise Smith-Waterman similarity matrix for all
    sequences in `filename` and save it as CSV under `path`.

    One pooled worker per sequence writes its score row to
    `<path><i+1>.txt`; the rows are then collected and mirrored into a
    symmetric matrix.
    """
    allsequence = read_fasta_file(filename)
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    for i in range(len(allsequence)):
        savepath = path + str(i + 1) + '.txt'
        sequence1 = allsequence[i]
        # Compare only with itself and later sequences; earlier pairs are
        # covered by earlier workers (upper-triangular split of the work).
        sequence2 = allsequence[i:]
        pool.apply_async(s_w, (sequence1, sequence2, savepath, i,))
    pool.close()
    pool.join()
    # Collect each worker's row (one column of values in its text file).
    scorematrix = []
    for i in range(len(allsequence)):
        alignpath = path + str(i + 1) + '.txt'
        alignlist = pd.read_csv(alignpath, header=None, index_col=None)
        alignlist = np.array(alignlist)
        alignlist = alignlist.T
        scorematrix.append(alignlist[0])
    finalmatrix = np.array(scorematrix)
    # Mirror the computed upper triangle into the zero-padded lower one.
    # The column-major traversal reading [j][i] copies each computed value
    # before it is ever read back, so the result ends up symmetric.
    for j in range(finalmatrix.shape[1]):
        for i in range(finalmatrix.shape[0]):
            finalmatrix[i][j] = finalmatrix[j][i]
    # NOTE(review): 'sw_smilarity' typo kept deliberately — downstream
    # consumers read this exact filename.
    np.savetxt(os.path.join(path, r'protein sw_smilarity matrix.csv'),
               finalmatrix, delimiter=',', fmt='%f')
    # np.savetxt(os.path.join(path, r'protein sw_test.csv'), finalmatrix, delimiter=',', fmt='%f')


if __name__ == '__main__':
    generated_SW_matrix(filename='../data/generated_data/NPInter_4158/protein_extracted_seq.fasta',
                        path='../data/generated_data/NPInter_4158/')
    generated_SW_matrix(filename='../data/generated_data/NPInter_10412/protein_extracted_seq.fasta',
                        path='../data/generated_data/NPInter_10412/')
    generated_SW_matrix(filename='../data/generated_data/RPI2241/protein_extracted_seq.fasta',
                        path='../data/generated_data/RPI2241/')
    generated_SW_matrix(filename='../data/generated_data/RPI369/protein_extracted_seq.fasta',
                        path='../data/generated_data/RPI369/')
    generated_SW_matrix(filename='../data/generated_data/RPI7317/protein_extracted_seq.fasta',
                        path='../data/generated_data/RPI7317/')
998,690
10aaf983f059a8c1eb5ba4eb506d1892b14899b2
# Generated by Django 2.2 on 2019-04-26 05:38 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('NewsFeed', '0002_subscribe'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_created', models.DateTimeField(auto_now_add=True, help_text='항목이 생성된 시점으로 자동 추가', verbose_name='생성일시')), ('date_modified', models.DateTimeField(auto_now=True, help_text='항목이 수정된 시점으로 수정시 자동 변경', verbose_name='수정일시')), ('content', models.TextField(verbose_name='글 내용')), ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articles', to=settings.AUTH_USER_MODEL, verbose_name='작성자')), ('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articles', to='NewsFeed.School', verbose_name='대상 학교')), ], options={ 'abstract': False, }, ), ]
998,691
38865c2e65cc9019abf01186eb4ddc736e2a7506
""" WHAT - model HOW - xxx """ import os from flask import ( Blueprint, flash, redirect, render_template, request, session, url_for ) from models.__init__ import init_db from models.card import Card from models.plan import ( get_rand_card, get_plan, get_card_by_id, set_known, Plan ) from models.words_txt_insert_db import insert_from_txt from utils import ( log, debug, ) main = Blueprint('index', __name__) """ 用户在这里可以 首页 """ # Uncomment and use this to initialize database, then comment it # You can rerun it to pave the database and start over @main.route('/init_db') def init_database(): log('调用路由') init_db() return 'Initialized the database.' @main.route("/") def index(): log('调用路由') # todo user # if session.get('logged_in'): # return render_template("index.html") # else: # return redirect(url_for('login')) # log('GET /.') return render_template("index.html") @main.route("/new") def new_card(): log('调用路由') log('GET /new.') return render_template("add_card.html") # 转换[] 运行[] @main.route("/add", methods=['POST']) def add_card(): log('调用路由') form = request.form # todo 用户判断 # u = current_user() log(f'Card: {Card}') card_id = Card.new(form) plan = Plan() plan.insert_id(card_id) return redirect(url_for('.review', card_id=card_id)) @main.route("/cards") def list_cards(): log('调用路由') cs = Card.all() # for test # cs 是什么 # c = cs[0] # log(f'c: {c.keys()}') # log(f'c: {type(c)}') if not cs: flash('没有一张卡片。') return render_template('list_cards.html', cs=cs) @main.route("/list_today_cards") def list_today_cards(): log('调用路由') cs = get_plan() if not cs: flash('没有一张卡片。') return render_template('list_cards.html', cs=cs) @main.route("/review") @main.route("/review/<card_id>") def review(card_id=None): log('调用路由') log(f'card_id: {type(card_id)}') return memorize(card_id) # plans = Plan.today_plan() # if [[]] == plans: # flash('今天没有复习内容。') # return render_template('review.html', plans=plans) def memorize(card_id): if card_id: # todo 验证run[] card = get_card_by_id(card_id) log(f'看看 
{card_id} 能否找到对应的 {card}') if not card: card = get_rand_card() else: # todo 验证run[]. card = get_rand_card() if not card: # run[x] flash("You've learned all cards.") return redirect(url_for('.list_today_cards')) # todo run[] # debug(f'card: {card}') return render_template('review.html', card=card) @main.route("/txt_insert_card") def txt_insert_card(): log('调用路由') insert_from_txt() flash('插入成功.') return redirect(url_for('.index')) @main.route('/mark_known/<card_id>') def mark_known(card_id): log('调用路由') log('观察变化: ') log(f"card_id: {card_id}") set_known(card_id) flash(f"Set card_id-{card_id} known.") # log(session['unknown']) # session['unknown'] = [11, 12] # session['unknown'].remove(11) # log(session['unknown']) return redirect(url_for('.review')) @main.route("/test") def test(): log('调用路由') return render_template('test.html')
998,692
50bf66278380a01cc5750ec8a458ba98c32df675
"""djEmpleosIngenia URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin from django.conf import settings from django.conf.urls.static import static from homeApp import viewsH from ingeniaApp import viewsI from vacantesApp import viewsV from contactoApp import viewsC from beneficiosApp import viewsB urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^$', viewsH.renderHome, name='urlHome'), url(r'^ingenia/$', viewsI.renderIngenia, name='urlIngenia'), url(r'^beneficios/$', viewsB.renderBeneficios, name='urlBeneficios'), url(r'^vacantes/$', viewsV.renderVacantes, name='urlVacantes'), url(r'^contacto/$', viewsC.renderContactoIngenia, name='urlContacto'), url(r'^exito/$', viewsC.renderExito, name='urlExito'), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
998,693
9ae998e6576759d73a389051d1f9b07916ac9423
#!/usr/bin/env python
# coding: utf-8

# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 5</font>
#
# ## Download: http://github.com/dsacademybr

# In[ ]:

# Print the Python version used in this notebook.
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())


# ## Exercícios

# In[2]:

# Exercise 1 - Create an object from the class below, named roc1, passing
# 2 parameters, then access its attributes and methods.

from math import sqrt

class Rocket():
    # Simple 2-D rocket with an (x, y) position.
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def move_rocket(self, x_increment=0, y_increment=1):
        # Shift the position by the given increments (defaults: 0 right, 1 up).
        self.x += x_increment
        self.y += y_increment

    def print_rocket(self):
        # Print the current coordinates.
        print(self.x, self.y)


# In[3]:

roc1 = Rocket(1, 2)  # instantiate the class as roc1


# In[4]:

roc1.x  # read attribute x


# In[5]:

roc1.y  # read attribute y


# In[6]:

# move_rocket() adds the default increments (0 to x, 1 to y) to the
# values the object was initialised with.
roc1.move_rocket()


# In[8]:

roc1.print_rocket()  # print the incremented attribute values


# In[ ]:

# Exercise 2 - Create a class Pessoa() with the attributes nome, cidade,
# telefone and e-mail. Use at least 2 special methods in the class.
# Create an object and call at least one of its special methods.

# In[18]:

class Pessoa():  # remember: class names should be capitalised
    def __init__(self, nome, cidade, telefone, email):
        # Constructor: announce creation and store the attributes.
        print("Construtor OK!")
        self.nome = nome
        self.cidade = cidade
        self.telefone = telefone
        self.email = email

    def cidadebeaga(self):
        # Regular method: report whether this person's city is BH.
        if self.cidade == "BH":
            return "Voce e de Belo Horixonte"
        else:
            return "voce nao e de bh"

    def __str__(self):
        # Special method: human-readable description of the person.
        return "O funcionario é: %s, que mora na cidade:%s, com telefone:%s, e email:%s" % (self.nome, self.cidade, self.telefone, self.email)


# In[19]:

# Instantiate Pessoa with the given attribute values.
thiago = Pessoa("thiago paoli", "BH", 3134522177, "tpacheco25@hotmail.com")


# In[20]:

str(thiago)  # call the __str__ special method on the object


# In[21]:

# Calls the regular method that checks whether the object's city is BH.
thiago.cidadebeaga()


# In[23]:

# A second Pessoa object, with different attribute values.
pessoa2 = Pessoa("Joao", "SP", 1134558978, "joao@gmail.com")


# In[30]:

# Print all attributes of pessoa2.
print(pessoa2.nome)
print(pessoa2.cidade)
print(pessoa2.telefone)
print(pessoa2.email)


# In[31]:

str(pessoa2)  # __str__ again, for the second object


# In[33]:

pessoa2.cidadebeaga()  # check whether the new person is from BH


# In[ ]:

# Exercise 3 - Create the class Smartphone with 2 attributes, tamanho and
# interface, and the class MP3Player with the attribute capacidade.
# MP3Player must inherit the attributes of Smartphone.
# In[42]:

class Smartphone(object):
    """Base phone with a size (tamanho) and an interface; the constructor
    prints a message so the notebook shows when it runs."""

    def __init__(self, tamanho, interface):
        print("Construtor OK!!!")
        self.tamanho = tamanho        # received via parameter
        self.interface = interface    # received via parameter


# In[49]:

class MP3Layer(Smartphone):
    """MP3 player that inherits tamanho/interface from Smartphone and
    adds its own capacidade attribute."""

    def __init__(self, capacidade, tamanho, interface):
        # FIX: call the parent initialiser via super() instead of naming
        # Smartphone directly — robust to renames and plays well with the MRO.
        super().__init__(tamanho, interface)
        self.capacidade = capacidade  # attribute specific to the subclass
        print("Objeto Criado")


# In[50]:

# Instantiate the subclass; it inherits the Smartphone attributes.
tel1 = MP3Layer("mega", "Grande", "plus")

# ### FIM

# In[52]:

# Even though tel1 is an MP3Layer instance, the attribute inherited from
# Smartphone is accessible alongside the subclass's own.
print(tel1.tamanho)
print(tel1.capacidade)

# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
998,694
e205852f3196394e6fce50ef0bd3e91b97d3e6a4
import time

from selenium import webdriver

# Smoke-test script: open Chrome, search Google for "ChromeDriver", quit.
# Optional argument, if not specified will search path.
# NOTE(review): the chromedriver path is hard-coded to one developer's
# machine — consider reading it from an env var or relying on PATH.
driver = webdriver.Chrome(
    '/home/fenris/work/Kamtech/Scrapers/LinkScraper/src/chromedriver_1')
driver.get('http://www.google.com/')
time.sleep(5)  # Let the user actually see something!
# Type a query into Google's search box (input name="q") and submit it.
search_box = driver.find_element_by_name('q')
search_box.send_keys('ChromeDriver')
search_box.submit()
time.sleep(5)  # Let the user actually see something!
driver.quit()
998,695
caf63c0023d51a7f6ac0aa52fc8a87557e1b219a
# Register your models here
from django.contrib import admin

# FIX: replace `from .models import *` with explicit imports so the
# module's dependencies are visible and checkable by linters.
from .models import (
    Question,
    Answer,
    Comment,
    Topic,
    Discussion,
    Vote,
    Message,
    Thread,
    Tag,
    MarkedTag,
    Search,
    Category,
    Post,
    NotificationEmail,
    ReplyAddress,
)

# Register every model with the default admin site, in the original order.
for _model in (
    Question, Answer, Comment, Topic, Discussion, Vote, Message, Thread,
    Tag, MarkedTag, Search, Category, Post, NotificationEmail, ReplyAddress,
):
    admin.site.register(_model)
998,696
10b31b4bccb5ea90d1f952512d6bb98f46f3ba75
from trlib.algorithms.algorithm_handler import Double_FQI_SSEP_Handler
import numpy as np
import json


def calculate_q_values(double_fqi_ss_ep_handler, file_name):
    """For each persistence of the handler, average the per-sample upper
    and lower bounds of its two Q-function estimates over the handler's
    data, then dump the two lists to 'Q_bounds<file_name>.json'.
    """
    assert isinstance(double_fqi_ss_ep_handler, Double_FQI_SSEP_Handler)
    persistences = double_fqi_ss_ep_handler.get_persistences()
    sa, q_functions = double_fqi_ss_ep_handler.get_data_and_qs()
    q_max_list = []
    q_min_list = []
    for idx in range(len(persistences)):
        q_pair = q_functions[idx]
        first = q_pair[0].values(sa)
        second = q_pair[1].values(sa)
        stacked = np.column_stack((first, second))
        # Mean of the element-wise max/min across the two estimators.
        q_max_list.append(float(np.mean(np.amax(stacked, axis=1))))
        q_min_list.append(float(np.mean(np.amin(stacked, axis=1))))
    with open('Q_bounds' + file_name + '.json', 'w') as out:
        out.write(json.dumps([q_max_list, q_min_list]))
998,697
692aab5f54e4f254f5c2cd69527265ba950e7374
""" In this kata you have to create all permutations of an input string and remove duplicates, if present. This means, you have to shuffle all letters from the input in all possible orders. Examples: permutations('a'); # ['a'] permutations('ab'); # ['ab', 'ba'] permutations('aabb'); # ['aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'] The order of the permutations doesn't matter. """ import itertools def permutations(string): return set([''.join(i) for i in itertools.permutations(string)])
998,698
200370c10ce61c0325509c20e71844381191c39a
# intersection_update()方法:用于获取两个或更多集合中都重叠的元素,即计算交集 # intersection_update() 方法不同于 intersection() 方法,因为 intersection() 方法是返回一个新的集合, # 而 intersection_update() 方法是在原始的集合上移除不重叠的元素 set23 = {'a', 'b', 1, 2} set24 = {'a', 'c', 3, 2} set23.intersection_update(set24) print(set23)
998,699
5dee86f77bf7b9f3b813616a4352a485ac9d2b0a
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals

import pytest

from data_pipeline.schematizer_clientlib.models.refresh import Priority
from data_pipeline.tools.refresh_requester import FullRefreshRequester


class TestFullRefreshRequester(object):
    """Tests for the command-line refresh requester: option validation
    and construction of refresh requests.

    NOTE: `e.value.message` is the Python 2 exception attribute; the
    asserted strings must match the tool's messages byte-for-byte
    (including the 'of--source-name' spacing quirk in the tool itself).
    """

    @pytest.fixture
    def source(self, namespace, registered_schema):
        # A real source backing the registered schema's topic
        # (namespace/registered_schema come from conftest fixtures).
        return registered_schema.topic.source

    @pytest.fixture
    def refresh_requester(self, containers):
        # Fresh requester per test; `containers` brings up the services.
        return FullRefreshRequester()

    def test_invalid_priority(self, refresh_requester):
        # An unknown priority name fails during option parsing (KeyError
        # on the Priority lookup), before run() is ever reached.
        with pytest.raises(KeyError):
            refresh_requester.process_commandline_options(['--priority=INVALID', "--source-id=1"])
            refresh_requester.run()

    def test_run_invalid_batch(self, refresh_requester):
        # batch-size must be strictly positive.
        with pytest.raises(ValueError) as e:
            refresh_requester.process_commandline_options(
                [
                    '--priority=MEDIUM',
                    '--batch-size=0',
                    "--source-id=1"
                ]
            )
        assert e.value.message == "--batch-size option must be greater than 0."

    def test_run_no_source_id(self, refresh_requester):
        # The source must be identified by id, or by name + namespace.
        with pytest.raises(ValueError) as e:
            refresh_requester.process_commandline_options(
                [
                    '--priority=MEDIUM',
                    '--batch-size=50',
                ]
            )
        assert e.value.message == "--source-id or both of--source-name and --namespace must be defined"

    def test_run_only_source_name(self, refresh_requester):
        # A source name without its namespace is insufficient.
        with pytest.raises(ValueError) as e:
            refresh_requester.process_commandline_options(
                [
                    '--priority=MEDIUM',
                    '--batch-size=50',
                    '--source-name=test'
                ]
            )
        assert e.value.message == "--source-id or both of--source-name and --namespace must be defined"

    def test_run_only_namespace(self, refresh_requester):
        # A namespace without a source name is likewise insufficient.
        with pytest.raises(ValueError) as e:
            refresh_requester.process_commandline_options(
                [
                    '--priority=MEDIUM',
                    '--batch-size=50',
                    '--namespace=test'
                ]
            )
        assert e.value.message == "--source-id or both of--source-name and --namespace must be defined"

    def test_valid_run(self, refresh_requester, source):
        # Happy path identified by source id.
        refresh_requester.process_commandline_options(
            [
                '--source-id=' + str(source.source_id),
                '--batch-size=250',
                '--priority=MAX',
                '--offset=0'
            ]
        )
        actual_refresh = refresh_requester.create_request()
        self._check_refresh(actual_refresh, source.name, None)

    def test_valid_run_namespace_source_name(self, refresh_requester, source):
        # Happy path identified by source name + namespace.
        refresh_requester.process_commandline_options(
            [
                '--source-name=' + source.name,
                '--namespace=' + source.namespace.name,
                '--batch-size=250',
                '--priority=MAX',
                '--offset=0'
            ]
        )
        actual_refresh = refresh_requester.create_request()
        self._check_refresh(actual_refresh, source.name, None)

    def test_invalid_run_namespace_source_name_not_found(self, refresh_requester, source):
        # A name that resolves to no source fails at run() time.
        with pytest.raises(ValueError) as e:
            refresh_requester.process_commandline_options(
                [
                    '--source-name=bad_source_that_doesnt_exist',
                    '--namespace=' + source.namespace.name,
                    '--batch-size=250',
                    '--priority=MAX',
                    '--offset=0'
                ]
            )
            refresh_requester.run()
        assert "Found no sources" in e.value.message

    def test_valid_with_avg_rows_per_second_cap(self, refresh_requester, source):
        # The optional throughput cap is propagated into the request.
        refresh_requester.process_commandline_options(
            [
                '--source-id=' + str(source.source_id),
                '--batch-size=250',
                '--priority=MAX',
                '--offset=0',
                '--avg-rows-per-second-cap=100'
            ]
        )
        actual_refresh = refresh_requester.create_request()
        self._check_refresh(actual_refresh, source.name, 100)

    def _check_refresh(self, refresh, source_name, avg_rows_per_second_cap):
        # Shared assertions for the happy-path tests above.
        assert refresh.source_name == source_name
        assert refresh.avg_rows_per_second_cap == avg_rows_per_second_cap
        assert refresh.priority == Priority.MAX.value
        assert refresh.status.value == "NOT_STARTED"
        assert refresh.offset == 0
        assert refresh.batch_size == 250