index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,100 | a1aaa1eff27e7d8b857707cfc810c10014a039e5 | from pyspark import SparkConf, SparkContext
import string
conf = SparkConf().setMaster('local').setAppName('P23_spark')
sc = SparkContext(conf = conf)
def calcmedia(line):
    """Return (key, mean) where the mean averages fields 1-4 of *line*.

    line: a sequence whose element 0 is the key and whose elements 1..4
          are numeric values.
    """
    suma = line[1] + line[2] + line[3] + line[4]
    return (line[0], suma / 4)
# ['2008',152.830978]
def formatear(line):
    """Map a CSV row to (year, close-price-as-float).

    line: row fields, where field 0 is an ISO date ('YYYY-MM-DD') and
          field 4 is the closing price.
    """
    anyo = line[0].split('-')[0]
    return (anyo, float(line[4]))
# Compute the yearly mean closing price of GOOG from a daily CSV.
# Input row format: Date,Open,High,Low,Close,Adj Close,Volume
# e.g. 2008-12-31,151.117126,154.495163,150.327271,152.830978,152.830978,5811000
rdd = sc.textFile("GOOG.csv")
header = rdd.first()
# Drop the header row.
rdd = rdd.filter(lambda line: line!=header)
# NOTE(review): str.encode(...).split(",") only works on Python 2, where
# encode() returns a str; on Python 3 this yields bytes and split(",")
# with a str separator raises TypeError. Confirm the target interpreter.
lines = rdd.map(lambda line: line.encode("ascii", "ignore").split(","))
# ('2008', 152.830978) -- (year, closing price) per trading day
linesNum = lines.map(formatear)
# ('year', sum of all closes for that year)
linesNumAcum = linesNum.reduceByKey(lambda acum,n: acum+n)
# Count the number of trading days contributing to each year.
# ------------------------------
diasAnyo = linesNum.map(lambda tupla: (tupla[0],1))
diasAnyo = diasAnyo.reduceByKey(lambda acum,n: acum+n)
# ('2008', numDias) -- number of days counted for the year
# ------------------------------
joinRDD = linesNumAcum.join(diasAnyo)
# ('2008', (sumtotal, numDias))
mediaAnual = joinRDD.map(lambda tupla: (tupla[0], tupla[1][0]/tupla[1][1]))
# ('2008', yearly mean close), sorted by year, written as text.
mediaAnual = mediaAnual.sortByKey()
mediaAnual.saveAsTextFile("output23")
|
999,101 | 25ab797620e6a15d944ea95565d18f2b5d8c34ee | n=[]
# Read five integers from the user, one at a time, into the global list ``n``.
while len(n) <5:
    x=int(input("ingrese un numero "))
    n.append(x)
def menor_en_arreglo(valores=None):
    """Find, print and return the smallest number.

    valores: optional sequence of numbers; when omitted, the global list
             ``n`` filled by the input loop is used (backward compatible
             with the original zero-argument call).

    Bug fix: the original computed ``menor = i < i`` -- a comparison that
    is always False -- and printed ``False`` once per element instead of
    the minimum value.
    """
    datos = n if valores is None else valores
    menor = min(datos)
    print(menor)
    return menor
menor_en_arreglo() |
999,102 | b7661508808a09f519ccb88155183f788e6a5baa | #!/usr/bin/env python
# coding: utf-8
# In[89]:
import numpy as np
import matplotlib.pyplot as plt
import imageio
# # Convolution = Intutively
# In[90]:
url = 'https://source.unsplash.com/U66avewmxJk/400x300'
# In[91]:
cheetah = imageio.imread(url)
# In[92]:
plt.imshow(cheetah)
plt.show()
# In[93]:
cheetah.shape # 300x400 pixel and 3 layers
# In[94]:
import skimage
# In[95]:
image_gray = skimage.color.rgb2gray(cheetah)
# In[96]:
image_gray.shape
# In[97]:
plt.imshow(image_gray, cmap= 'gray')
plt.show()
# In[98]:
plt.imshow(image_gray, cmap= 'cool')
plt.show()
# In[99]:
plt.imshow(image_gray, cmap= 'hot')
plt.show()
# In[100]:
image = image_gray.reshape(300,400,1)
# In[101]:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation
# In[102]:
filters=1
kernel_size=(2,2)
# In[103]:
model=Sequential()
model.add(Conv2D(filters, kernel_size, input_shape=(300,400,1)))
# In[104]:
model.summary()
# In[105]:
def convolution(image, model):
    """Feed *image* through *model* and display the resulting feature map.

    image: a single image array whose pixel values are in 0-255.
    model: a Keras model whose prediction squeezes down to a 2-D map.
    """
    print ('Original Shape: ', image.shape)
    # Scale pixels to [0, 1] and prepend a batch axis of size 1.
    image_batch = np.expand_dims(image / 255, axis=0)
    conv_image = model.predict(image_batch).squeeze()
    print('New Shape: ', conv_image.shape)
    plt.imshow(conv_image, cmap="gray")
# these are color that you can replace in the quotes { Colormap heat is not recognized. Possible values are: Accent, Accent_r,
#Blues, Blues_r, BrBG, BrBG_r, BuGn, BuGn_r, BuPu, BuPu_r, CMRmap, CMRmap_r, Dark2, Dark2_r, GnBu, GnBu_r, Greens, Greens_r,
#Greys, Greys_r, OrRd, OrRd_r, Oranges, Oranges_r, PRGn, PRGn_r, Paired, Paired_r, Pastel1, Pastel1_r, Pastel2, Pastel2_r,
#PiYG, PiYG_r, PuBu, PuBuGn, PuBuGn_r, PuBu_r, PuOr, PuOr_r, PuRd, PuRd_r, Purples, Purples_r, RdBu, RdBu_r, RdGy, RdGy_r, RdPu,
#RdPu_r, RdYlBu, RdYlBu_r, RdYlGn, RdYlGn_r, Reds, Reds_r, Set1, Set1_r, Set2, Set2_r, Set3, Set3_r, Spectral, Spectral_r,
#Wistia, Wistia_r, YlGn, YlGnBu, YlGnBu_r, YlGn_r, YlOrBr, YlOrBr_r, YlOrRd, YlOrRd_r, afmhot, afmhot_r, autumn, autumn_r,
#binary, binary_r, bone, bone_r, brg, brg_r, bwr, bwr_r, cividis, cividis_r, cool, cool_r, coolwarm, coolwarm_r, copper,
#copper_r, cubehelix, cubehelix_r, flag, flag_r, gist_earth, gist_earth_r, gist_gray, gist_gray_r, gist_heat, gist_heat_r,
#gist_ncar, gist_ncar_r, gist_rainbow, gist_rainbow_r, gist_stern, gist_stern_r, gist_yarg, gist_yarg_r, gnuplot, gnuplot2,
#gnuplot2_r, gnuplot_r, gray, gray_r, hot, hot_r, hsv, hsv_r, inferno, inferno_r, jet, jet_r, magma, magma_r, nipy_spectral,
#nipy_spectral_r, ocean, ocean_r, pink, pink_r, plasma, plasma_r, prism, prism_r, rainbow, rainbow_r, seismic, seismic_r,
#spring, spring_r, summer, summer_r, tab10, tab10_r, tab20, tab20_r, tab20b, tab20b_r, tab20c, tab20c_r, terrain, terrain_r,
#twilight, twilight_r, twilight_shifted, twilight_shifted_r, viridis, viridis_r, winter, winter_r}
# In[106]:
convolution(image, model)
# In[107]:
model.layers[0].get_weights()
# # Conv + Activation
# In[108]:
model2 = Sequential()
model2.add(Conv2D(1,(3,3), input_shape=(300,400,1)))
model2.add(Activation("relu"))
# In[109]:
convolution(image, model2)
# # Conv + Pooling
# In[110]:
model3 = Sequential()
model3.add(Conv2D(1,(8,8), input_shape=(300,400,1)))
model3.add(MaxPooling2D(pool_size=(2,2)))
# In[111]:
convolution(image, model3)
# # Conv + Activation + Pooling
# In[129]:
# Conv + Activation + Pooling model.
model4 = Sequential()
# Bug fix: Conv2D requires a non-empty kernel_size; the original passed an
# empty tuple (), which Keras rejects. Use a 3x3 kernel as in model2.
model4.add(Conv2D(1, (3, 3), input_shape=(300, 400, 1)))
model4.add(Activation("relu"))
model4.add(MaxPooling2D(pool_size=(2, 2)))
# In[130]:
convolution(image, model4)
# In[ ]:
# In[ ]:
|
999,103 | 7c117e0374b2959a9179b4e074484dd858f9151f | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Firm(scrapy.Item):
    """Scraped item holding a firm's general information.

    All attributes are plain scrapy.Field() containers; the spider fills
    in the values at crawl time.
    """
    firm_id = scrapy.Field()
    name = scrapy.Field()
    rank = scrapy.Field()
    tax_code = scrapy.Field()
    listing_status = scrapy.Field()
    headquarter = scrapy.Field()
    tel = scrapy.Field()
    fax = scrapy.Field()
    email = scrapy.Field()
    website = scrapy.Field()
    category = scrapy.Field()
    founded_date = scrapy.Field()
    company_type = scrapy.Field()
|
999,104 | a8737764f994d4e572c907256611a9fea3588a9e | import csv
# Build {key: rest-of-row} from the first CSV, append the second CSV's
# price column to each matching row, and write the merged rows out.
# newline="" is required by the csv module on file objects; without it
# csv.writer emits blank rows on Windows.
with open("merge_yes.csv", "r", newline="") as f:
    first = {rows[0]: rows[1:] for rows in csv.reader(f)}
with open("house_prices.csv", "r", newline="") as f:
    for row in csv.reader(f):
        if row[0] in first:
            first[row[0]].append(row[1])
# Convert the dictionary back to a list of flat row tuples.
merged = [(k,) + tuple(v) for k, v in first.items()]
with open("train.csv", "w", newline="") as f:
    csv.writer(f).writerows(merged)
|
999,105 | 1caf52070e17177651d268f7cb88e749611d00e4 | # -*- coding: utf-8 -*-
# @Author: Alexander Sharov
import json
def json2poly(file):
    """Print a GeoJSON polygon as a Triangle-style .poly file on stdout.

    file: path to a GeoJSON file; the outer ring of the first feature's
          geometry is used.

    Emits the node list (1-indexed), the segment list closing the ring,
    and the trailing attribute/hole sections.

    Bug fixes: the original compared ``ind == count`` inside
    ``range(count)`` (never true), so the closing segment branch was dead;
    it also printed the raw points list, corrupting the .poly output.
    """
    with open(file, 'r', encoding='utf8') as data_file:
        data = json.load(data_file)
    # Outer ring of the first feature.
    points = data['features'][0]['geometry']['coordinates'][0]
    count = len(points)
    # Node header: <#nodes> <dimension=2> <#attributes=0> <#markers=0>
    print('%d 2 0 0' % count)
    for point_number, point in enumerate(points):
        print('%d %g %g' % (point_number + 1, point[0], point[1]))
    # Segment header: <#segments> <#boundary markers=0>
    print('%d 0' % count)
    for ind in range(count):
        if ind == count - 1:
            # Last segment wraps back to node 1 to close the ring.
            # (Assumes the intent of the original dead branch -- confirm.)
            print('%d %d %d' % (ind + 1, ind + 1, 1))
        else:
            print('%d %d %d' % (ind + 1, ind + 1, ind + 2))
    print('1')
    print('1 0 0')
    print('0')
|
999,106 | a795d1171f585b4cd10f4ace1d6643977a8010a1 | """
we will cover tkinter event handling in this part.
In this scenario, we are adding a quit event to our quit button, which currently does nothing when clicked on.
In basically every circumstance, we're going to want to have our buttons actually do something or perform an action rather than just appear there.
This is called an event when someone clicks on something, and we can write code to handle events.
Generally, we want to write code that is in line what the expectation of the user that created the event.
The more in-line your program can be with what the user intends to happen with their events, the more user-friendly it is going to be.
In tkinter, event handling is as simple as adding a command, which we'll make into a function.
Even though this function we create is a basic 1-line function that simply calls another function, we can see how we can later create more complex functions for our events.
"""
from tkinter import *
# Here, we are creating our class, Window, and inheriting from the Frame
# class. Frame is a class from the tkinter module. (see Lib/tkinter/__init__)
class Window(Frame):
    """Main application frame: fills the root window and offers an Exit
    button wired to a close handler."""

    def __init__(self, master=None):
        """Initialise the frame and build its widgets.

        master: the parent Tk root window (or None).
        """
        Frame.__init__(self, master)
        # Keep a reference to the master widget (the tk window).
        self.master = master
        self.init_window()

    def init_window(self):
        """Create and lay out the window's widgets."""
        self.master.title("GUI")
        # Let this frame take the full space of the root window.
        self.pack(fill=BOTH, expand=1)
        # The Exit button triggers the client_exit event handler.
        quitButton = Button(self, text="Exit", command=self.client_exit)
        quitButton.place(x=0, y=0)

    def client_exit(self):
        """Close the application window.

        Bug fix: the original called exit(), which kills the whole
        interpreter; destroy() ends the Tk mainloop cleanly instead.
        """
        self.master.destroy()
# root window created. Here, that would be the only window, but
# you can later have windows within windows.
root = Tk()
root.geometry("400x300")
# Attach an instance of our Window frame to the root window.
app = Window(root)
# Hand control to tkinter's event loop; blocks until the window closes.
root.mainloop()
|
999,107 | 1b8b5c9c74943c6c3d1c15fdafe4fa6660474a69 | # -*- coding: utf-8 -*-
import threading
import time
from route import *
#Importer le module training.py pour faire la VOD | nPVR
from checkConfig import *
allgo = threading.Condition()
class Process(threading.Thread):
    """Worker thread: waits for the ``allgo`` start signal, then runs the
    configured test patterns in a loop every 2 seconds."""

    def __init__(self):
        threading.Thread.__init__(self)

    def execution(self):
        """Run one full test pass driven by the configuration file."""
        # Fetch the parameters defined in the configuration file.
        config = checkNewConfig()
        # Build the manifest routes.
        abstract = RouteConfig(config)
        # Execute the patterns.
        abstract.threader(["PATERN1", "PATERN2"], ["live", "catchup"], ["smooth", "dash", "hls"])
        # VOD | nPVR test level (not implemented yet):
        #objVODouNPVR = NomClasseVODouNPVR
        #objVODouNPVR.threader(....)

    def run(self):
        """Wait on ``allgo``, then loop over execution() forever."""
        try:
            # Block until the controller notifies the shared Condition.
            with allgo:
                allgo.wait()
            while True:
                self.execution()
                time.sleep(2)
        except Exception:
            # Deliberately best-effort: any failure stops this worker
            # quietly. Narrowed from a bare ``except`` so SystemExit and
            # KeyboardInterrupt still propagate.
            pass
|
999,108 | dd3c7e202d1119d07af63db8b796565ca2db0c3d | # Generated by Django 3.0.3 on 2020-04-02 13:55
from django.db import migrations
# Auto-generated migration: renames Comments.approved_comment -> active
# in the blog app. Generated code -- do not hand-edit beyond comments.
class Migration(migrations.Migration):
    dependencies = [
        ('blog', '0026_comments_email'),
    ]
    operations = [
        migrations.RenameField(
            model_name='comments',
            old_name='approved_comment',
            new_name='active',
        ),
    ]
|
999,109 | 765119ee3b2b58f614b85836621332a13b02194d | import mesh
import mesh_library
import mesh_processing
# import the packages
import numpy as np
import tensorflow as tf
from scipy.optimize import minimize
import scipy.stats as stats
import time
import random
import CGAL
from CGAL import CGAL_Kernel
from CGAL import CGAL_Polyhedron_3
from CGAL import CGAL_Polygon_mesh_processing
Vertex = CGAL.CGAL_Kernel.Point_3
Vector_3 = CGAL.CGAL_Kernel.Vector_3
BoundingBox = CGAL.CGAL_Polygon_mesh_processing.BoundingBox
Library = mesh_library.MeshLibrary()
class MeshAssembler(object):
    """GAN that learns to assemble 3D models from mesh components.

    A model is encoded as a flat feature vector of up to ``max_components``
    parts; each part contributes ``features_per_vec`` (14) floats: the
    origin and three axes of its oriented bounding box (4 x 3 floats) plus
    two shape identifiers. Create one MeshAssembler per class of 3D model.
    """

    def __init__(self):
        self.max_components = 10
        # 4 vectors of 3 coordinates (origin, x, y, z axis) + 2 identifiers.
        self.features_per_vec = 3 * 4 + 2
        self.vector_length = self.features_per_vec * self.max_components
        # Build the TF1 graph for the generator/discriminator pair up front.
        self.generative_adversarial_net()

    def generate_mlp(self, input, output_dim):
        """Build a 3-layer tanh MLP on *input* within the current scope.

        Returns (output tensor, list of trainable variables). Used for the
        pre-training discriminator as well as D1, D2 and G.
        """
        size = input.get_shape()[1]
        w1 = tf.get_variable("w0", [size, size], initializer=tf.random_normal_initializer())
        b1 = tf.get_variable("b0", [size], initializer=tf.constant_initializer(0.0))
        w2 = tf.get_variable("w1", [size, self.max_components], initializer=tf.random_normal_initializer())
        b2 = tf.get_variable("b1", [self.max_components], initializer=tf.constant_initializer(0.0))
        w3 = tf.get_variable("w2", [self.max_components, output_dim], initializer=tf.random_normal_initializer())
        b3 = tf.get_variable("b2", [output_dim], initializer=tf.constant_initializer(0.0))
        fc1 = tf.nn.tanh(tf.matmul(input, w1) + b1)
        fc2 = tf.nn.tanh(tf.matmul(fc1, w2) + b2)
        fc3 = tf.nn.tanh(tf.matmul(fc2, w3) + b3)
        return fc3, [w1, b1, w2, b2, w3, b3]

    def fake_feature_vector(self):
        """Generate a random but structurally plausible feature vector.

        Used to teach the discriminator the basic layout of real vectors.
        """
        feature_vector_components = np.array([])
        components = np.random.randint(self.max_components)
        for _ in range(components):
            # Plausible oriented-bounding-box transform for one component.
            origin = np.random.random(3)
            x_axis = np.random.random(3) - origin
            y_axis = np.random.random(3) - origin
            # Orthogonal to the first two axes, but scaled randomly.
            z_axis = np.cross(x_axis, y_axis) * np.random.random() * 2
            feature_vector_components = np.append(feature_vector_components, origin)
            feature_vector_components = np.append(feature_vector_components, x_axis)
            feature_vector_components = np.append(feature_vector_components, y_axis)
            feature_vector_components = np.append(feature_vector_components, z_axis)
            # Two identifiers for shape type and style.
            object_type = np.random.randint(100)
            style = np.random.randint(100)
            feature_vector_components = np.append(feature_vector_components, object_type)
            feature_vector_components = np.append(feature_vector_components, style)
        # Pad the unused component slots with zeros.
        zeros = (self.max_components - components) * self.features_per_vec
        feature_vector = np.append(feature_vector_components, np.zeros(zeros))
        return feature_vector

    def fill_feature_vector(self, number):
        """Return a float32 feature vector with every entry set to *number*."""
        feature_vector = np.full_like(np.arange(self.vector_length, dtype=np.float32), number)
        return feature_vector

    def pre_train(self, D, theta_d, feature_vectors):
        """Pre-train discriminator *D* (parameters *theta_d*) on real vectors.

        NOTE(review): ``x_node`` is used as the feed key but is not visibly
        an input of the *D* tensor passed in -- confirm the caller wires
        this placeholder into the discriminator graph.
        """
        batch = tf.Variable(0)
        x_node = tf.placeholder(tf.float32, shape=(1, self.vector_length))
        with tf.variable_scope("D") as scope:
            scope.reuse_variables()
            obj_d = tf.reduce_mean(tf.log(D))
            opt_d = tf.train.GradientDescentOptimizer(0.1).minimize(1 - obj_d, global_step=batch, var_list=theta_d)
        sess = tf.InteractiveSession()
        tf.initialize_all_variables().run()
        for vector in feature_vectors:
            sess.run(opt_d, feed_dict={x_node: vector})

    def generative_adversarial_net(self):
        """Build the GAN graph: generator G, discriminators D1/D2, losses."""
        batch = tf.Variable(0)
        with tf.variable_scope("G"):
            # G maps a noise vector to a synthetic feature vector.
            self.y_node = tf.placeholder(tf.float32, shape=(1, self.vector_length))
            self.G, theta_g = self.generate_mlp(self.y_node, self.vector_length)
        with tf.variable_scope("D") as scope:
            # D1 scores real vectors; D2 (shared weights) scores G's output.
            self.z_node = tf.placeholder(tf.float32, shape=(1, self.vector_length))
            D1, theta_d = self.generate_mlp(self.z_node, 1)
            scope.reuse_variables()  # share weights between D1 and D2
            D2, theta_d = self.generate_mlp(self.G, 1)
        self.obj_d = tf.reduce_mean(tf.log(D1) + tf.log(1 - D2))
        self.opt_d = tf.train.GradientDescentOptimizer(0.1).minimize(1 - self.obj_d, global_step=batch, var_list=theta_d)
        self.obj_g = tf.reduce_mean(tf.log(D2))
        self.opt_g = tf.train.GradientDescentOptimizer(0.1).minimize(1 - self.obj_g, global_step=batch, var_list=theta_g)

    def load_model(self, save_path="output/assembly/model.ckpt"):
        """Restore previously trained weights from *save_path*."""
        sess = tf.InteractiveSession()
        saver = tf.train.Saver()
        tf.initialize_all_variables().run()
        saver.restore(sess, save_path)

    def train(self, feature_vectors, save_path="output/assembly/model.ckpt"):
        """Alternate D and G updates over *feature_vectors*, then checkpoint."""
        sess = tf.InteractiveSession()
        saver = tf.train.Saver()
        tf.initialize_all_variables().run()
        for vector in feature_vectors:
            # Train D on the real vector plus noise, then G on fresh noise.
            z = np.reshape(np.array(vector), (1, self.vector_length))
            y = np.random.random((1, self.vector_length))
            sess.run(self.opt_d, feed_dict={self.y_node: y, self.z_node: z})
            y = np.random.random((1, self.vector_length))
            sess.run(self.opt_g, feed_dict={self.y_node: y})
        save_path = saver.save(sess, save_path)

    def generate_feature_vector(self, count):
        """Sample *count* feature vectors from the generator.

        NOTE(review): the same noise vector ``y`` is reused for every
        sample, so all outputs are identical -- move the sampling inside
        the loop if distinct outputs are intended.
        """
        y = np.random.random((1, self.vector_length)).astype(np.float32)
        opt_gen = tf.identity(self.G)
        sess = tf.InteractiveSession()
        tf.initialize_all_variables().run()
        feature_vectors = []
        for _ in range(count):
            output = sess.run(opt_gen, feed_dict={self.y_node: y})
            feature_vectors.append(output)
        return feature_vectors

    def encode_feature_vector(self, mesh, component_list):
        """Encode a mesh's components as one flat feature vector."""
        feature_vector_components = []
        count = 0
        for poly in component_list[:self.max_components]:
            # Oriented bounding box: origin followed by the three axes.
            bounding_box = BoundingBox(poly)
            features = [bounding_box.get_origin(), bounding_box.get_x_axis(),
                        bounding_box.get_y_axis(), bounding_box.get_z_axis()]
            for vector in features:
                feature_vector_components.append(vector.x())
                feature_vector_components.append(vector.y())
                feature_vector_components.append(vector.z())
            # Two identifiers for shape class/style (placeholder values
            # until Library.component_id is wired in).
            count = count + 1
            feature_vector_components.append(count)
            feature_vector_components.append(0)
            #mesh_class, mesh_style = Library.component_id(poly)
            #feature_vector_components.append(mesh_class)
            #feature_vector_components.append(mesh_style)
        # Pad the unused component slots with zeros (14 values per slot;
        # was the magic number 14, now tied to features_per_vec).
        for _ in range(self.max_components - len(component_list)):
            for n in range(self.features_per_vec):
                feature_vector_components.append(0)
        vec = np.array(feature_vector_components)
        feature_vector = np.reshape(vec, self.vector_length)
        return feature_vector

    def decode_feature_vector(self, feature_vector):
        """Decode a flat feature vector back into aligned mesh components.

        Bug fixes vs. the original: the per-component stride is
        ``features_per_vec`` (14), not ``max_components`` (10); the two
        shape identifiers live at offsets 12 and 13 of each slot (matching
        encode_feature_vector, not 24/25); and the identifier lookup
        indexed an undefined name ``vector`` instead of ``feature_vector``.
        """
        polygon_list = []
        for index in range(self.max_components):
            offset = index * self.features_per_vec
            translation = Vector_3(feature_vector[offset], feature_vector[offset + 1], feature_vector[offset + 2])
            x_axis = Vector_3(feature_vector[offset + 3], feature_vector[offset + 4], feature_vector[offset + 5])
            y_axis = Vector_3(feature_vector[offset + 6], feature_vector[offset + 7], feature_vector[offset + 8])
            z_axis = Vector_3(feature_vector[offset + 9], feature_vector[offset + 10], feature_vector[offset + 11])
            bounding_box = BoundingBox(translation, x_axis, y_axis, z_axis)
            # Identifiers were encoded right after the 12 box coordinates.
            poly = Library.retrive_component(feature_vector[offset + 12], feature_vector[offset + 13])
            new_poly = mesh_processing.MeshProcessor.align(poly, bounding_box)
            polygon_list.append(new_poly)
        return polygon_list

    def training_set(self, polygons):
        """Segment each polygon, encode the usable ones, and train the GAN."""
        feature_vectors = []
        for poly in polygons:
            poly_list = mesh_processing.MeshProcessor().segmentation(poly)
            # Only meshes with at most max_components parts are usable.
            if (poly_list.size() < 11):
                vec = self.encode_feature_vector(poly, poly_list)
                feature_vectors.append(vec)
        self.train(feature_vectors)

    def generate_meshes(self, count):
        """Generate *count* new meshes from sampled feature vectors.

        Bug fix: generate_feature_vector is a method, so it must be called
        through ``self``. NOTE(review): ``self.stitch`` is not defined in
        this class -- confirm it exists on a subclass or mixin.
        """
        vectors = self.generate_feature_vector(count)
        polygon_lists = [self.decode_feature_vector(vec) for vec in vectors]
        polygons = [self.stitch(poly_list) for poly_list in polygon_lists]
        return polygons
|
999,110 | 303c4317584c641f895c481082e5281c256e76be | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (InlineQueryResultVoice, InputTextMessageContent, InlineKeyboardButton,
InlineQueryResultVenue, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_venue():
    """Class-scoped fixture: a fully-populated InlineQueryResultVenue built
    from the canonical values on TestInlineQueryResultVenue."""
    return InlineQueryResultVenue(
        TestInlineQueryResultVenue.id_,
        TestInlineQueryResultVenue.latitude,
        TestInlineQueryResultVenue.longitude,
        TestInlineQueryResultVenue.title,
        TestInlineQueryResultVenue.address,
        foursquare_id=TestInlineQueryResultVenue.foursquare_id,
        foursquare_type=TestInlineQueryResultVenue.foursquare_type,
        thumb_url=TestInlineQueryResultVenue.thumb_url,
        thumb_width=TestInlineQueryResultVenue.thumb_width,
        thumb_height=TestInlineQueryResultVenue.thumb_height,
        input_message_content=TestInlineQueryResultVenue.input_message_content,
        reply_markup=TestInlineQueryResultVenue.reply_markup)
class TestInlineQueryResultVenue(object):
    """Tests for telegram.InlineQueryResultVenue."""
    # Canonical attribute values shared by the fixture and the assertions.
    id_ = 'id'
    type_ = 'venue'
    latitude = 'latitude'
    longitude = 'longitude'
    title = 'title'
    address = 'address'
    foursquare_id = 'foursquare id'
    foursquare_type = 'foursquare type'
    thumb_url = 'thumb url'
    thumb_width = 10
    thumb_height = 15
    input_message_content = InputTextMessageContent('input_message_content')
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])

    def test_expected_values(self, inline_query_result_venue):
        """Every constructor argument surfaces as the matching attribute."""
        assert inline_query_result_venue.id == self.id_
        assert inline_query_result_venue.type == self.type_
        assert inline_query_result_venue.latitude == self.latitude
        assert inline_query_result_venue.longitude == self.longitude
        assert inline_query_result_venue.title == self.title
        assert inline_query_result_venue.address == self.address
        assert inline_query_result_venue.foursquare_id == self.foursquare_id
        assert inline_query_result_venue.foursquare_type == self.foursquare_type
        assert inline_query_result_venue.thumb_url == self.thumb_url
        assert inline_query_result_venue.thumb_width == self.thumb_width
        assert inline_query_result_venue.thumb_height == self.thumb_height
        assert (inline_query_result_venue.input_message_content.to_dict()
                == self.input_message_content.to_dict())
        assert inline_query_result_venue.reply_markup.to_dict() == self.reply_markup.to_dict()

    def test_to_dict(self, inline_query_result_venue):
        """to_dict() round-trips every attribute into plain dict entries."""
        inline_query_result_venue_dict = inline_query_result_venue.to_dict()
        assert isinstance(inline_query_result_venue_dict, dict)
        assert inline_query_result_venue_dict['id'] == inline_query_result_venue.id
        assert inline_query_result_venue_dict['type'] == inline_query_result_venue.type
        assert inline_query_result_venue_dict['latitude'] == inline_query_result_venue.latitude
        assert inline_query_result_venue_dict['longitude'] == inline_query_result_venue.longitude
        assert inline_query_result_venue_dict['title'] == inline_query_result_venue.title
        assert inline_query_result_venue_dict['address'] == inline_query_result_venue.address
        assert (inline_query_result_venue_dict['foursquare_id']
                == inline_query_result_venue.foursquare_id)
        assert (inline_query_result_venue_dict['foursquare_type']
                == inline_query_result_venue.foursquare_type)
        assert inline_query_result_venue_dict['thumb_url'] == inline_query_result_venue.thumb_url
        assert (inline_query_result_venue_dict['thumb_width']
                == inline_query_result_venue.thumb_width)
        assert (inline_query_result_venue_dict['thumb_height']
                == inline_query_result_venue.thumb_height)
        assert (inline_query_result_venue_dict['input_message_content']
                == inline_query_result_venue.input_message_content.to_dict())
        assert (inline_query_result_venue_dict['reply_markup']
                == inline_query_result_venue.reply_markup.to_dict())

    def test_equality(self):
        """Equality/hash are id-based: same id compares equal even when
        other fields differ (a == c); different id or type breaks it."""
        a = InlineQueryResultVenue(self.id_, self.longitude, self.latitude, self.title,
                                   self.address)
        b = InlineQueryResultVenue(self.id_, self.longitude, self.latitude, self.title,
                                   self.address)
        c = InlineQueryResultVenue(self.id_, '', self.latitude, self.title, self.address)
        d = InlineQueryResultVenue('', self.longitude, self.latitude, self.title,
                                   self.address)
        e = InlineQueryResultVoice(self.id_, '', '')
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        # Same id, different longitude -> still equal.
        assert a == c
        assert hash(a) == hash(c)
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)
|
999,111 | d0599d3f4b5b96854728285d6a464e274b030988 | #! /usr/bin/python
from __future__ import print_function
from dronekit import connect, VehicleMode, LocationGlobalRelative, Command
from pymavlink import mavutil
# Connect vehicle
print("Connect to vehicle")
vehicle = connect("127.0.0.1:14551", wait_ready=True)
# Check arming state
print(vehicle.armed)
# Change vehicle mode
print("Change vehicle mode to Manual")
vehicle.mode = VehicleMode("HOLD")
vehicle.mode = VehicleMode("MANUAL")
# Change vehicle groundspeed
vehicle.groundspeed = 0.5
# Get the set of commands from the vehicle
print("Download commands")
cmds = vehicle.commands
cmds.download()
cmds.wait_ready()
print("Clear commands")
cmds.clear()
# Create and add commands
print("Add 3 waypoints")
cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, 50.9372684, -1.4046249, 100))
cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, 50.9371561, -1.4046146, 100))
cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, 50.9371496, -1.4047894, 100))
print("Add RTL")
cmds.add(Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0, 0, 0, 0, 0, 0, 0))
print("Upload mission")
cmds.upload() # Send commands
vehicle.armed = True
vehicle.mode = VehicleMode("AUTO")
|
999,112 | c12f8d70a4f84967481d632d2d1ade86acec4786 | from city import City
from zoo import Zoo
# Smoke test: a City must come with a fully-initialised Zoo.
vienna = City("vienna")
assert vienna.zoo
assert isinstance(vienna.zoo, Zoo)
assert vienna.zoo.size == 130
assert vienna.zoo._owner_name == "Mrs Zoo Keeper"
# Print a human-readable summary of the city and its zoo.
print(
    f"City: {vienna.name}\n"
    f"Zoo owner: {vienna.zoo._owner_name}\n"
    f"Zoo's size: {vienna.zoo.size}\n"
    f"Zoo's animals: {', '.join([animal.name for animal in vienna.zoo.animals])}"
)
|
999,113 | 3582ad4b77c1a2eaef0460a2b3b8e80146f71ebc | from openerp import models, fields, api
class ConciliacionBancaria(models.TransientModel):
    """Wizard that exports a bank reconciliation report to XLSX.

    (A superseded, commented-out @api.multi variant of export_xls was
    removed; this version is the one registered with the report action.)
    """
    _name = "wizard.conciliacionbancaria"
    _description = "Conciliacion Bancaria"

    conciliacion_id = fields.Many2one('bank.reconciliation', string='Conciliacion Bancaria', required=True)
    imprime_movimientos_conciliados = fields.Selection([('si', 'Si'), ('no','No')], 'Imprime Movimientos Conciliados? ', default='no',required=True)

    def export_xls(self):
        """Build the report payload and launch the XLSX report action.

        Returns the report action dict when the context requests an XLSX
        export; otherwise returns None (unchanged original behaviour).
        """
        context = self._context
        datas = {'ids': context.get('active_ids', [])}
        datas['model'] = 'bank.reconciliation'
        # read()[0]: the current wizard's field values as a plain dict.
        datas['form'] = self.read()[0]
        # Many2one values come back as (id, name) tuples; keep only the id.
        for field in datas['form'].keys():
            if isinstance(datas['form'][field], tuple):
                datas['form'][field] = datas['form'][field][0]
        if context.get('xls_export'):
            return self.env.ref('export_conciliacionbancaria_xls.conciliacion_report_xls').report_action(self, data=datas)
999,114 | 87d65075db946aa39f44d1570340f9b96360180f | # -*- coding: utf-8 -*-
import datetime
import os
import time

import arcpy
class ConflictDuongBinhDo:
def __init__(self):
self.pathProcessGDB = r"C:\Generalize_25_50\50K_Process.gdb"
self.pathDuongBoNuoc = r"C:\Generalize_25_50\50K_Process.gdb\ThuyHe\DuongBoNuoc"
self.pathDuongBoNuocTemp = r"C:\Generalize_25_50\50K_Process.gdb\ThuyHe\DuongBoNuocTemp"
self.pathDuongBinhDo = r"C:\Generalize_25_50\50K_Process.gdb\DiaHinh\DuongBinhDo"
self.pathDuongBinhDoTemp = r"C:\Generalize_25_50\50K_Process.gdb\DiaHinh\DuongBinhDoTemp"
self.pathDuongBinhDoTempSinglePart = r"C:\Generalize_25_50\50K_Process.gdb\DiaHinh\DuongBinhDoTempSinglePart"
self.pathDuongBinhDoFinal = r"C:\Generalize_25_50\50K_Final.gdb\DiaHinh\DuongBinhDo"
pass
def Execute(self):
# Init WorksSpace
arcpy.env.workspace = self.pathProcessGDB
arcpy.env.overwriteOutput = True
arcpy.env.referenceScale = "50000"
#
print "Copy from \"{}\" to \"{}\"".format(self.pathDuongBinhDo, self.pathDuongBinhDoFinal)
arcpy.CopyFeatures_management(self.pathDuongBinhDo, self.pathDuongBinhDoFinal)
#
self.CreateBufferConflict(self.pathDuongBoNuoc, "DuongBoNuoc_Rep")
# Resolve Road Conflicts
print "Run: Resolve Road Conflicts"
duongBoNuocTempSinglePartLayer = arcpy.MakeFeatureLayer_management(self.pathDuongBoNuoc + "TempSinglePart")
duongBinhDoTempSinglePartLayer = arcpy.MakeFeatureLayer_management(self.pathDuongBinhDoTempSinglePart)
# Add Field
arcpy.AddField_management(duongBoNuocTempSinglePartLayer, "hierarchy", "LONG")
arcpy.AddField_management(duongBinhDoTempSinglePartLayer, "hierarchy", "LONG")
# Update Field
arcpy.CalculateField_management(duongBoNuocTempSinglePartLayer, "hierarchy", "0", "PYTHON_9.3")
arcpy.CalculateField_management(duongBinhDoTempSinglePartLayer, "hierarchy", "1", "PYTHON_9.3")
# Set Layer
arcpy.SetLayerRepresentation_cartography(duongBoNuocTempSinglePartLayer, self.GetAndSetRepresentation(duongBoNuocTempSinglePartLayer))
arcpy.SetLayerRepresentation_cartography(duongBinhDoTempSinglePartLayer, self.GetAndSetRepresentation(duongBinhDoTempSinglePartLayer))
# Run ResolveRoadConflicts
outPutResolveRoadConflicts = os.path.join(self.pathProcessGDB, "OutputResolveRoadConflicts")
arcpy.ResolveRoadConflicts_cartography([duongBoNuocTempSinglePartLayer, duongBinhDoTempSinglePartLayer], "hierarchy", outPutResolveRoadConflicts)
# Run Propagate Displacement
print "Run: Propagate Displacement"
duongBinhDoFinalLayer = arcpy.MakeFeatureLayer_management(self.pathDuongBinhDoFinal)
arcpy.SetLayerRepresentation_cartography(duongBinhDoFinalLayer, self.GetAndSetRepresentation(duongBinhDoFinalLayer))
arcpy.PropagateDisplacement_cartography(duongBinhDoFinalLayer, outPutResolveRoadConflicts, "AUTO")
pass
def ClipByBufferDuongBinhDo(self, duongBinhDoBuffer, inFeatureProcess):
outputClip = "in_memory\\outputClip"
arcpy.Clip_analysis(inFeatureProcess, duongBinhDoBuffer, outputClip, "0 Meters")
return outputClip
pass
def CreateBufferConflict(self, inFeatureProcess, representationName):
#
featureProcessLayer = arcpy.MakeFeatureLayer_management(inFeatureProcess)
duongBinhDoLayer = arcpy.MakeFeatureLayer_management("DuongBinhDo")
arcpy.SetLayerRepresentation_cartography(featureProcessLayer, self.GetAndSetRepresentation(featureProcessLayer))
arcpy.SetLayerRepresentation_cartography(duongBinhDoLayer, self.GetAndSetRepresentation(duongBinhDoLayer))
#
print "Run: Detect Graphic Conflict"
outFeatureClass = self.RunDetectGraphicConflict(featureProcessLayer, duongBinhDoLayer)
#
outFeatureClassConflictBuffer = "in_memory\\featureClassConflictBuffer"
print "Create Feature: {}".format(outFeatureClassConflictBuffer)
arcpy.Buffer_analysis(outFeatureClass, outFeatureClassConflictBuffer, "100 Meters", None, None, "ALL")
print "Delete {}".format(str(outFeatureClass))
arcpy.Delete_management(outFeatureClass)
#
print "Create Feature: {}".format(self.pathDuongBinhDoTemp)
arcpy.Clip_analysis("DuongBinhDo", outFeatureClassConflictBuffer, self.pathDuongBinhDoTemp)
print "Create Feature: {}".format(inFeatureProcess + "Temp")
arcpy.Clip_analysis(inFeatureProcess, outFeatureClassConflictBuffer, inFeatureProcess + "Temp")
#
print "Create Feature: {}".format(self.pathDuongBinhDoTempSinglePart)
if arcpy.Exists(self.pathDuongBinhDoTempSinglePart):
arcpy.Delete_management(self.pathDuongBinhDoTempSinglePart)
arcpy.MultipartToSinglepart_management(self.pathDuongBinhDoTemp, self.pathDuongBinhDoTempSinglePart)
print "Create Feature: {}".format(inFeatureProcess + "TempSinglePart")
if arcpy.Exists(inFeatureProcess + "TempSinglePart"):
arcpy.Delete_management(inFeatureProcess + "TempSinglePart")
arcpy.MultipartToSinglepart_management(inFeatureProcess + "Temp", inFeatureProcess + "TempSinglePart")
pass
def RunDetectGraphicConflict(self, inFeature, conflictFeature):
outFeatureClass = "in_memory\\outFeatureClass"
arcpy.DetectGraphicConflict_cartography(in_features = inFeature,
conflict_features = conflictFeature,
out_feature_class = outFeatureClass,
conflict_distance = "0 Meters",
line_connection_allowance = "1 Points")
return outFeatureClass
pass
    def GetAndSetRepresentation(self, inFeature):
        # Return the name of the representation class to use for a layer:
        # None when the layer has no representation, the only one when there
        # is exactly one, otherwise prompt the user to pick interactively.
        desc = arcpy.Describe(inFeature)
        if len(desc.representations) == 0:
            return None
        elif len(desc.representations) == 1:
            return desc.representations[0].name
        else:
            arrRepresentation = []
            for child in desc.representations:
                if child.datasetType == "RepresentationClass":
                    arrRepresentation.append(child.name)
            print "# Select Representation:"
            index = 0
            for rep in arrRepresentation:
                print "    {}. {}".format(str(index), str(rep))
                index += 1
            print "# Select: "
            # Loop until the user types a valid zero-based index.
            while(True):
                strKey = raw_input()
                try:
                    intKey = int(strKey)
                    if intKey >= 0 and intKey <= (len(arrRepresentation) - 1):
                        break
                    else:
                        print "# Out of range?"
                except ValueError:
                    print "# Could not convert data to an integer?"
            return arrRepresentation[intKey]
        pass
class RunTime:
    # Simple wall-clock stopwatch that prints elapsed time as HH:MM:SS.
    # NOTE(review): this file is Python 2 (print statements); the `/` operators
    # below rely on Python 2 integer division — under Python 3 they would
    # produce floats and the zero-padding tests would misbehave.
    def __init__(self):
        # Remember the construction time as the start of the measured interval.
        self.startTime = time.time()
        print "Start time: {}".format(datetime.datetime.now())
        pass
    def GetTotalRunTime(self):
        # Compute and print the elapsed time since construction.
        self.totalRunTime = int(time.time() - self.startTime)
        self.ConvertTime()
        self.strHours = ""
        self.strMinute = ""
        self.strSeconds = ""
        # `x / 10 == 0` is true for single-digit values (integer division),
        # so each component is left-padded to two digits.
        if self.hours / 10 == 0:
            self.strHours = "0" + str(self.hours)
        else:
            self.strHours = str(self.hours)
        if self.minute / 10 == 0:
            self.strMinute = "0" + str(self.minute)
        else:
            self.strMinute = str(self.minute)
        if self.seconds / 10 == 0:
            self.strSeconds = "0" + str(self.seconds)
        else:
            self.strSeconds = str(self.seconds)
        print "Total time: {0}:{1}:{2}".format(self.strHours, self.strMinute, self.strSeconds)
        pass
    def ConvertTime(self):
        # Split self.totalRunTime (seconds) into hours/minute/seconds fields;
        # destructively consumes self.totalRunTime in the process.
        self.hours = self.totalRunTime / (60 * 60)
        self.totalRunTime = self.totalRunTime - (self.hours * 60 * 60)
        self.minute = self.totalRunTime / 60
        self.totalRunTime = self.totalRunTime - (self.minute * 60)
        self.seconds = self.totalRunTime
        pass
if __name__ == "__main__":
    # Script entry point: time the whole contour-conflict workflow.
    runTime = RunTime()
    conflictDuongBinhDo = ConflictDuongBinhDo()
    print "Running..."
    conflictDuongBinhDo.Execute()
    print "Success!!!"
    runTime.GetTotalRunTime()
    pass
|
999,115 | 97c5abca28a76b57841d92ecdb1c00215b2717c3 | import unittest
from dnnamo.framework.tf import TFFramework
from dnnamo.loader import TFFathomLiteLoader
# Feature probe: the tests below are skipped when the optional fathomlite
# package is not installed.
try:
    import fathomlite
    _FATHOMLITE = True
except ImportError:
    _FATHOMLITE = False
@unittest.skipUnless(_FATHOMLITE, 'No Fathom module found.')
class TestTFFathomLiteLoader(unittest.TestCase):
    # Smoke tests: each case only checks that the named Fathom-lite model
    # loads through the TensorFlow framework adapter without raising.
    #_models = ['Seq2Seq', 'MemNet', 'Speech', 'Autoenc', 'Residual', 'VGG', 'AlexNet', 'DeepQ' ]
    _models = ['MemNet', 'Autoenc', 'Residual', 'VGG', 'AlexNet']
    def test_memnet(self):
        TFFramework().load(TFFathomLiteLoader, 'MemNet')
    def test_autoenc(self):
        TFFramework().load(TFFathomLiteLoader, 'Autoenc')
    def test_residual(self):
        TFFramework().load(TFFathomLiteLoader, 'Residual')
    def test_vgg(self):
        TFFramework().load(TFFathomLiteLoader, 'VGG')
    def test_alexnet(self):
        TFFramework().load(TFFathomLiteLoader, 'AlexNet')
|
999,116 | a7ae39608433ec03c43b4c77d038138b72cc546c | import csv
from itertools import islice
myreader = csv.DictReader(open('/Users/qiaoli/Downloads/allCustNumVisitsRangeLifecycle.csv'))
i=3
list=[]
with open('/Users/qiaoli/Downloads/allCustNumVisitsRangeLifecycle.csv') as inFH:
csvReader = csv.reader(inFH)
for row in csvReader:
if csvReader.line_num ==i:
list.append(row)
i=i+2
with open('/Users/qiaoli/Downloads/end_date.csv','wb') as fp:
csv_writer = csv.writer(fp)
csv_writer.writerows(list)
print "done"
|
999,117 | fe1e87b7d2dde99314f90e108f18e53dcf89486e | from java.util import HashMap
from java.util import HashSet
from java.util import Collections
from java.util import ArrayList
from java.io import FileInputStream
from com.bea.wli.config import TypeIds
from com.bea.wli.config.customization import Customization
from com.bea.wli.sb.management.importexport import ALSBImportOperation
import sys
#=======================================================================================
# Entry function to deploy project configuration and resources
# into a ALSB domain
#=======================================================================================
def customizeALSBDomain(importConfigFile, customFile):
    # WLST/Jython entry point: connect to an ALSB admin server using the
    # properties in importConfigFile, open a change session, optionally apply
    # a customization XML file, and commit (discarding the session on error).
    try:
        SessionMBean = None
        print 'Loading config from :', importConfigFile
        configProp = loadProps(importConfigFile)
        adminUrl = configProp.get("adminUrl")
        user = configProp.get("user")
        password = configProp.get("password")
        passphrase = configProp.get("passphrase")
        project = configProp.get("project")
        connectToServer(user, password, adminUrl)
        print 'connected to yer server'
        sessionName = createSessionName()
        print 'Created session', sessionName
        SessionMBean = getSessionMBean(sessionName)
        print 'SessionMBean started session'
        # The session-scoped configuration MBean name embeds the session name.
        ALSBConfigurationMBean = findService(String("ALSBConfiguration.").concat(sessionName), "com.bea.wli.sb.management.configuration.ALSBConfigurationMBean")
        print "ALSBConfiguration MBean found", ALSBConfigurationMBean
        #customize if a customization file is specified
        #affects only the created resources
        if customFile != None :
            print 'Loading customization File', customFile
            iStream = FileInputStream(customFile)
            customizationList = Customization.fromXML(iStream)
            for customization in customizationList:
                print '\n customization', customization.getDescription()
            print customizationList.size()
            ALSBConfigurationMBean.customize(customizationList)
            print 'Customization applied'
        SessionMBean.commitSession(sessionName)
        print 'session committed'
    except:
        # Best-effort rollback: discard the session (if created) and re-raise.
        print "Unexpected error:", sys.exc_info()[0]
        if SessionMBean != None:
            SessionMBean.discardSession(sessionName)
        raise
#=======================================================================================
# Utility function to print the list of operations
#=======================================================================================
def printOpMap(map):
    # Print each (ref -> operation) entry of a java.util.Map of import
    # operations, one per line ("operation ref").
    set = map.entrySet()
    for entry in set:
        op = entry.getValue()
        print op.getOperation(),
        ref = entry.getKey()
        print ref
    print
#=======================================================================================
# Utility function to print the diagnostics
#=======================================================================================
def printDiagMap(map):
    # Print the string form of every diagnostic value in a java.util.Map.
    set = map.entrySet()
    for entry in set:
        diag = entry.getValue().toString()
        print diag
    print
#=======================================================================================
# Utility function to load properties from a config file
#=======================================================================================
def loadProps(configPropFile):
    # Load a java.util.Properties object from the given properties file path.
    propInputStream = FileInputStream(configPropFile)
    configProps = Properties()
    configProps.load(propInputStream)
    return configProps
#=======================================================================================
# Connect to the Admin Server
#=======================================================================================
def connectToServer(username, password, url):
    # WLST: connect to the admin server and switch to the domainRuntime tree.
    connect(username, password, url)
    domainRuntime()
#=======================================================================================
# Utility function to read a binary file
#=======================================================================================
def readBinaryFile(fileName):
    # Read and return the entire contents of fileName as raw bytes.
    # Fixes: the original never closed the file handle (leak in long-running
    # WLST sessions) and shadowed the `file` builtin.
    fh = open(fileName, 'rb')
    try:
        return fh.read()
    finally:
        fh.close()
#=======================================================================================
# Utility function to create an arbitrary session name
#=======================================================================================
def createSessionName():
    # Build a unique-enough session name from the current time in millis
    # (java.lang.String / Long / System are implicit WLST globals).
    sessionName = String("SessionScript"+Long(System.currentTimeMillis()).toString())
    return sessionName
#=======================================================================================
# Utility function to load a session MBeans
#=======================================================================================
def getSessionMBean(sessionName):
    # Look up the global Session MBean and start a new named change session.
    SessionMBean = findService("Session","com.bea.wli.config.mbeans.SessionMBean")
    SessionMBean.createSession(sessionName)
    return SessionMBean
# IMPORT script init
# Top-level entry: argv[1] = import config properties, argv[2] = customization
# file (XML). Errors are dumped and re-raised so WLST exits non-zero.
try:
    # argv[1] is the export config properties file
    customizeALSBDomain(sys.argv[1], sys.argv[2])
except:
    print "Unexpected error: ", sys.exc_info()[0]
    dumpStack()
    raise
999,118 | 2c07a99ab27ddd2dda8818d543eebaa4e7b8f089 | #在Python程序中,分别使用未定义变量、访问列表不存在的索引、访问字典不存在的关键字观察系统提示的错误信息
# 使用未定义变量
# i+j
# 访问列表不存在的索引
# list_a = ['a',1,'c']
# print(list_a[4])
# 访问字典不存在的key
# dict_a = {'a':1,'b':2}
# print(dict_a['c'])
#通过Python程序产生IndexError,并用try捕获异常处理
try:
list_b = ['a','b','c']
print(list_b[3])
except Exception as a:
print(' 发生错误:%s ' % a) |
999,119 | 6e22a82f53cc1ef0f7e7d35347a78a18a4cf7f42 | import sys
sys.path.append("../")
import config
import logging
from os.path import join
def get_line_offset(filepath):
    '''
    For each file, return the list of byte-ish offsets at which each line
    starts (offsets are in characters as counted by len(line) in text mode).

    :param filepath: path of the file to be read
    :return: list of line-start offsets, or -1 if the file cannot be opened
             (kept for backward compatibility with existing callers)
    '''
    line_offset = []
    offset = 0
    try:
        # `with` guarantees the handle is closed; the original leaked it.
        with open(filepath, 'r') as fread:
            for line in fread:
                line_offset.append(offset)
                offset += len(line)
    except IOError:
        # Narrowed from a bare `except:`, which also swallowed KeyboardInterrupt.
        logging.info(filepath + ' not found while calculating line offsets')
        return -1
    return line_offset
def main(userID, content_path):
    '''
    Build, for one user, a mapping from each of their content-file IDs to the
    list of line-start offsets inside that file.

    :param userID: unique identifier for the user
    :param content_path: directory containing the user's day_<N> subfolders
    :return: {content_file_id: [line offsets]}
    '''
    offsets = {}
    index_path = join(config.CONTEXT_FOLDER, config.ID2CFILE + str(userID))
    with open(index_path, 'r') as index_file:
        for entry in index_file:
            file_id, file_name = entry.strip().split('\t')
            # The day number is embedded as the second '_'-separated token.
            day_id = file_id.split('_')[1]
            day_dir = join(content_path, 'day_' + day_id)
            offsets[file_id] = get_line_offset(join(day_dir, file_name))
    return offsets
999,120 | 22d1a75ebdcc4fd61a6174faca3ab3eeff09bce4 | import sys,math;a,b,c,e=[int(i)for i in raw_input().split()]
# Golfed contest loop (Python 2): a,b,c,e were read from stdin on the import
# line above. Each iteration prints one compass move (N/S/E/W combination)
# nudging (c,e) relative to the target (a,b). NOTE(review): the +=/-=
# directions look inverted versus the printed letters — presumably matching
# the judge's coordinate system; confirm against the original problem.
while 1:
    h='';i=''
    if c>a:h='W';c+=1
    elif c<a:h='E';c-=1
    if e>b:i='N';e-=1
    elif e<b:i='S';e+=1
    print i+h
999,121 | 82f577a60afa442bc6e8187b0a536b13ee11045b | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Listens to audio from microphone and waits for the configured wake word.
Once the wake word is detected, the program records audio and sends it to Google for converting to text.
When text is received, it is sent to an executor function which checks configured actions and triggers tha correct one
associated with this text command. The return message is then converted back to audio.
Currently, the file is prepared for use with Serbian language. All you need to do to use it with another language is to
change the "options.cfg" file which will be generated on first run.
"""
import os
import json
import random
import hashlib
import subprocess
import speech_recognition as sr
from gtts import gTTS
from precise_runner import PreciseEngine, PreciseRunner
import time
# Configuration
WAKE_WORD = "hey-mycroft"
LANG = 'sr'
WAKE_WORD_DIR = "wake_word"
RESPONSE_WORDS = ["molim", "da", "slušam", "kako mogu da pomognem", "izvolite", "recite"]
current_path = os.path.dirname(os.path.realpath(__file__))
cfg_path = os.path.join(current_path, 'options.cfg')
audio_cache_dir = os.path.join(current_path, 'audio_samples')
actions_path = os.path.join(current_path, 'actions')
actions = []
sleep_process = None
def calc_hash(sentence):
    """Return the hex MD5 digest of *sentence* (UTF-8 encoded)."""
    return hashlib.md5(sentence.encode()).hexdigest()
def text_to_audio(txt_data):
    """Synthesize *txt_data* to an mp3 in the audio cache (keyed by its MD5)
    and return the file path; reuses an existing cached file if present."""
    target = os.path.join(audio_cache_dir, "{}.mp3".format(calc_hash(txt_data)))
    if not os.path.isfile(target):
        gTTS(text=txt_data, lang=LANG, slow=False).save(target)
    return target
def select_response():
    """Return a random acknowledgement phrase from RESPONSE_WORDS."""
    # random.choice picks an element directly; the original generated an
    # index with choice(range(...)) and shadowed the builtin `id`.
    return random.choice(RESPONSE_WORDS)
def generate_audio(text_to_say):
    """Speak *text_to_say*: synthesize it to the mp3 cache (if not already
    there) and play it with mpg321. A None argument is a no-op."""
    if text_to_say is None:
        return
    # Consistency fix: reuse calc_hash() instead of re-implementing the MD5
    # hashing inline as the original did.
    audio_file_name = "{}.mp3".format(calc_hash(text_to_say))
    audio_file_path = os.path.join(audio_cache_dir, audio_file_name)
    if os.path.isfile(audio_file_path):
        print('Saying:', text_to_say)
    else:
        print('Generating:', text_to_say)
        text_to_audio(text_to_say)
        print("Done")
    # NOTE(review): path is interpolated into a shell command; it is built
    # from an MD5 hex digest inside audio_cache_dir, so no user-controlled
    # text reaches the shell — revisit if the cache path ever contains spaces.
    os.system("mpg321 {}".format(audio_file_path))
def recognize_speech_from_mic(recognizer, microphone):
    """Transcribe speech recorded from `microphone`.
    Returns a dictionary with three keys:
    "success": a boolean indicating whether or not the API request was
               successful
    "error": `None` if no error occured, otherwise a string containing
             an error message if the API could not be reached or
             speech was unrecognizable
    "transcription": `None` if speech could not be transcribed,
                     otherwise a string containing the transcribed text
    """
    # check that recognizer and microphone arguments are appropriate type
    if not isinstance(recognizer, sr.Recognizer):
        raise TypeError("`recognizer` must be `Recognizer` instance")
    if not isinstance(microphone, sr.Microphone):
        raise TypeError("`microphone` must be `Microphone` instance")
    # adjust the recognizer sensitivity to ambient noise and record audio
    # from the microphone
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    # set up the response object
    response = {
        "success": True,
        "error": None,
        "transcription": None
    }
    # try recognizing the speech in the recording
    # if a RequestError or UnknownValueError exception is caught,
    # update the response object accordingly
    try:
        # Uses Google's free web speech API with the module-level LANG code.
        response["transcription"] = recognizer.recognize_google(audio, language=LANG)
    except sr.RequestError:
        # API was unreachable or unresponsive
        response["success"] = False
        response["error"] = "API unavailable"
    except sr.UnknownValueError:
        # speech was unintelligible; note "success" stays True in this case
        response["error"] = "Unable to recognize speech"
    return response
def listen_user():
    """Prompt the user, record one utterance from the default microphone, and
    return its transcription (None when nothing could be transcribed)."""
    rec = sr.Recognizer()
    mic = sr.Microphone()
    print("Say something")
    result = recognize_speech_from_mic(rec, mic)
    transcription = result["transcription"]
    if result["success"] and transcription:
        print("Heard:", transcription)
    else:
        print("ERROR:", transcription)
    return transcription
def get_config():
    """Load options.cfg into the module globals WAKE_WORD / LANG /
    RESPONSE_WORDS, filling in (and persisting) defaults for missing keys.
    Creates the file on first run."""
    global WAKE_WORD
    global LANG
    global RESPONSE_WORDS
    # Read configuration from the cfg file if it exists
    if os.path.isfile(cfg_path):
        with open(cfg_path, 'r') as cfg:
            raw_data = cfg.read()
    else:
        raw_data = '{}'
    # Load the data into a json object
    try:
        data = json.loads(raw_data)
    except Exception as e:
        print('ERROR parsing config data: {}'.format(e))
        data = {}
    # Check if all data is available. If not, replace with defaults.
    cfg_data_missing = False
    for key, default in (('WAKE_WORD', WAKE_WORD),
                         ('LANG', LANG),
                         ('RESPONSE_WORDS', RESPONSE_WORDS)):
        if key not in data:
            data[key] = default
            cfg_data_missing = True
    if cfg_data_missing:
        # Some of the configuration values are missing. Add them to the file.
        print("INFO: Writing new configuration.")
        # Fix: serialize with json.dumps. The original hand-built the JSON
        # text and then did raw_data.replace("'", '"'), which corrupted any
        # value containing a quote or apostrophe.
        try:
            with open(cfg_path, 'w') as cfg:
                cfg.write(json.dumps(data, indent=4, ensure_ascii=False) + '\n')
        except Exception as e:
            print('ERROR writing configuration data: {}'.format(e))
    # Set defaults according to setup
    WAKE_WORD = data['WAKE_WORD']
    LANG = data['LANG']
    RESPONSE_WORDS = data['RESPONSE_WORDS']
def load_actions():
    """Scan actions_path for subdirectories containing a "words" trigger file
    and an executable "action" script, and append them to the global
    `actions` list as {'trig': [words], 'exec': script_path}."""
    global actions
    if not os.path.isdir(actions_path):
        # BUG FIX: the original called os.path.mkdir(), which does not exist
        # and raised AttributeError on first run; os.makedirs creates the dir.
        os.makedirs(actions_path)
    for item in os.listdir(actions_path):
        # Only consider subdirectories of the actions folder.
        item_path = os.path.join(actions_path, item)
        if not os.path.isdir(item_path):
            continue
        # An action needs both a "words" file and an "action" script.
        words_path = os.path.join(item_path, "words")
        script_path = os.path.join(item_path, "action")
        if not (os.path.isfile(words_path) and os.path.isfile(script_path)):
            continue
        if not os.access(script_path, os.X_OK):
            print("Script {} is not executable. Making it so.".format(script_path))
            os.chmod(script_path, 0o777)
        # "words" holds comma-separated trigger words (newlines ignored).
        with open(words_path, 'r') as f:
            data = f.read().replace('\n', '')
        trigger_words = [word.strip() for word in data.split(',')]
        actions.append({'trig': trigger_words, 'exec': script_path})
    print(actions)
def execute(command_string):
    """Run the first registered action whose trigger word occurs in
    *command_string* and return its stdout; None when nothing matches or the
    action fails."""
    print("EXEC:", command_string)
    try:
        for candidate in actions:
            for trigger in candidate['trig']:
                if trigger in command_string:
                    cmd = [candidate['exec'], command_string]
                    completed = subprocess.run(cmd, check=True,
                                               stdout=subprocess.PIPE,
                                               universal_newlines=True)
                    return completed.stdout
    except Exception as e:
        print("ERROR executing action:", e)
    return None
def wake_up():
    """Wake-word activation callback: speak an acknowledgement, record one
    command, execute the matching action, and speak its response."""
    generate_audio(select_response())
    request = listen_user()
    if request is None:
        return
    generate_audio(execute(request))
# Prepare audio cache directory
if not os.path.isdir(audio_cache_dir):
    os.mkdir(audio_cache_dir)
get_config()
load_actions()
# Start the Mycroft Precise wake-word engine; wake_up() fires on detection.
wake_word_model_path = os.path.join(current_path, WAKE_WORD_DIR, WAKE_WORD, WAKE_WORD + '.pb')
engine = PreciseEngine('mycroft-precise', wake_word_model_path)
runner = PreciseRunner(engine, on_activation=wake_up)
runner.start()
# The runner works on its own thread; keep the main thread alive forever.
while True:
    time.sleep(10)
|
999,122 | b155504d7a49a0fc02e5650510aad7762f2dc25c | import sys
# Balanced-bracket checker: for each stdin line (until a lone "."), print
# "yes" when every ( and [ is properly matched and nested, else "no".
while True:
    line = sys.stdin.readline().rstrip()
    if line == ".":
        break
    closers = {")": "(", "]": "["}
    stack = []
    balanced = True
    for ch in line:
        if ch in ("(", "["):
            stack.append(ch)
        elif ch in closers:
            if stack and stack[-1] == closers[ch]:
                stack.pop()
            else:
                balanced = False
                break
    # Balanced only if no mismatch occurred and nothing is left open.
    if balanced and not stack:
        print("yes")
    else:
        print("no")
|
999,123 | 67543d815fc48f63b490b2c396e8ad4b57368d91 | import torch.nn as nn
import torch
import pyro.distributions as distribution
import torch.nn.functional as F
import copy
class MeanFieldNormal(nn.Module):
    """Mean-field (fully factorized) Normal variational distribution with
    learnable loc and (pre-softplus) scale parameters."""
    def __init__(self, shape, loc=None, scale=None, event=1):
        # shape: tensor shape of the variational parameters.
        # loc/scale: optional initial tensors; when omitted, loc ~ N(0, 0.1)
        # and the raw scale ~ N(-3, 0.1) (softplus(-3) gives a small std).
        # event: number of trailing dims treated as event dims (to_event).
        super(MeanFieldNormal, self).__init__()
        self.event = event
        if loc is None:
            self.loc = nn.Parameter(torch.normal(mean=torch.zeros(shape), std=torch.ones(shape) * 0.1), requires_grad=True)
        else:
            self.loc = nn.Parameter(loc, requires_grad=True)
        if scale is None:
            self.scale = nn.Parameter(torch.normal(mean=torch.ones(shape) * -3, std=0.1*torch.ones(shape)), requires_grad=True)
        else:
            self.scale = nn.Parameter(scale, requires_grad=True)
    def rsample(self):
        # Reparameterized sample from the pyro Normal (softplus keeps std > 0).
        sigma = F.softplus(self.scale)
        dist = distribution.Normal(loc=self.loc, scale=sigma).to_event(self.event)
        return dist.rsample()
    def log_prob(self, value):
        # Log density of `value` under the current variational distribution.
        sigma = F.softplus(self.scale)
        dist = distribution.Normal(loc=self.loc, scale=sigma).to_event(self.event)
        return dist.log_prob(value)
    def forward(self, x):
        # This module is not a function approximator; use rsample()/log_prob().
        raise NotImplementedError
    def distribution(self):
        # Plain torch.distributions.Normal view (no event reinterpretation).
        sigma = F.softplus(self.scale)
        dist = torch.distributions.Normal(loc=self.loc, scale=sigma)
        return dist
|
999,124 | eefc8b540201446b49bd284c08b28652131534a8 | """darkbox.util.osutil"""
import distro
import platform
def get_platform():
    """Map platform.system() to a short tag: 'mac', 'win', 'nix' or 'unk'."""
    return {'Darwin': 'mac', 'Windows': 'win', 'Linux': 'nix'}.get(
        platform.system(), 'unk')
def get_distro():
    """Return the short Linux distribution id via the `distro` package.
    From platform.py:
    _supported_dists = (
        'SuSE', 'debian', 'fedora', 'redhat', 'centos',
        'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
        'UnitedLinux', 'turbolinux', 'arch', 'mageia')
    """
    # [0] is the distro id (e.g. 'ubuntu'); full name/version are discarded.
    return distro.linux_distribution(full_distribution_name=False)[0]
999,125 | 7cc044bee1657f5d490828a0c34bd06317dfd81f | #!/usr/bin/env python
# coding: utf-8
from cx_Freeze import setup, Executable
import sys,os
# cx_Freeze build options: bundle os plus the tkinter GUI modules.
build_exe_options = {"packages": ["os"], "includes":["tkinter", "tkinter.ttk"]}
base=None
# "Win32GUI" suppresses the console window on Windows builds.
if sys.platform == "win32":
    base = "Win32GUI"
setup(name="Timer",
      version="1.0",
      description="Countdown timer",
      options = {"build_exe": build_exe_options},
      executables=[Executable("Timer.py", base=base, icon = "chronometer.ico")]
      )
|
999,126 | 90bb83c86f1989004f08869f65d30c835e9a30b4 | from __future__ import division
import numpy as np
import itertools
import logging
from .endclasses import endarray, lton, wc
from .energetics_basic import EnergeticsBasic
LOGGER = logging.getLogger(__name__)
def values_chunked(items, endtype, chunk_dim=10):
    """
    Given a list of lists of acceptable numbers for each position in a row of
    an array, create every possible row, and return an iterator that returns
    chunks of every possible row up to chunk_dim, iterating dimensions higher
    than chunk_dim. This probably doesn't need to be called directly, and may
    have a _ added in the future.
    Return this as an endarray, with set endtype. This can be easily removed
    for use elsewhere.
    """
    ilengths = [len(x) for x in items]
    n = len(items)
    items = [np.array(x) for x in items]
    if n > chunk_dim:
        # First p positions are iterated lazily (outer product); the last
        # chunk_dim positions are fully enumerated inside each chunk.
        p = n - chunk_dim
        q = chunk_dim
        outer = itertools.product(*(items[0:p]))
    else:
        # Everything fits in one chunk; outer yields a single empty prefix.
        p = 0
        q = n
        def outer_iter():
            yield ()
        outer = outer_iter()
    # Pre-build the chunk holding every combination of the last q positions.
    chunk = np.zeros(
        [np.prod(ilengths[p:]), len(items)], dtype=int).view(endarray)
    chunk.endtype = endtype
    chunk[:, p:] = np.indices(ilengths[p:]).reshape(q, -1).T
    # Translate index values into the actual allowed base codes per column.
    for i in range(p, n):
        chunk[:, i] = items[i][chunk[:, i]]
    # NOTE: the SAME chunk array is mutated and re-yielded each iteration —
    # consumers must not hold references across iterations.
    for seq in outer:
        chunk[:, :p] = seq
        yield chunk
def get_accept_set(endtype,
                   length,
                   interaction,
                   fdev,
                   maxendspurious,
                   spacefilter=None,
                   adjacents=['n', 'n'],
                   alphabet='n',
                   energetics=None):
    """Enumerate the full sequence space for an end of the given type/length
    and return, as an endarray, every sequence accepted by the space filter.

    interaction/fdev/maxendspurious parameterize the default
    spacefilter_standard when no spacefilter is supplied.
    """
    if not energetics:
        energetics = EnergeticsBasic()
    if not spacefilter:
        spacefilter = spacefilter_standard(interaction, interaction * fdev,
                                           maxendspurious)
    # Generate the per-position base-choice template (with adjacent bases).
    if endtype == 'DT':
        template = [lton[adjacents[0]]] + [lton[alphabet.lower()]] \
            * length + [lton[wc[adjacents[1]]]]
    elif endtype == 'TD':
        template = [lton[wc[adjacents[1]]]] + [lton[alphabet.lower()]] \
            * length + [lton[adjacents[0]]]
    elif endtype == 'S':
        template = [lton[alphabet.lower()]]*length
    LOGGER.info("Length {0}, type {1}, adjacents {2}, alphabet {3}.".format(
        length, endtype, adjacents, alphabet))
    LOGGER.debug("Have template {0}.".format(template))
    # Create the chunk iterator
    endchunk = values_chunked(template, endtype)
    # Use spacefilter to filter chunks down to usable sequences
    matcharrays = []
    chunknum = 0
    totchunks = None
    # np.prod: np.product was deprecated and removed in NumPy 2.0.
    totends = np.prod([len(x) for x in template])
    LOGGER.debug(
        "Have {0} ends in total before any filtering.".format(totends))
    for chunk in endchunk:
        matcharrays.append(spacefilter(chunk, energetics))
        if not totchunks:
            totchunks = totends // len(chunk)
        chunknum += 1
        LOGGER.debug("Found {0} filtered ends in chunk {1} of {2}.".format(
            len(matcharrays[-1]), chunknum, totchunks))
    LOGGER.debug("Done with spacefiltering.")
    availends = np.vstack(matcharrays).view(endarray)
    availends.endtype = endtype
    return availends
def _make_avail(endtype,
                length,
                spacefilter,
                endfilter,
                endchooser,
                energetics,
                adjacents=['n', 'n'],
                num=0,
                numtries=1,
                oldendfilter=None,
                oldends=[],
                alphabet='n'):
    """Build the endarray of candidate ends available to the search in
    find_end_set_uniform: enumerate the space, apply `spacefilter`, then
    filter against any pre-existing `oldends` (with `oldendfilter` when
    given). The endchooser/num/numtries parameters are accepted for
    signature parity with the caller but are unused here.
    """
    # Generate the per-position base-choice template (with adjacent bases).
    if endtype == 'DT':
        template = [lton[adjacents[0]]] + [lton[alphabet.lower()]] \
            * length + [lton[wc[adjacents[1]]]]
    elif endtype == 'TD':
        template = [lton[wc[adjacents[1]]]] + [lton[alphabet.lower()]] \
            * length + [lton[adjacents[0]]]
    elif endtype == 'S':
        template = [lton[alphabet.lower()]]*length
    LOGGER.info("Length {0}, type {1}, adjacents {2}, alphabet {3}.".format(
        length, endtype, adjacents, alphabet))
    LOGGER.debug("Have template {0}.".format(template))
    # Create the chunk iterator
    endchunk = values_chunked(template, endtype)
    # Use spacefilter to filter chunks down to usable sequences
    matcharrays = []
    chunknum = 0
    totchunks = None
    # np.prod: np.product was deprecated and removed in NumPy 2.0.
    totends = np.prod([len(x) for x in template])
    LOGGER.debug(
        "Have {0} ends in total before any filtering.".format(totends))
    for chunk in endchunk:
        matcharrays.append(spacefilter(chunk, energetics))
        if not totchunks:
            totchunks = totends // len(chunk)
        chunknum += 1
        LOGGER.debug("Found {0} filtered ends in chunk {1} of {2}.".format(
            len(matcharrays[-1]), chunknum, totchunks))
    LOGGER.debug("Done with spacefiltering.")
    availends = np.vstack(matcharrays).view(endarray)
    availends.endtype = endtype
    # Use endfilter to filter available sequences taking into account old
    # sequences.
    if len(oldends) > 0:
        if oldendfilter:
            availends = oldendfilter(oldends, None, availends, energetics)
        else:
            availends = endfilter(oldends, None, availends, energetics)
    return availends
def find_end_set_uniform(endtype,
                         length,
                         spacefilter,
                         endfilter,
                         endchooser,
                         energetics,
                         adjacents=['n', 'n'],
                         num=0,
                         numtries=1,
                         oldendfilter=None,
                         oldends=[],
                         alphabet='n',
                         _presetavail=False):
    """
    Find a set of ends of uniform length and type satisfying uniform
    constraint functions (eg, constrant functions are the same for each
    end).
    This function is intended to be complicated and featureful. If you want
    something simpler, try easy_ends
    Parameters
    ----------
    endtype : str
        right now 'DT' for 3'-terminal ends, and 'TD' for
        5'-terminal ends,
    length : int
        length of ends, not including adjacent bases, if applicable.
    adjacents : list of str
        (defaults to ['n','n']): acceptable bases for adjacents
        (eg, ['n','n'] or ['c', 'c']) for the ends and their complements,
    num : int
        (defaults to 0): number of ends to find (0 keeps finding until
        available ends are exhausted)
    numtries : int
        (defaults to 1): if > 1, the function will return a list of
        sets of ends that all individually satisfy the constraints, so that
        the best one can be selected manually
    spacefilter: function
        a "spacefilter" function that takes endarrays and
        filters them down to ends that, not considering spurious
        interactions, are acceptable.
    endfilter: function
        an "endfilter" function that takes current ends in the
        set, available ends (filtered with current ends), and new ends added,
        and filters the available ends, considering interactions between ends
        (eg, spurious interactions).
    endchooser : function
        an "endchooser" function that takes current ends in the
        set and available ends, and returns a new end to add to the set.
    energetics : function
        an "energyfunctions" class that provides the energy
        functions for everything to use.
    oldends : endarray
        an endarray of old ends to consider as part of the set
    alphabet : str
        a single letter specifying what the alphabet for the ends
        should be (eg, four or three-letter code)
    oldendfilter : str
        a different "endfilter" function for use when filtering
        the available ends using interactions with old ends. This is normally
        not useful, but can be useful if you want, for example, to create a
        sets with higher cross-interactions between two subsets than within
        the two subsets.
    Returns
    -------
    endarray
        an endarray of generated ends, including provided old ends
    """
    # Normalize oldends given as plain strings into an endarray.
    if len(oldends) > 0:
        if type(oldends[0]) is str:
            oldends = endarray(oldends, endtype)
    # _presetavail lets callers reuse a previously computed candidate space
    # instead of re-enumerating/filtering it (expensive).
    if isinstance(_presetavail, endarray):
        startavail = _presetavail
    else:
        startavail = _make_avail(endtype,
                                 length,
                                 spacefilter,
                                 endfilter,
                                 endchooser,
                                 energetics,
                                 adjacents,
                                 num,
                                 numtries,
                                 oldendfilter,
                                 oldends,
                                 alphabet)
    endsets = []
    availends = startavail.copy()
    LOGGER.debug("Starting with {0} ends.".format(len(availends)))
    # Greedy search, restarted numtries times from the same candidate space.
    while len(endsets) < numtries:
        curends = oldends
        availends = startavail.copy()
        numends = 0
        while True:
            # Ask the chooser for one new end, then prune the remaining
            # candidates against it.
            newend = endarray(
                np.array([endchooser(curends, availends, energetics)]),
                endtype)
            LOGGER.debug("Chose end {0}.".format(repr(newend)))
            newend.endtype = endtype
            availends = endfilter(newend, curends, availends, energetics)
            LOGGER.debug("Done filtering.")
            if curends is None:
                curends = newend
            elif len(curends) == 0:
                curends = newend
            else:
                curends = curends.append(newend)
            numends += 1
            LOGGER.debug("Now have {0} ends in set, and {1} ends available.".format(numends, len(availends)))
            # Stop when the space is exhausted or the requested count is met.
            if len(availends) == 0:
                LOGGER.info("Found {0} ends.".format(numends))
                break
            if numends >= num and num > 0:
                break
        endsets.append(curends)
    # Verification:
    # Note: this currently gives weird output that is not helpful when it fails.
    # But if this fails, you've done something very weird, most likely, because
    # this is just internal sanity checking.
    for endset in endsets:
        oldr = np.arange(0, len(oldends))
        newr = np.arange(len(oldends), len(endset))
        allr = np.arange(0, len(endset))
        # All new ends must satisfy old ends:
        if oldendfilter is None and len(oldends) > 0:
            assert np.asarray(
                endfilter(endset[oldr, :], None,
                          endset[newr, :], energetics) ==
                endset[newr, :]).all()
        elif len(oldends) > 0:
            assert np.asarray(
                oldendfilter(endset[oldr, :], None,
                             endset[newr, :], energetics) ==
                endset[newr, :]).all()
        # Each new end must allow all others
        for i in newr:
            if oldendfilter is None:
                assert np.asarray(
                    endfilter(endset[i, :][None, :], None,
                              endset, energetics) ==
                    endset[i != allr, :]).all()
            else:
                # With a separate old-end filter, check old and new subsets
                # against their respective filters.
                assert np.asarray(
                    oldendfilter(endset[i, :][None, :], None,
                                 endset[oldr, :], energetics) ==
                    endset[oldr, :]).all()
                assert np.asarray(
                    endfilter(endset[i, :][None, :], None,
                              endset[newr, :], energetics) ==
                    endset[newr[i != newr], :]).all()
    if len(endsets) > 1:
        return endsets
    else:
        # When the caller opted into the preset-avail protocol, also hand back
        # the candidate space so it can be reused in a later call.
        if _presetavail is None or isinstance(_presetavail,endarray):
            return endsets[0], startavail
        else:
            return endsets[0]
def enhist(endtype,
           length,
           adjacents=['n', 'n'],
           alphabet='n',
           bins=None,
           energetics=None,
           plot=False,
           color='b'):
    """Histogram the matching (non-spurious) hybridization energies over the
    whole sequence space for an end of the given type/length.

    Returns (hist, bins, info) where info carries min/max/mean over the raw
    energies and emean/estd/emedian estimated from the binned histogram.
    Set plot=True to also draw a matplotlib bar chart.
    """
    # Per-position base-choice template, including adjacent bases.
    if endtype == 'DT':
        template = [lton[adjacents[0]]] +\
            [lton[alphabet.lower()]] * length + [lton[wc[adjacents[1]]]]
    elif endtype == 'TD':
        template = [lton[wc[adjacents[1]]]] +\
            [lton[alphabet.lower()]] * length + [lton[adjacents[0]]]
    elif endtype == 'S':
        template = [lton[alphabet.lower()]]*length
    if not energetics:
        energetics = EnergeticsBasic()
    # Heuristic bin range from the weakest (AT-ish) / strongest (GC-ish)
    # alternating sequences.
    minbin = 0.8 * energetics.matching_uniform(
        endarray([([0, 3] * length)[0:length + 2]], endtype))
    maxbin = 1.1 * energetics.matching_uniform(
        endarray([([1, 2] * length)[0:length + 2]], endtype))
    # Fix: `if not bins` raised "truth value is ambiguous" for ndarray input;
    # only generate default bins when none were provided.
    if bins is None:
        bins = np.arange(minbin, maxbin, 0.1)
    LOGGER.debug("Have template {0} and type {1}.".format(template, endtype))
    # Iterate the space in chunks, accumulating the histogram incrementally.
    endchunk = values_chunked(template, endtype)
    hist = np.zeros(len(bins) - 1, dtype='int')
    # np.prod: np.product was deprecated and removed in NumPy 2.0.
    totends = np.prod([len(x) for x in template])
    finishedends = 0
    info = {'min': np.inf, 'max': -np.inf, 'mean': 0}
    for chunk in endchunk:
        matchens = energetics.matching_uniform(chunk)
        hist += np.histogram(matchens, bins)[0]
        info['max'] = max(info['max'], np.amax(matchens))
        info['min'] = min(info['min'], np.amin(matchens))
        # Running mean over all chunks seen so far.
        info['mean'] = (info['mean']*(finishedends)/(len(chunk)+finishedends)
                        + np.mean(matchens) * len(chunk) /
                        (len(chunk)+finishedends))
        finishedends += len(matchens)
        LOGGER.debug("Done with {0}/{1} ends.".format(finishedends, totends))
    # Estimate mean/std/median from the binned counts (bin centers).
    x = (bins[:-1] + bins[1:]) / 2
    n = hist
    info['emean'] = np.sum(n * x, dtype='double') / np.sum(n, dtype='int64')
    info['estd'] = np.sqrt(
        np.sum(n * (
            x - info['emean'])**2, dtype='double') / np.sum(n, dtype='int64'))
    cs = np.cumsum(n)
    info['emedian'] = x[np.flatnonzero(cs >= cs[-1] / 2.0)[0]]
    if plot:
        import matplotlib.pyplot as plt
        plt.bar(
            bins[:-1],
            hist,
            width=(bins[1] - bins[0]),
            label="Type {3}, Length {0}, Adjs {1}, Alph {2}".format(
                length, adjacents, alphabet, endtype),
            color=color)
        plt.title(
            "Matching Energies of Ends of Type {3}, Length {0}, Adjs {1}, Alph {2}".
            format(length, adjacents, alphabet, endtype))
        plt.xlabel("Standard Free Energy (-kcal/mol)")
        plt.ylabel("Number of Ends")
        # plt.show()
    return (hist, bins, info)
def easyends(endtype,
             endlength,
             number=0,
             interaction=None,
             fdev=0.05,
             maxspurious=0.5,
             maxendspurious=None,
             tries=1,
             oldends=None,
             adjs=None,
             energetics=None,
             alphabet='n',
             echoose=None,
             absolute=False,
             _presetavail=False):
    """
    Easyends is an attempt at creating an easy-to-use function for finding sets
    of ends.
    * endtype: specifies the type of end being considered. The system for
      classifying end types goes from 5' to 3', and consists of letters
      describing each side of the end. For example, an end that starts after a
      double-stranded region on the 5' side and ends at the end of the strand
      would be 'DT', while one that starts at the beginning of a strand on the
      5' side and ends in a double-stranded region would be 'TD'. 'T' stands
      for terminal, 'D' stands for double-stranded region, and 'S' stands for
      single-stranded region. 'S', however, is not currently supported.
    * endlength: specifies the length of end being considered, not including
      adjacent bases.
    * number (optional): specifies the number of ends to find. If zero or not
      provided, easyends tries to find as many ends as possible.
    * interaction (optional): a positive number corresponding to the desired
      standard free energy for hybridization of matching sticky ends. If not
      provided, easyends calculates an optimal value based on the sequence
      space.
    * fdev (default 0.05): the fractional deviation (above or below) of
      allowable matching energies. maxspurious (default 0.5): the maximum
      spurious interaction, as a fraction of the matching interaction.
    * maxendspurious (default None): if provided, maxspurious is only used for
      spurious interactions between ends defined as ends, and ends defined as
      complements. Maxendspurious is then the maximum spurious interaction
      between ends and ends, and complements and complements. In a system
      where spurious interactions between ends and complements are more important
      than other spurious interactions, this can allow for better sets of ends.
    * tries (default 1): if > 1, easyends will return a list of sets of ends,
      all satisfying the constraints.
    * oldends (optional, default []): a list of ends to be considered as
      already part of the set.
    * adjs (default ['n','n']): allowable adjacent bases for ends and
      complements.
    * absolute (default False): fdev, maxspurious, and maxendspurious to be
      interpreted as absolute kcal/mol values rather than fractional values.
    * energetics (optional): an energetics class providing the energy
      calculation functions. You probably don't need to change this.
    * alphabet (default 'n'): The alphabet to use for ends, allowing
      for three-letter codes.
    """
    # Bug fix: mutable default arguments ([] and ['n','n']) were shared
    # across calls; use None sentinels and normalize here instead.
    if oldends is None:
        oldends = []
    if adjs is None:
        adjs = ['n', 'n']
    if not energetics:
        efunc = EnergeticsBasic()
    else:
        efunc = energetics
    if (not interaction) or (interaction == 0):
        # No target energy given: use the median matching energy of the
        # whole sequence space as the target.
        interaction = enhist(
            endtype,
            endlength,
            energetics=efunc,
            adjacents=adjs,
            alphabet=alphabet)[2]['emedian']
        LOGGER.info("Calculated optimal interaction energy is {0}.".format(
            interaction))
    # In fractional mode the thresholds scale with the target interaction;
    # in absolute mode they are used as raw kcal/mol values.
    if not absolute:
        mult = interaction
    else:
        mult = 1.0
    maxcompspurious = maxspurious * mult
    if not maxendspurious:
        maxendspurious = maxspurious * mult
    else:
        maxendspurious = maxendspurious * mult
    sfilt = spacefilter_standard(interaction, mult * fdev,
                                 maxendspurious)
    efilt = endfilter_standard_advanced(maxcompspurious, maxendspurious)
    if not echoose:
        echoose = endchooser_standard(interaction)
    return find_end_set_uniform(
        endtype,
        endlength,
        sfilt,
        efilt,
        echoose,
        energetics=efunc,
        numtries=tries,
        oldends=oldends,
        adjacents=adjs,
        num=number,
        alphabet=alphabet,
        _presetavail=_presetavail)
def easy_space(endtype,
               endlength,
               interaction=None,
               fdev=0.05,
               maxspurious=0.5,
               maxendspurious=None,
               tries=1,
               oldends=None,
               adjs=None,
               energetics=None,
               alphabet='n',
               echoose=None):
    """
    Enumerate all pairs of compatible ends in the filtered sequence space.

    Parameters mirror easyends(); thresholds here are always fractional
    (scaled by the target interaction). Returns a list of (end, end)
    sequence-list pairs whose mutual spurious interactions are all below
    the computed thresholds.
    """
    # Bug fix: mutable default arguments ([] and ['n','n']) were shared
    # across calls; use None sentinels and normalize here instead.
    if oldends is None:
        oldends = []
    if adjs is None:
        adjs = ['n', 'n']
    length = endlength
    if not energetics:
        efunc = EnergeticsBasic()
        energetics = efunc
    else:
        efunc = energetics
    if (not interaction) or (interaction == 0):
        # No target energy given: use the median matching energy of the
        # whole sequence space as the target.
        interaction = enhist(
            endtype,
            endlength,
            energetics=efunc,
            adjacents=adjs,
            alphabet=alphabet)[2]['emedian']
        LOGGER.info("Calculated optimal interaction energy is {0}.".format(
            interaction))
    maxcompspurious = maxspurious * interaction
    if not maxendspurious:
        maxendspurious = maxspurious * interaction
    else:
        maxendspurious = maxendspurious * interaction
    sfilt = spacefilter_standard(interaction, interaction * fdev,
                                 maxendspurious)
    spacefilter = sfilt
    if not echoose:
        echoose = endchooser_standard(interaction)
    adjacents = adjs
    if endtype == 'DT':
        template = [lton[adjacents[0]]] + [lton[alphabet.lower()]] \
            * length + [lton[wc[adjacents[1]]]]
    elif endtype == 'TD':
        template = [lton[wc[adjacents[1]]]] + [lton[alphabet.lower()]] \
            * length + [lton[adjacents[0]]]
    # Create the chunk iterator
    endchunk = values_chunked(template, endtype)
    # Use spacefilter to filter chunks down to usable sequences
    matcharrays = []
    chunknum = 0
    totchunks = None
    totends = np.product([len(x) for x in template])
    LOGGER.info(
        "Have {0} ends in total before any filtering.".format(totends))
    for chunk in endchunk:
        matcharrays.append(spacefilter(chunk, energetics))
        if not totchunks:
            totchunks = totends // len(chunk)
        chunknum += 1
        LOGGER.debug("Found {0} filtered ends in chunk {1} of {2}.".format(
            len(matcharrays[-1]), chunknum, totchunks))
    LOGGER.debug("Done with spacefiltering.")
    availends = np.vstack(matcharrays).view(endarray)
    availends.endtype = endtype
    # Build the full cross product of filtered ends and compute all four
    # pairwise interaction classes.
    availendsr = np.repeat(availends, len(availends), axis=0)
    availendst = np.tile(availends, (len(availends), 1))
    vals_ee = energetics.uniform(availendsr.ends, availendst.ends)
    vals_ec = energetics.uniform(availendsr.ends, availendst.comps)
    vals_ce = energetics.uniform(availendsr.comps, availendst.ends)
    vals_cc = energetics.uniform(availendsr.comps, availendst.comps)
    vals_tf = ((vals_ee < maxendspurious) & (vals_cc < maxendspurious) &
               (vals_ec < maxcompspurious) & (vals_ce < maxcompspurious))
    # Bug fix: on Python 3, zip() returns a lazy iterator that cannot be
    # indexed; materialize it before selecting the passing pairs.
    zipendsnf = list(zip(availendsr.tolist(), availendst.tolist()))
    zipends = [zipendsnf[x] for x in np.flatnonzero(vals_tf)]
    return zipends
def spacefilter_standard(desint, dev, maxself):
    """
    A spacefilter function: filters to ends that have a end-complement
    interaction of between desint-dev and desint+dev, and a self-interaction
    (end-end or comp-comp) of less than maxself.
    """
    def spacefilter(fullends, energetics):
        # Count runs of four identical bases (codes 1 and 2) per sequence;
        # any such run disqualifies the end.
        runs = np.zeros(fullends.shape[0])
        for start in range(0, fullends.shape[1] - 3):
            window = fullends[:, start:(start + 4)]
            for base in (2, 1):
                runs += (np.sum(
                    np.array(window == [base] * 4), axis=1) == 4)
        # Keep ends whose matching energy sits inside the target band and
        # that contain no four-in-a-row run.
        match = energetics.matching_uniform(fullends)
        keep = np.flatnonzero((match < desint + dev) &
                              (match > desint - dev) & (runs == 0))
        candidates = fullends[keep]
        # Drop anything with a too-strong self interaction on either the
        # end or complement side.
        ee = energetics.uniform(candidates.ends, candidates.ends)
        cc = energetics.uniform(candidates.comps, candidates.comps)
        ok = np.flatnonzero((ee < maxself) & (cc < maxself))
        return candidates[ok]
    return spacefilter
def endfilter_standard(maxspurious):
    """
    An endfilter function: filters out ends that have any (end-end, end-comp,
    comp-end, comp-comp) interactions with new ends above maxspurious.
    """
    def endfilter(newends, currentends, availends, energetics):
        n_avail = len(availends)
        n_new = len(newends)
        def pairwise(new_side, avail_side):
            # All-pairs interaction energies, one row per available end.
            return energetics.uniform(
                np.repeat(new_side, n_avail, 0),
                np.tile(avail_side, (n_new, 1))).reshape(
                    n_avail, n_new, order='F')
        spurious = np.hstack((pairwise(newends.ends, availends.ends),
                              pairwise(newends.comps, availends.ends),
                              pairwise(newends.ends, availends.comps),
                              pairwise(newends.comps, availends.comps)))
        # Worst spurious interaction of each available end with any new end.
        worst = np.amax(spurious, 1)
        return availends[worst < maxspurious]
    return endfilter
def endfilter_standard_advanced(maxcompspurious, maxendspurious):
    """
    An endfilter function: filters out ends that have end-comp or comp-end
    interactions above maxcompspurious, and end-end or comp-comp interactions
    above maxendspurious.
    """
    def endfilter(newends, currentends, availends, energetics):
        n_avail = len(availends)
        n_new = len(newends)
        def pairwise(new_side, avail_side):
            # All-pairs interaction energies, one row per available end.
            return energetics.uniform(
                np.repeat(new_side, n_avail, 0),
                np.tile(avail_side, (n_new, 1))).reshape(
                    n_avail, n_new, order='F')
        # end-end / comp-comp interactions are limited by maxendspurious.
        worst_end = np.amax(
            np.hstack((pairwise(newends.ends, availends.ends),
                       pairwise(newends.comps, availends.comps))), 1)
        # end-comp / comp-end interactions are limited by maxcompspurious.
        worst_comp = np.amax(
            np.hstack((pairwise(newends.comps, availends.ends),
                       pairwise(newends.ends, availends.comps))), 1)
        return availends[(worst_end < maxendspurious)
                         & (worst_comp < maxcompspurious)]
    return endfilter
def energy_array_uniform(seqs, energetics):
    """
    Given an endarray and a set of sequences, return an array of the
    interactions between them, including their complements.
    """
    # NOTE(review): plain numpy arrays have no .append method; this relies on
    # whatever seqs.ends returns (an endarray?) providing an append that
    # concatenates ends with their complements -- confirm, as an
    # np.ndarray here would raise AttributeError.
    seqs = seqs.ends.append(seqs.comps)
    # Full cross product: row i / column j is the interaction of sequence i
    # with sequence j.
    return energetics.uniform(
        np.repeat(seqs, seqs.shape[0], 0), np.tile(
            seqs, (seqs.shape[0], 1))).reshape((seqs.shape[0], seqs.shape[0]))
def endchooser_standard(desint, wiggle=0.0):
    """
    An endchooser function: return a random end whose end-complement energy
    is closest to desint (within an optional wiggle tolerance).
    """
    def endchooser(currentends, availends, energetics):
        # Distance of every available end's matching energy from the target.
        distance = np.abs(energetics.matching_uniform(availends) - desint)
        # Candidates: everything within `wiggle` of the best distance.
        best = np.flatnonzero(distance <= np.amin(distance) + wiggle)
        pick = best[np.random.randint(0, len(best))]
        return availends[pick]
    return endchooser
def endchooser_random():
    """
    An endchooser function: return an end drawn uniformly at random from the
    available ends; the energetics argument is ignored.
    """
    def endchooser(currentends, availends, energetics):
        index = np.random.randint(0, len(availends))
        return availends[index]
    return endchooser
|
999,127 | 42ec590228d9c1af3dd94910cea963c38b4b862b | from django.shortcuts import render, redirect
from django.views.generic import TemplateView, CreateView, ListView, DetailView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from .models import *
from django.core.exceptions import PermissionDenied
from django.db import connection
from django.db.models import Sum, Count
import datetime
from registration.backends.simple.views import RegistrationView
from forms import UserProfileRegistrationForm
from .forms import *
# Create your views here.
class Home(TemplateView):
    """Render the static landing page."""
    template_name = "home.html"
class PledgeCreateView(CreateView):
    """Create a Pledge owned by the logged-in user."""
    model = Pledge
    template_name = "pledge/pledge_form.html"
    fields = ['amount']
    success_url = reverse_lazy('pledge_list')
    def form_valid(self,form):
        # Stamp the pledge with the requesting user before saving.
        form.instance.user = self.request.user
        return super(PledgeCreateView, self).form_valid(form)
class ContributionCreateView(CreateView):
    """Create a Contribution owned by the logged-in user."""
    model = Contribution
    template_name = "contribution/contribution_form.html"
    fields = ['amount', 'notes']
    success_url = reverse_lazy('contribution_list')
    def form_valid(self,form):
        # Stamp the contribution with the requesting user before saving.
        form.instance.user = self.request.user
        return super(ContributionCreateView, self).form_valid(form)
class PledgeListView(ListView):
    """Paginated list of all pledges."""
    model = Pledge
    template_name = "pledge/pledge_list.html"
    paginate_by = 10
class ContributionListView(ListView):
    """Paginated list of all contributions."""
    model = Contribution
    template_name = "contribution/contribution_list.html"
    paginate_by = 10
class PledgeDetailView(DetailView):
    """Detail page for a single pledge."""
    model = Pledge
    template_name = 'pledge/pledge_detail.html'
class ContributionDetailView(DetailView):
    """Detail page for a single contribution."""
    model = Contribution
    template_name = 'contribution/contribution_detail.html'
class PledgeUpdateView(UpdateView):
    """Edit a pledge; only its owner may do so."""
    model = Pledge
    template_name = 'pledge/pledge_form.html'
    fields = ['amount']
    def get_object(self, *args, **kwargs):
        # Ownership check: anyone but the pledge's owner gets a 403.
        object = super(PledgeUpdateView, self).get_object(*args, **kwargs)
        if object.user != self.request.user:
            raise PermissionDenied()
        return object
class ContributionUpdateView(UpdateView):
    """Edit a contribution; only its owner may do so."""
    model = Contribution
    template_name = 'contribution/contribution_form.html'
    fields = ['amount', 'notes']
    def get_object(self, *args, **kwargs):
        # Ownership check: anyone but the contribution's owner gets a 403.
        object = super(ContributionUpdateView, self).get_object(*args, **kwargs)
        if object.user != self.request.user:
            raise PermissionDenied()
        return object
class PledgeDeleteView(DeleteView):
    """Delete a pledge; only its owner may do so."""
    model = Pledge
    template_name = 'pledge/pledge_confirm_delete.html'
    success_url = reverse_lazy('pledge_list')
    def get_object(self, *args, **kwargs):
        # Ownership check: anyone but the pledge's owner gets a 403.
        object = super(PledgeDeleteView, self).get_object(*args, **kwargs)
        if object.user != self.request.user:
            raise PermissionDenied()
        return object
class ContributionDeleteView(DeleteView):
    """Delete a contribution; only its owner may do so."""
    model = Contribution
    template_name = 'contribution/contribution_confirm_delete.html'
    success_url = reverse_lazy('contribution_list')
    def get_object(self, *args, **kwargs):
        # Ownership check: anyone but the contribution's owner gets a 403.
        object = super(ContributionDeleteView, self).get_object(*args, **kwargs)
        if object.user != self.request.user:
            raise PermissionDenied()
        return object
class UserDetailView(DetailView):
    """Public profile page: a user plus their contributions and pledges."""
    model = User
    slug_field = 'username'
    template_name = 'user/user_detail.html'
    context_object_name = 'user_in_view'
    def get_context_data(self, **kwargs):
        context = super(UserDetailView, self).get_context_data(**kwargs)
        # DetailView has already resolved the user from the username slug;
        # reuse self.object instead of issuing a second identical query.
        user_in_view = self.object
        context['contributions'] = Contribution.objects.filter(user=user_in_view)
        context['pledges'] = Pledge.objects.filter(user=user_in_view)
        # NOTE: this is a queryset (possibly empty), not a single profile.
        context['userprofile'] = UserProfile.objects.filter(user=user_in_view)
        return context
class UserUpdateView(UpdateView):
    """Let a user edit their own account/profile."""
    model = User
    slug_field = "username"
    template_name = "user/user_form.html"
    form_class = UserProfileUpdateForm
    def get_success_url(self):
        # Bug fix: only reverse_lazy is imported in this module; the original
        # call to reverse() raised NameError after a successful update.
        return reverse_lazy('user_detail', args=[self.request.user.username])
    def get_object(self, *args, **kwargs):
        # Users may only edit themselves; anyone else gets a 403.
        object = super(UserUpdateView, self).get_object(*args, **kwargs)
        if object != self.request.user:
            raise PermissionDenied()
        return object
class AboutUsView(TemplateView):
    """Render the static about-us page."""
    template_name = "about_us.html"
class ParentTipsView(TemplateView):
    """Render the static parent-tips page."""
    template_name = "parent_tips.html"
class UserListView(ListView):
    """All-time top five contributors, ranked by total contributed amount."""
    model = User
    template_name = 'leaderboards.html'
    def get_queryset(self):
        queryset = super(UserListView, self).get_queryset()
        # Annotate each user with their contribution count/total, then keep
        # only the five biggest totals.
        return queryset.annotate(
            contributions_count=Count('contribution'),
            contributions_total=Sum('contribution__amount'),
        ).order_by("-contributions_total")[:5]
class MonthlyListView(ListView):
    """Top five contributors for the current calendar month."""
    model = User
    template_name = 'monthly_leaderboards.html'
    def get_queryset(self):
        today = datetime.date.today()
        this_month_start = today.replace(day=1)
        # Compute the first day of next month (handle December rollover).
        if today.month == 12:
            next_month_start = today.replace(year=today.year + 1, month=1, day=1)
        else:
            next_month_start = today.replace(month=today.month + 1, day=1)
        # Bug fix: the original built this annotated queryset but discarded
        # it and returned the unfiltered default, so the leaderboard never
        # reflected monthly totals. Return the computed queryset instead.
        return User.objects.filter(
            contribution__date__gte=this_month_start,
            contribution__date__lt=next_month_start,
        ).annotate(
            monthly_count=Count('contribution'),
            monthly_total=Sum('contribution__amount')
        ).order_by("-monthly_total")[:5]
class SearchContributionListView(ContributionListView):
    """Contribution list filtered by a case-insensitive search on notes."""
    def get_queryset(self):
        # Empty query matches everything (icontains '').
        incoming_query_string = self.request.GET.get('query', '')
        return Contribution.objects.filter(notes__icontains=incoming_query_string)
class UserDeleteView(DeleteView):
    """Soft-delete the requesting user's own account."""
    model = User
    slug_field = "username"
    template_name = 'user/user_confirm_delete.html'
    def get_success_url(self):
        return reverse_lazy('logout')
    def get_object(self, *args, **kwargs):
        # Users may only delete themselves; anyone else gets a 403.
        object = super(UserDeleteView, self).get_object(*args, **kwargs)
        if object != self.request.user:
            raise PermissionDenied()
        return object
    def delete(self, request, *args, **kwargs):
        # Soft delete: deactivate the account rather than removing the row,
        # preserving contribution/pledge history.
        user = super(UserDeleteView, self).get_object(*args)
        user.is_active = False
        user.save()
        return redirect(self.get_success_url())
class UserProfileRegistrationView(RegistrationView):
    """Registration flow that also creates the user's UserProfile."""
    form_class = UserProfileRegistrationForm
    def register(self, request, form_class):
        new_user = super(UserProfileRegistrationView, self).register(request, form_class)
        user_profile = UserProfile()
        user_profile.user = new_user
        user_profile.state = form_class.cleaned_data['state']
        user_profile.save()
        # NOTE(review): django-registration conventionally expects register()
        # to return the new *user*; returning the profile here may confuse
        # downstream signal handlers -- verify before relying on it.
        return user_profile
    def get_success_url(self, request, user):
        return reverse_lazy('home')
999,128 | 64c6f1d63d761614cdaaaaecb0432da3c97c281d | n1 = float(input('Digite a n1: '))
n2 = float(input('Digite a n2: '))
# Average of the two grades decides the outcome.
calcNota = (n1 + n2) / 2
if calcNota < 5.0:
    print('Sua média é {:.1f}, você foi reprovado'.format(calcNota))
elif calcNota < 7.0:
    # Bug fix: the original tested `calcNota == 5.0 and calcNota < 6.9`,
    # so any average strictly between 5.0 and 7.0 (e.g. 6.0) was wrongly
    # reported as approved. Per the spec below, 5.0 <= média < 7.0 means
    # recuperação.
    print('Sua média foi {:.1f}, você tá na recuperação'.format(calcNota))
else:
    print('Sua média foi {:.1f}, você foi aprovado!'.format(calcNota))
#Exercício Python 040: Crie um programa que leia duas notas de um aluno e calcule sua média, mostrando uma mensagem
# no final, de acordo com a média atingida:
#- Média abaixo de 5.0: REPROVADO
#- Média entre 5.0 e 6.9: RECUPERAÇÃO
#- Média 7.0 ou superior: APROVADO
999,129 | 0758176e6a487a375069b6cec7f378d49cd5063d | ### LED THINGS BELOW
# Turn backlight off
#lcd.backlight = False
#user input to switch this variable
from datetime import datetime
calibration = False
def CalibrateSensor():
    """Start the calibration sequence, or fall back to full-range defaults."""
    # NOTE(review): every assignment below creates a *local* that is discarded
    # when the function returns; without `global CalibState, minR, ...` (or a
    # calibration object) this function has no lasting effect -- confirm the
    # intended module-level state handling.
    if calibration == True:
        print('Calibrating Black...')
        CalibState = 0
    else:
        # Calibration disabled: assume the full 0-255 sensor range.
        minR = minG = minB = 0
        maxR = maxG = maxB = 255
    ## Prompt user to turn off all lights. change to person input,
    # do right after CalibrateSensor. Run this program again but
    # also prompt the user to turn on all the lights.
def ButtonPressed():
    """Advance the calibration state machine on a button press."""
    # NOTE(review): CalibState is read here but assigned later in this
    # function, making it a local -- at runtime this raises
    # UnboundLocalError; the min*/max* assignments are likewise locals that
    # do not persist. A `global` declaration appears to be missing -- confirm.
    if CalibState == 0 :
        # First press: capture the black (dark) reference reading.
        minR = R_one
        minG = G_one
        minB = B_one
        print('Calibrating White...')
        CalibState = 1
    elif CalibState == 1:
        # Second press: capture the white (bright) reference reading.
        maxR = R_one
        maxG = G_one
        maxB = B_one
        # NOTE(review): `calibState` (lower-case c) is a different name from
        # CalibState -- likely a typo that leaves the state machine stuck.
        calibState = 2
        print('Calibration Complete')
def constrain(val, min_val, max_val):
    """Clamp val into the range [min_val, max_val]."""
    lower_bounded = max(min_val, val)
    return min(max_val, lower_bounded)
def map(value, minVal, maxVal, newMin, newMax):
    """Linearly rescale value from [minVal, maxVal] into [newMin, newMax].

    NOTE: this shadows the builtin map() for the rest of the module.
    """
    input_span = maxVal - minVal
    output_span = newMax - newMin
    return newMin + (value - minVal) * output_span / input_span
def collect(Red1,Green1,Blue1,Red2,Green2,Blue2,time2,time3):
    """Read both RGB sensors once, remap/clamp the readings, and append
    them (plus capture timestamps) to the supplied history lists.

    Returns the same eight lists, mutated in place.
    """
    #turn off LED light using pin low for led light
    # NOTE(review): with min*=0 and max*=255 fixed here, the map() calls
    # below are identity transforms; presumably these bounds were meant to
    # come from the calibration routine -- confirm.
    minR = minG = minB = 0
    maxR = maxG = maxB = 255
    # Raw reading from sensor 1, then its capture timestamp.
    R_one = sensor1.color_rgb_bytes[0]
    G_one = sensor1.color_rgb_bytes[1]
    B_one = sensor1.color_rgb_bytes[2]
    time2.append(datetime.now())
    # Raw reading from sensor 2, then its capture timestamp.
    R_two = sensor2.color_rgb_bytes[0]
    G_two = sensor2.color_rgb_bytes[1]
    B_two = sensor2.color_rgb_bytes[2]
    time3.append(datetime.now())
    #Re-Map values
    R_one_m = map(R_one, minR, maxR, 0, 255)
    G_one_m = map(G_one, minG, maxG, 0, 255)
    B_one_m = map(B_one, minB, maxB, 0, 255)
    R_two_m = map(R_two, minR, maxR, 0, 255)
    G_two_m = map(G_two, minG, maxG, 0, 255)
    B_two_m = map(B_two, minB, maxB, 0, 255)
    #constrain values
    R_one = constrain(R_one_m, 0, 255)
    G_one = constrain(G_one_m, 0, 255)
    B_one = constrain(B_one_m, 0, 255)
    R_two = constrain(R_two_m, 0, 255)
    G_two = constrain(G_two_m, 0, 255)
    B_two = constrain(B_two_m, 0, 255)
    #Gr_one = (0.3 * R_one) + (0.59 * G_one) + (0.11 * B_one)
    #Gr_two = (0.3 * R_two) + (0.59 * G_two) + (0.11 * B_two)
    # print values on screen/store values in array
    print("RGB_1 ", R_one, G_one, B_one)
    Red1.append(R_one)
    Green1.append(G_one)
    Blue1.append(B_one)
    #collect.Gray1.append(Gr_one)
    print("RGB_2 ", R_two, G_two, B_two)
    Red2.append(R_two)
    Green2.append(G_two)
    Blue2.append(B_two)
    #collect.Gray2.append(Gr_two)
    # time sleep?
    return Red1,Green1,Blue1,Red2,Green2,Blue2,time2,time3
def getGrAvg(r, g, b):
    """Grayscale (luma) value from the average of the last up-to-10 samples.

    Bug fix: the sums covered only the last 10 samples but the original
    divided by the full history length, understating the average once more
    than 10 samples had been collected. Divide by the window size instead.
    """
    recent_r = r[-10:]
    recent_g = g[-10:]
    recent_b = b[-10:]
    avg_r = sum(recent_r) / len(recent_r)
    avg_g = sum(recent_g) / len(recent_g)
    avg_b = sum(recent_b) / len(recent_b)
    # Standard luma weights for RGB -> grayscale.
    return (0.3 * avg_r) + (0.59 * avg_g) + (0.11 * avg_b)
def getGr(r, g, b):
    """Grayscale (luma) value of the most recent RGB sample."""
    latest = (r[-1], g[-1], b[-1])
    weights = (0.3, 0.59, 0.11)
    return sum(w * channel for w, channel in zip(weights, latest))
def check_rgb(R_one, G_one, B_one):
    """Print the sensor-1 RGB reading while calibration is still in progress."""
    if calibration == True:
        # Bug fix: the original wrote `CalibState << 2` (a bit-shift, truthy
        # for any non-zero state); `< 2` tests "not yet finished" (states 0
        # and 1), matching the surrounding calibration state machine.
        if CalibState < 2:
            print("RGB_1 ", R_one, G_one, B_one)
|
999,130 | 47aa3b6908fa830274bf219f816ff1df89914491 | def greet():
print("Hey there")
print("Welcome Beatriz")
greet() |
999,131 | e9ba567ee6fa84faf987d8053d7601af34f912ca | import heapq
def solve(k, xs):
    """Greedily take the largest value k times, halving it after each take."""
    # Max-heap via negation (heapq is a min-heap).
    heap = [-value for value in xs]
    heapq.heapify(heap)
    total = 0
    for _ in range(k):
        largest = -heap[0]
        total += largest
        # Push back the halved value (negated again for the heap).
        heapq.heapreplace(heap, -(largest / 2))
    return total
if __name__ == '__main__':
    # Python 2 driver: first line is the number of test cases; each case is
    # "n k" on one line followed by n integers (n itself is unused by solve).
    for _ in range(int(raw_input())):
        _, k = map(int, raw_input().split())
        xs = map(int, raw_input().split())
        print solve(k, xs)
|
999,132 | 65d16497374188369e76c754cdbfeb6c96b73b4e | import event_handling
from MathMan import variables
def reset(new_game):
    """Restore the maze/board to its starting layout for a new life or game.

    Resets the hero, buff/fruit timers, and the tile grid; when new_game is
    True, score and lives are reset as well.
    """
    variables.ghost_reset = True
    event_handling.my_hero.direction = None
    variables.math_man_buff = False
    variables.buff_timer = 0
    # Fruit respawn countdown scales with the configured game speed.
    variables.fruit_timer = int(variables.speed_setting) * 60
    # Tile codes: B__ = wall pieces (D/U = down/up corner, R/L = right/left,
    # H/V = horizontal/vertical), P_N = pellet, P_B = power pellet,
    # M_M = Math-Man spawn, NON = empty.
    variables.grid = [['BDR','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','BDL','BDR','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','BDL'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['B_V','P_N','BDR','B_H','B_H','BDL','P_N','BDR','B_H','B_H','B_H','BDL','P_N','B_V','B_V','P_N','BDR','B_H','B_H','B_H','BDL','P_N','BDR','B_H','B_H','BDL','P_N','B_V'],
                      ['B_V','P_B','B_V','NON','NON','B_V','P_N','B_V','NON','NON','NON','B_V','P_N','B_V','B_V','P_N','B_V','NON','NON','NON','B_V','P_N','B_V','NON','NON','B_V','P_B','B_V'],
                      ['B_V','P_N','BUR','B_H','B_H','BUL','P_N','BUR','B_H','B_H','B_H','BUL','P_N','BUR','BUL','P_N','BUR','B_H','B_H','B_H','BUL','P_N','BUR','B_H','B_H','BUL','P_N','B_V'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['B_V','P_N','BDR','B_H','B_H','BDL','P_N','BDR','BDL','P_N','BDR','B_H','B_H','B_H','B_H','B_H','B_H','BDL','P_N','BDR','BDL','P_N','BDR','B_H','B_H','BDL','P_N','B_V'],
                      ['B_V','P_N','BUR','B_H','B_H','BUL','P_N','B_V','B_V','P_N','BUR','B_H','B_H','BDL','BDR','B_H','B_H','BUL','P_N','B_V','B_V','P_N','BUR','B_H','B_H','BUL','P_N','B_V'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['BUR','B_H','B_H','B_H','B_H','BDL','P_N','B_V','BUR','B_H','B_H','BDL','NON','B_V','B_V','NON','BDR','B_H','B_H','BUL','B_V','P_N','BDR','B_H','B_H','B_H','B_H','BUL'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','BDR','B_H','B_H','BUL','NON','BUR','BUL','NON','BUR','B_H','B_H','BDL','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','B_V','NON','NON','NON','NON','NON','NON','NON','NON','NON','NON','B_V','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','B_V','NON','BDR','B_H','B_H','NON','NON','B_H','B_H','BDL','NON','B_V','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['B_H','B_H','B_H','B_H','B_H','BUL','P_N','BUR','BUL','NON','B_V','NON','NON','NON','NON','NON','NON','B_V','NON','BUR','BUL','P_N','BUR','B_H','B_H','B_H','B_H','B_H'],
                      ['NON','NON','NON','NON','NON','NON','P_N','NON','NON','NON','B_V','NON','NON','NON','NON','NON','NON','B_V','NON','NON','NON','P_N','NON','NON','NON','NON','NON','NON'],
                      ['NON','NON','NON','NON','NON','NON','P_N','NON','NON','NON','B_V','NON','NON','NON','NON','NON','NON','B_V','NON','NON','NON','P_N','NON','NON','NON','NON','NON','NON'],
                      ['B_H','B_H','B_H','B_H','B_H','BDL','P_N','BDR','BDL','NON','B_V','NON','NON','NON','NON','NON','NON','B_V','NON','BDR','BDL','P_N','BDR','B_H','B_H','B_H','B_H','B_H'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','B_V','NON','BUR','B_H','B_H','B_H','B_H','B_H','B_H','BUL','NON','B_V','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','B_V','NON','NON','NON','NON','NON','NON','NON','NON','NON','NON','B_V','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['NON','NON','NON','NON','NON','B_V','P_N','B_V','B_V','NON','BDR','B_H','B_H','B_H','B_H','B_H','B_H','BDL','NON','B_V','B_V','P_N','B_V','NON','NON','NON','NON','NON'],
                      ['BDR','B_H','B_H','B_H','B_H','BUL','P_N','BUR','BUL','NON','BUR','B_H','B_H','BDL','BDR','B_H','B_H','BUL','NON','BUR','BUL','P_N','BUR','B_H','B_H','B_H','B_H','BDL'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['B_V','P_N','BDR','B_H','B_H','BDL','P_N','BDR','B_H','B_H','B_H','BDL','P_N','B_V','B_V','P_N','BDR','B_H','B_H','B_H','BDL','P_N','BDR','B_H','B_H','BDL','P_N','B_V'],
                      ['B_V','P_N','BUR','B_H','BDL','B_V','P_N','BUR','B_H','B_H','B_H','BUL','P_N','BUR','BUL','P_N','BUR','B_H','B_H','B_H','BUL','P_N','B_V','BDR','B_H','BUL','P_N','B_V'],
                      ['B_V','P_B','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','M_M','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_B','B_V'],
                      ['BUR','B_H','BDL','P_N','B_V','B_V','P_N','BDR','BDL','P_N','BDR','B_H','B_H','B_H','B_H','B_H','B_H','BDL','P_N','BDR','BDL','P_N','B_V','B_V','P_N','BDR','B_H','BUL'],
                      ['BDR','B_H','BUL','P_N','BUR','BUL','P_N','B_V','B_V','P_N','BUR','B_H','B_H','BDL','BDR','B_H','B_H','BUL','P_N','B_V','B_V','P_N','BUR','BUL','P_N','BUR','B_H','BDL'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','B_V','B_V','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['B_V','P_N','BDR','B_H','B_H','B_H','B_H','BUL','BUR','B_H','B_H','BDL','P_N','B_V','B_V','P_N','BDR','B_H','B_H','BUL','BUR','B_H','B_H','B_H','B_H','BDL','P_N','B_V'],
                      ['B_V','P_N','BUR','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','BUL','P_N','BUR','BUL','P_N','BUR','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','BUL','P_N','B_V'],
                      ['B_V','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','P_N','B_V'],
                      ['BUR','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','B_H','BUL']]
    if new_game:
        # Fresh game (not just a lost life): reset score and lives too.
        variables.score = 0
        variables.lives = 3
999,133 | 55169f46e53af88d023f59e945dd249d5a0d1629 | fist_number, second_number, third_number = [int(input()) for _ in range(3)]
if 1 <= fist_number <= 9 and 1 <= second_number <= 9 and 1 <= third_number <= 9:
    # Print every combination of even a, single-digit prime b, even c.
    for a in range(2, fist_number + 1, 2):
        for b in range(2, second_number + 1):
            for c in range(2, third_number + 1, 2):
                # Membership test replaces the chained-or prime check.
                if b in (2, 3, 5, 7):
                    print(f"{a} {b} {c}")
else:
    # Bug fix: the original did print(input(...)), which blocked waiting for
    # a fourth line of input and then echoed whatever the user typed instead
    # of showing the validation message.
    print("Needs to be between 1 and 9: ")
|
999,134 | 5dcebc13fdfe250e37e578b3c4ddc22c08acbafd | from django.db import models
# from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    """A company profile registered with the site."""
    company_name = models.CharField(max_length=200)
    description = models.TextField(max_length=200)
    owner = models.CharField(max_length=200)
    # SECURITY NOTE(review): storing a raw password in a CharField keeps it
    # in plaintext; use Django's auth User and password hashing instead.
    password = models.CharField(max_length=50)
    email = models.EmailField()
    # NOTE(review): IntegerField drops leading zeros and limits length; phone
    # numbers are usually better stored as CharField -- confirm requirements.
    phone = models.IntegerField()
    class Meta:
        verbose_name_plural = 'Profile'
    def __str__(self):
        return self.company_name
class Report(models.Model):
    """A user-submitted report with contact details."""
    name = models.CharField(max_length=200)
    statement = models.TextField(max_length=200)
    email = models.EmailField()
    # NOTE(review): IntegerField is a poor fit for phone numbers (drops
    # leading zeros); consider CharField -- confirm before a schema change.
    phone = models.IntegerField()
    class Meta:
        verbose_name_plural = 'Report'
    def __str__(self):
        return self.name
999,135 | 90bf5a045e9343fc9b7c976994c63d8d5cf58a47 | # Generated by Django 3.1.5 on 2021-05-23 11:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the app: DepartmentD, SectionS, and profile.

    Auto-generated by Django (makemigrations); do not edit by hand --
    create a follow-up migration for any schema change.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='DepartmentD',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('DepName', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='SectionS',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('section', models.CharField(max_length=150)),
                ('Dep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='acc.departmentd')),
            ],
        ),
        migrations.CreateModel(
            name='profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('FullName', models.CharField(blank=True, max_length=50, null=True, verbose_name='Fullname')),
                ('Mobile', models.CharField(blank=True, max_length=150, null=True, verbose_name=' Mobile')),
                ('Address', models.CharField(blank=True, max_length=150, null=True, verbose_name=' Mobile')),
                ('image', models.ImageField(blank=True, null=True, upload_to='profile', verbose_name='Images')),
                ('slug', models.SlugField(blank=True, null=True, verbose_name='slug')),
                ('DepNameD', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='acc.departmentd', verbose_name='Department')),
                ('secS', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='acc.sections', verbose_name='section')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
        ),
    ]
|
999,136 | 97fdc4031bfb2d296e56728c208a44ccec5ebafd | import random
class Credito:
    """Credit-limit calculator based on a score band and declared income."""
    def __init__(self, score, renda):
        self.score = score
        self.renda = renda
        # Score band -> rule callback; bands are disjoint half-open ranges.
        self.REGRA_LIMITE = {
            range(1, 300): self._reprovado,
            range(300, 600): self._mil,
            range(600, 800): self._valor_minimo_se_cinquenta_porcento,
            range(800, 951): self._duzentos_porcento,
            range(951, 1000): self._sem_limite
        }
    def calcular_limite(self):
        """Dispatch to the rule whose score band contains this score."""
        regra = next(
            (callback for faixa, callback in self.REGRA_LIMITE.items()
             if self.score in faixa),
            self._reprovado)
        return regra()
    def _reprovado(self):
        # Out of range or below the minimum band: no credit.
        return 0
    def _mil(self):
        return 1000
    def _valor_minimo_se_cinquenta_porcento(self):
        # Half the income, unless that would fall under 1000 -- then the
        # full income is used instead.
        metade = self.renda * 0.5
        return self.renda if metade < 1000 else metade
    def _duzentos_porcento(self):
        return self.renda * 2
    def _sem_limite(self):
        return 1000000
def get_score():
    """Draw a simulated credit score uniformly from 1..999 inclusive."""
    return random.randrange(1, 1000)
class Cpf:
    """Validator for Brazilian CPF numbers (11 digits, two check digits)."""
    def __init__(self, value):
        # Positional weights for the first and second check digits.
        self._pesos = {
            'primeiro': [10, 9, 8, 7, 6, 5, 4, 3, 2],
            'segundo': [11, 10, 9, 8, 7, 6, 5, 4, 3, 2],
        }
        self.value = value
    def is_valid(self):
        """True iff both check digits match and the digits are not all equal."""
        self.value = [int(digit) for digit in self.value]
        if self._valida_se_repetido():
            # All-equal CPFs (e.g. 111.111.111-11) pass the checksum but
            # are invalid by definition.
            return False
        return self._valida_primeiro() and self._valida_segundo()
    def _calcula_digito(self, peso):
        # Weighted sum of the leading digits, mod-11 rule.
        total = sum(digit * weight for digit, weight in zip(self.value, peso))
        resto = total % 11
        return 0 if resto < 2 else 11 - resto
    def _valida_primeiro(self):
        return self.value[-2] == self._calcula_digito(self._pesos['primeiro'])
    def _valida_segundo(self):
        return self.value[-1] == self._calcula_digito(self._pesos['segundo'])
    def _valida_se_repetido(self):
        return len(set(self.value)) == 1
|
999,137 | e96a2e664a69e970a8ca55604bdd28f6fbdc0672 | from django import forms
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.utils.html import conditional_escape
from django.utils.encoding import force_str
from django import VERSION as DJANGO_VERSION
if DJANGO_VERSION < (1, 8):
from django.forms.util import flatatt
else:
from django.forms.utils import flatatt
from mezzanine.conf import settings
class PageDownWidget(forms.Textarea):
    """
    Widget providing Markdown editor using PageDown JavaScript, and live
    preview.
    Live preview can be generated client-side using PageDown, or
    server-side using python-markdown.
    """
    class Media:
        css = {'all': (
            'mezzanine_pagedown/css/pagedown.css',
            'mezzanine/css/smoothness/jquery-ui-1.9.1.custom.min.css',)}
        js = ('mezzanine_pagedown/pagedown/Markdown.Converter.js',
              'mezzanine_pagedown/pagedown/Markdown.Sanitizer.js',
              'mezzanine_pagedown/pagedown/Markdown.Editor.js',
              'mezzanine/js/%s' % settings.JQUERY_FILENAME,
              'mezzanine/js/%s' % settings.JQUERY_UI_FILENAME,
              'filebrowser/js/filebrowser-popup.js',
              'mezzanine_pagedown/js/jquery.ba-throttle-debounce.min.js',
              'mezzanine_pagedown/js/jquery.cookie.js')
    def __init__(self, template=None, *args, **kwargs):
        # Allow a custom editor template; fall back to the bundled one.
        self.template = template or 'mezzanine_pagedown/editor.html'
        super(PageDownWidget, self).__init__(*args, **kwargs)
    def render(self, name, value, attrs=None, renderer=None):
        """Render the editor HTML for this field."""
        # Bug fix: the mutable default `attrs={}` was shared across calls;
        # use a None sentinel (this also tolerates an explicit attrs=None).
        if attrs is None:
            attrs = {}
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, extra_attrs=dict(name=name))
        final_id = ''
        if 'id' in final_attrs:
            # Hyphens are invalid in JS identifiers, so the template gets an
            # underscore version of the element id.
            final_id = final_attrs['id'].replace('-', '_')
            del final_attrs['id']
        return mark_safe(render_to_string(self.template, {
            'final_attrs': flatatt(final_attrs),
            'value': conditional_escape(force_str(value)),
            'id': final_id,
            'server_side_preview': settings.PAGEDOWN_SERVER_SIDE_PREVIEW,
        }))
class PlainWidget(forms.Textarea):
    """
    A regular Textarea widget that is compatible with mezzanine richtext.
    """

    class Media:
        # No extra assets needed; present for API parity with
        # PageDownWidget.
        pass
|
999,138 | 62a328bc46a06d2a5ac72926faabd72237f458e9 |
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr
from email.header import Header
def send_automail(mail_name, stock_name, result_str, status_result, set_price, cur_val, update_time):
    """Send a price-alert notification e-mail via Gmail SMTP.

    Args:
        mail_name: iterable of recipient addresses.
        stock_name: ticker/name shown in the subject and body.
        result_str, status_result: human-readable alert descriptions.
        set_price: the threshold price configured by the user.
        cur_val: the current stock value.
        update_time: timestamp of the quote.
    """
    # Send via Gmail: replace the None values with your user and
    # password, or supply them through a local Credential module.
    user = None
    password = None
    if None in (user, password):
        try:
            from Credential import email_credential
            user, password = email_credential()
        except ImportError:
            print("Please replace the None value in user and password variable to your gmail Credential")
            os.abort()
    sender = user
    receiver = mail_name
    mail = MIMEMultipart('alternative')
    mail['Subject'] = "Notification (Stock Name: {})".format(stock_name)
    mail['From'] = formataddr((str(Header('StockBot', 'utf-8')), sender))
    mail['To'] = ', '.join(receiver)
    message = """
</style>
<!--[if gte mso 9]><xml>
<o:shapedefaults v:ext="edit" spidmax="1026" />
</xml><![endif]-->
<!--[if gte mso 9]><xml>
<o:shapelayout v:ext="edit">
<o:idmap v:ext="edit" data="1" />
</o:shapelayout></xml><![endif]-->
</meta>
</meta>
</head>
<body lang="EN-US" link="#0563C1" vlink="#954F72">
<div class="WordSection1">
<p class="MsoNormal">
Hello!
<o:p>
</o:p>
</p>
<p class="MsoNormal">
<o:p>
</o:p>
</p>
<p class="MsoNormal">
Stock Name: {}
<o:p>
</o:p>
</p>
<p class="MsoNormal">
{}!
<o:p>
</o:p>
</p>
<p class="MsoNormal">
<o:p>
</o:p>
</p>
<p class="MsoNormal">
{} Price: {}
<o:p>
</o:p>
</p>
<p class="MsoNormal">
Current Value: {} As of {}
<o:p>
</o:p>
</p>
<p class="MsoNormal">
<o:p>
</o:p>
</p>
<p class="MsoNormal">
Goodluck!
<o:p>
</o:p>
</p>
<p class="MsoNormal">
<o:p>
</o:p>
</p>
<p class="MsoNormal">
<o:p>
</o:p>
</p>
</div>
</body>
</html>
""".format(stock_name, result_str, status_result, set_price, cur_val, update_time)
    # Fixed: a second, duplicated credential-loading block used to sit
    # here; it reset user/password to None and re-imported Credential,
    # which was redundant (and clobbered any hard-coded credentials).
    html_in = MIMEText(message, 'html')
    mail.attach(html_in)
    smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
    smtpObj.ehlo()
    smtpObj.starttls()
    smtpObj.login(user, password)
    smtpObj.sendmail(sender, receiver, mail.as_string())
    smtpObj.quit()
    print("Message Sent!")
|
999,139 | 20aed1c454a54c3e53a475c898416d85ba1c5938 | def sum_odd_fib(n):
prev_n = 0
curr_n = 1
result = 0
while (curr_n <= n):
if curr_n % 2 != 0:
result += curr_n
curr_n += prev_n
prev_n = curr_n - prev_n
return result
# Test: expected sums of the odd Fibonacci numbers <= n.
print(sum_odd_fib(10)) # 10
print(sum_odd_fib(1000)) # 1785
print(sum_odd_fib(4000000)) # 4613732
|
999,140 | e25ef9991dc71ea5fe6ead1df4f354bd4606aa21 | from sqlite3 import *
from random import *
import os
def convert(k):
    """Return k as a string, left-padded with a single '0' when k < 10."""
    return str(k) if k >= 10 else '0' + str(k)
def conv_time(t):
    """Format a time t, given in seconds since midnight, as 'hh:mm:ss'."""
    h, reste = divmod(t, 3600)
    m, s = divmod(reste, 60)
    return ':'.join(convert(x) for x in (h, m, s))
def copie_bdd(nb):
    """Create nb copies of hotel.db under ./bdd/ (hotel_01.db, ...)."""
    if not os.path.exists('./bdd/'):
        os.mkdir('./bdd/')
    for numero in range(1, nb + 1):
        os.system('cp hotel.db ' + './bdd/hotel_' + convert(numero) + '.db')
    return None
def supprimer_client(nom_de_bdd, id_clients):
    """Delete every row tied to each client id in id_clients.

    Removes the client row and its dependent rows (e-mails, phone
    numbers, addresses, invoices) from the database at nom_de_bdd.
    """
    # Fixed docstring (was a copy of another function's) and switched to
    # parameterized queries instead of string-concatenated SQL.
    tables = ('T_CLIENT', 'T_EMAIL', 'T_TELEPHONE', 'T_ADRESSE', 'T_FACTURE')
    c = connect(nom_de_bdd)
    for id_client in id_clients:
        for table in tables:
            c.execute("DELETE FROM {} WHERE CLI_ID=?".format(table), (id_client,))
    c.commit()
    c.close()
def supprimer_facture(nom_de_bdd, id_factures):
    """Delete each invoice in id_factures together with its line items."""
    # Fixed docstring (was a copy of supprimer_client's) and switched to
    # parameterized queries instead of string-concatenated SQL.
    tables = ('T_LIGNE_FACTURE', 'T_FACTURE')
    c = connect(nom_de_bdd)
    for id_facture in id_factures:
        for table in tables:
            c.execute("DELETE FROM {} WHERE FAC_ID=?".format(table), (id_facture,))
    c.commit()
    c.close()
def generer_clients_a_effacer(i):
    """Build the deterministic list of client ids to delete for db #i."""
    id_clients = [] if i % 2 == 0 else [22, 98]
    if i % 5 == 0:
        id_clients.append(2)
    if i % 6 == 0:
        id_clients.append(94)
    for k in range(i // 7 + 1):
        dec = 11 * (k + 1)
        # Id 100 is out of range: substitute i+11 for it.
        id_clients.append(i + 11 if i + dec == 100 else (i + dec) % 100)
    return id_clients
def generer_facture_a_effacer(i):
    """Build the list of invoice ids to delete for db #i (ids <= 2374)."""
    id_factures = []
    dec = 1
    pas = 7 + i // 10   # step is constant within the loop, hoisted
    while dec + i <= 2374:
        dec += pas
        id_factures.append(dec)
    return id_factures
def modifier_montant_remise(nom_de_bdd, i):
    """Rewrite the standard discount values so each database differs.

    Rows with a 15% discount become (i+16)%, and rows with a discount
    amount of 50 become (i+50).
    """
    # Fixed docstring (was a copy of another function's); use
    # parameterized queries and a single connection instead of two.
    c = connect(nom_de_bdd)
    c.execute("UPDATE T_LIGNE_FACTURE SET LIF_REMISE_POURCENT=? "
              "WHERE LIF_REMISE_POURCENT=15", (i + 16,))
    c.execute("UPDATE T_LIGNE_FACTURE SET LIF_REMISE_MONTANT=? "
              "WHERE LIF_REMISE_MONTANT=50", (i + 50,))
    c.commit()
    c.close()
# Driver: clone hotel.db 99 times, then make each copy unique by
# deleting a deterministic set of clients/invoices and shifting the
# discount values.
nb=99
copie_bdd(nb)
for i in range(nb):
    nom_de_bdd = './bdd/hotel_'+convert(i+1)+'.db'
    id_clients=generer_clients_a_effacer(i+0)
    id_factures=generer_facture_a_effacer(i+0)
    supprimer_client(nom_de_bdd,id_clients)
    supprimer_facture(nom_de_bdd,id_factures)
    modifier_montant_remise(nom_de_bdd,i)
# To create an example: cree_bdd('ex.sqlite')
# To create the 99 exam databases: cree_plein_bdd(99)
# NOTE(review): cree_bdd / cree_plein_bdd are not defined in this file —
# presumably stale comments from an earlier version; verify.
|
999,141 | 433b1353644740b4810f955d5812583417c5a2b2 | names_list = ["Adam", "Anne", "Barry", "Barry", "Brianne", "Charlie", "Cassandra", "David", "Dana"]
# Converts names to uppercase
uppercase_names = (name.upper() for name in names_list)
print(names_list)
print(uppercase_names)
print(list(uppercase_names))
|
999,142 | 945346c3b81a7b5e94746211a4230ebb0334ba25 | # -*- coding: utf-8 -*-
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals import joblib
from collections import Counter
import re
import nltk
# from nltk.corpus import stopwords
import pandas as pd
# import numpy as np
# stopwords = set(stopwords.words("english"))
stopwords = {u'a',
u'about',
u'above',
u'after',
u'again',
u'against',
u'all',
u'am',
u'an',
u'and',
u'any',
u'are',
u'as',
u'at',
u'be',
u'because',
u'been',
u'before',
u'being',
u'below',
u'between',
u'both',
u'but',
u'by',
u'can',
u'did',
u'do',
u'does',
u'doing',
u'don',
u'down',
u'during',
u'each',
u'expect',
u'few',
u'for',
u'from',
u'further',
u'had',
u'has',
u'have',
u'having',
u'he',
u'her',
u'here',
u'hers',
u'herself',
u'him',
u'himself',
u'his',
u'how',
u'i',
u'if',
u'in',
u'into',
u'is',
u'it',
u'its',
u'itself',
u'just',
u'me',
u'more',
u'most',
u'my',
u'myself',
u'new',
u'no',
u'nor',
u'not',
u'now',
u'of',
u'off',
u'on',
u'once',
u'only',
u'or',
u'other',
u'our',
u'ours',
u'ourselves',
u'out',
u'over',
u'own',
u's',
u'same',
u'she',
u'should',
u'since',
u'so',
u'some',
u'such',
u't',
u'than',
u'that',
u'the',
u'their',
u'theirs',
u'them',
u'themselves',
u'then',
u'there',
u'these',
u'they',
u'this',
u'those',
u'through',
u'to',
u'too',
u'under',
u'until',
u'up',
u'very',
u'was',
u'we',
u'were',
u'what',
u'when',
u'where',
u'which',
u'while',
u'who',
u'whom',
u'why',
u'will',
u'with',
u'you',
u'your',
u'yours',
u'yourself',
u'yourselves'}
at_remove = re.compile(ur'@[_\s]{0,2}[a-zA-Z\d_]+\s{1}')
extra_remove = re.compile(ur"[^a-zA-Z]")
def remove_at(sent):
    """Strip @-mentions from *sent*, then blank out every non-letter."""
    sans_mentions = at_remove.sub('', sent)
    return extra_remove.sub(' ', sans_mentions)
def tokenize(sent):
    """Word-tokenize *sent*, lower-casing and dropping stopwords and
    single-character tokens."""
    words = nltk.word_tokenize(sent)
    return [w.lower() for w in words if w not in stopwords and len(w) > 1]
def find_repeating_strings(sent):
multiple_str_count = []
match = re.match(ur'(\w*([a-z])\2+\w*)',sent,re.IGNORECASE)
if match:
multiple_str_count.append(1)
else:
multiple_str_count.append(0)
return multiple_str_count
def main_preprocess(text):
    """Turn a raw tweet into the 1-row feature DataFrame the classifier
    expects: per-token counts over the pickled vocabulary, plus a
    repeated-letter indicator column."""
    twitter_preprocess = remove_at(text)
    twitter_tokens = tokenize(twitter_preprocess)
    # NOTE(review): twitter_token_sent is built but never used below.
    twitter_token_sent = [' '.join(twitter_tokens)]
    # count_vect = CountVectorizer()
    # train_data_features = count_vect.fit_transform(twitter_token_sent)
    # train_features = train_data_features.toarray()
    # (row,col) = train_features.shape
    # train_features = train_features.reshape(col)
    # Raw term frequencies for this tweet.
    feature_dict = dict((i,twitter_tokens.count(i)) for i in twitter_tokens)
    # Vocabulary fitted at training time; maps token -> column index.
    vocab_vector = joblib.load('sklearn_model/count_vector_sent.pkl')
    vocabulary = vocab_vector.vocabulary_
    feature_vector = [0]*len(vocabulary)
    for token in twitter_tokens:
        if token in vocabulary.keys():
            feature_vector[vocabulary[token]] = feature_dict[token]
    multiple_str_count = find_repeating_strings(twitter_preprocess)
    # One row of counts, with the repeated-letter flag appended as an
    # extra column.
    X_test = pd.DataFrame(feature_vector).transpose()
    X_test = pd.concat([X_test,pd.DataFrame(multiple_str_count)],axis=1)
    # print 'X_test', X_test
    # print 'Shape', X_test.shape
    return X_test
def check_sentiment(text):
    """Coerce *text* to a plain string and run preprocessing on it."""
    coerced = str(text.encode('utf-8'))
    if isinstance(coerced, str):
        return main_preprocess(coerced)
def start_predict(text):
    """Vectorize *text* and classify it with the pickled random forest.

    Returns a human-readable sentiment string, or None when the
    classifier produced no prediction.  (Python 2 module.)
    """
    test_vector = check_sentiment(text)
    # print 'Test Vector', test_vector
    clf = joblib.load('sklearn_model/random_forest_sent.pkl')
    result = clf.predict(test_vector)
    # print 'Result', result
    # print 'Lne', len(result)
    # print 'RR', result[0]
    if len(result):
        print 'Yes inside'
        # Class 0 == negative, anything else == positive.
        if result[0] == 0:
            return "The text is negative"
        else:
            return "The text is positive"
# Fixed: the guard compared __name__ against 'main', which never matches
# ('__main__' is the value when run as a script), so the block could
# never execute.
if __name__ == '__main__':
    pass
    # start_predict('''@SneakerShouts: Sick shot of the Nike Air Python in "Brown Snakeskin"
    # Sizes available on Nike here -> http://t.co/neluoxm91s http://t.c…''')
999,143 | 96523090c18b275a912ff90eca815704620bfb61 | import os
files = os.listdir("/home/hamza/MEGA/new/allTrain/labels/")
# print(files)
for file in files:
with open("/home/hamza/MEGA/new/allTrain/labels/"+file, "r") as f:
lines = f.readlines() #reads one line at a time
with open("/home/hamza/MEGA/new/allTrain/labels/"+file, "w") as f:
for line in lines:
if line.split()[0] == "0":
f.write(line)
if line.split()[0] == "2":
x = line.replace("2","0",1)
f.write(x)
if line.split()[0] == "None":
x = line.replace("None","0",1)
f.write(x) |
999,144 | e3182248083442fc8e2be38fc232701cf4564810 | import tinys3
from threading import Thread
from flask import current_app
def send_async_file(app, filename):
    """Upload an uploaded-paper file to S3 from a worker thread.

    Runs inside an application context so Flask config is available.
    """
    with app.app_context():
        AWS_ACCESS_KEY = current_app.config['AWS_ACCESS_KEY']
        AWS_SECRET_KEY = current_app.config['AWS_SECRET_KEY']
        PAPER_BUCKET = current_app.config['PAPER_BUCKET']
        ENDPOINT = current_app.config['S3_ENDPOINT']
        conn = tinys3.Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY,
                                 default_bucket=PAPER_BUCKET, tls=True,
                                 endpoint=ENDPOINT)
        # Fixed: close the file handle after the upload (it used to leak).
        with open(current_app.config['UPLOADED_PAPERS_DEST'] + filename, 'rb') as f:
            conn.upload(filename, f)
def send_to_s3(filename):
    """Kick off a background upload of *filename* to S3; return the
    started worker thread so callers may join it."""
    app = current_app._get_current_object()
    worker = Thread(target=send_async_file, args=[app, filename])
    worker.start()
    return worker
|
999,145 | 991e771c104583fd339864de810fecc29d4cf57a | class Data(object):
def __init__(self):
self.path = []
self.poisition = (0,0)
def fed_data(self,data):
self.path = data
def get_path(self):
return self.path
def get_position(self):
return self.poisition
class Calc(object):
    """Tracks a current and a next position; the direction/length
    computations are stubs that return 0.0."""

    def __init__(self):
        self.position = (0, 0)
        self.next_pos = (0, 0)

    def set_pos(self, pos):
        """Store the current position."""
        self.position = pos

    def set_nextpos(self, pos):
        """Store the next target position."""
        self.next_pos = pos

    def next_direction(self):
        """Stub: heading toward next_pos (always 0.0 for now)."""
        return 0.0

    def next_lenght(self):  # sic: original method name kept for callers
        """Stub: distance to next_pos (always 0.0 for now)."""
        return 0.0
class Control:
    """Placeholder controller."""

    def run(self):
        """Execute one control cycle (stub: always reports success)."""
        return 0
if __name__ == '__main__':
    # Fixed: the module-level name `run` does not exist, so the original
    # call raised NameError; instantiate the controller instead.
    Control().run()
|
999,146 | e0de855ad17a4abe30abeec2ea3b73516fee9b89 | import os.path
import io_evd.tsv
import data_transform.nMer
import classify.per_epitope
class ReadOptions:
    """Names of the input TSV columns holding TCR sequence and gene
    information.  Any field may be None when that column is absent."""

    def __init__(self, cdr3b_seq_field=None, cdr3a_seq_field=None,
                 vb_gene_field=None, db_gene_field=None, jb_gene_field=None,
                 va_gene_field=None, ja_gene_field=None):
        self._cdr3b_seq_field = cdr3b_seq_field
        self._cdr3a_seq_field = cdr3a_seq_field
        self._vb_gene_field = vb_gene_field
        self._db_gene_field = db_gene_field
        self._jb_gene_field = jb_gene_field
        self._va_gene_field = va_gene_field
        self._ja_gene_field = ja_gene_field

    # -- setters ---------------------------------------------------------
    def set_cdr3b_seq_field(self, cdr3b_seq_field): self._cdr3b_seq_field = cdr3b_seq_field

    def set_cdr3a_seq_field(self, cdr3a_seq_field): self._cdr3a_seq_field = cdr3a_seq_field

    def set_vb_gene_field(self, vb_gene_field): self._vb_gene_field = vb_gene_field

    def set_db_gene_field(self, db_gene_field): self._db_gene_field = db_gene_field

    def set_jb_gene_field(self, jb_gene_field): self._jb_gene_field = jb_gene_field

    def set_va_gene_field(self, va_gene_field): self._va_gene_field = va_gene_field

    def set_ja_gene_field(self, ja_gene_field): self._ja_gene_field = ja_gene_field

    # -- getters ---------------------------------------------------------
    def get_cdr3b_seq_field(self): return self._cdr3b_seq_field

    def get_cdr3a_seq_field(self): return self._cdr3a_seq_field

    def get_vb_gene_field(self): return self._vb_gene_field

    def get_db_gene_field(self): return self._db_gene_field

    def get_jb_gene_field(self): return self._jb_gene_field

    def get_va_gene_field(self): return self._va_gene_field

    def get_ja_gene_field(self): return self._ja_gene_field
class WriteOptions:
    """Output options: how many epitopes and amino-acid weights to
    write per input sequence."""

    def __init__(self, numEpi=10, showWeights=True, numWeightsMax=25):
        self._numEpi = numEpi
        self._showWeights = showWeights
        self._numWeightsMax = numWeightsMax

    def set_numEpi(self, numEpi): self._numEpi = numEpi

    def set_aaWeights(self, showWeights): self._showWeights = showWeights

    def set_numWeightsMax(self, numWeightsMax): self._numWeightsMax = numWeightsMax

    def get_numEpi(self): return self._numEpi

    def get_showWeights(self): return self._showWeights

    def get_numWeightsMax(self): return self._numWeightsMax
class ModelParams:
    """Paths of the three files that define a trained model: the
    amino-acid vector table, the GMM, and the per-epitope model."""

    def __init__(self, aaVecFile=None, gmmFile=None, epitopeFile=None):
        self._aaVec = aaVecFile
        self._gmm = gmmFile
        self._epitopes = epitopeFile

    def set_aaVec_file(self, aaVecFile): self._aaVec = aaVecFile

    def set_gmm_file(self, gmmFile): self._gmm = gmmFile

    def set_epitopes_file(self, epiFile): self._epitopes = epiFile

    def get_aaVec_file(self): return self._aaVec

    def get_gmm_file(self): return self._gmm

    def get_epitopes_file(self): return self._epitopes
class Stream:
    """Reads CDR3 sequences from a TSV file, classifies each against the
    epitope Model, and writes predictions back as extra columns.
    (Python 2 module — note the print statement in add().)
    """

    # Prefixes for the generated output column names.
    epiNamePrefix = "Epitope_"
    epiPostPrefix = "Posterior_"
    aaWeightsPrefix = "aaWeights_"

    def __init__(self, \
            inFile=None, \
            outFile=None, \
            readOptions=ReadOptions(), \
            writeOptions=WriteOptions(), \
            modelParams=ModelParams()):
        # NOTE(review): the option defaults are shared mutable instances
        # created once at import time — confirm callers never mutate them.
        self.set_inFile(inFile)
        self.set_outFile(outFile)
        self.set_readOptions(readOptions)
        self.set_writeOptions(writeOptions)
        self.set_modelParams(modelParams)
        self._load_model()

    def set_inFile(self, inFile):
        # Validate the input path eagerly; None is allowed (set later).
        if inFile == None:
            self._inFile = None
        else:
            if os.path.isfile(inFile):
                self._inFile = inFile
            else:
                raise IOError("inFile not found")

    def set_outFile(self, outFile):
        self._outFile = outFile

    def set_readOptions(self, readOptions):
        if isinstance(readOptions, ReadOptions):
            self._readOptions = readOptions
        else:
            raise IOError("readOptions is not the proper class type")

    def set_writeOptions(self, writeOptions):
        if isinstance(writeOptions, WriteOptions):
            self._writeOptions = writeOptions
        else:
            raise IOError("writeOptions is not the proper class type")

    def set_modelParams(self, modelParams):
        if isinstance(modelParams, ModelParams):
            self._modelParams = modelParams
        else:
            raise IOError("modelParams is not the proper class type")

    def add(self):
        """Classify every row of the input file and write the output
        file with the prediction columns appended."""
        if self._model == None:
            self._load_model()
        ioStream = io_evd.tsv.Data(inFile=self._inFile, outFile=self._outFile)
        ioStream.set_in_fields(self._get_infield_list())
        outFieldNames = self._get_outfield_list()
        ioStream.set_out_fields(outFieldNames)
        for entry in ioStream:
            cdrSeq = entry[0]
            print cdrSeq
            (epiNames, epiPosts, aaWeights) = self._model.get_epitopes(cdrSeq)
            outEntries = self._epiModelOut_2_out_fields(epiName=epiNames, epiPost=epiPosts, aaWeights=aaWeights)
            ioStream.write_out_fields(outEntries)
        ioStream.write()

    def _load_model(self):
        # Only build the Model once all three file paths are known.
        if self._modelParams.get_aaVec_file() == None or \
            self._modelParams.get_gmm_file() == None or \
            self._modelParams.get_epitopes_file() == None:
            self._model = None
        else:
            self._model = Model(modelParams=self._modelParams)

    def _get_infield_list(self):
        # Currently only the CDR3-beta sequence column is read.
        inFieldList = []
        inFieldList.append(self._readOptions.get_cdr3b_seq_field())
        return inFieldList

    def _get_outfield_list(self):
        """Column names: Epitope_01.., Posterior_01.., and optionally
        aaWeights_01.. up to numWeightsMax."""
        outFields = []
        for i in range(self._writeOptions.get_numEpi()):
            outFields.append("%s%02d"%(Stream.epiNamePrefix, i+1))
        for i in range(self._writeOptions.get_numEpi()):
            outFields.append("%s%02d"%(Stream.epiPostPrefix, i+1))
        if self._writeOptions.get_showWeights():
            for i in range(self._writeOptions.get_numWeightsMax()):
                outFields.append("%s%02d"%(Stream.aaWeightsPrefix, i+1))
        return outFields

    def _epiModelOut_2_out_fields(self, epiName=None, epiPost=None, aaWeights=None):
        """Flatten one model prediction into the ordered output row,
        zero-padding the weight columns beyond len(aaWeights)."""
        outFields = []
        for i in range(self._writeOptions.get_numEpi()):
            outFields.append(epiName[i])
        for i in range(self._writeOptions.get_numEpi()):
            outFields.append("%.4f"%epiPost[i])
        if self._writeOptions.get_showWeights():
            numAAshow = min(self._writeOptions.get_numWeightsMax(), len(aaWeights))
            numAApad = self._writeOptions.get_numWeightsMax() - numAAshow
            for i in range(numAAshow):
                outFields.append("%.4f"%aaWeights[i])
            for i in range(numAApad):
                outFields.append("%.4f"%0)
        return outFields
class Model:
    """Lazy wrapper tying together the amino-acid vector space, the GMM
    transform, and the per-epitope classifier; loads them on first use."""

    def __init__(self, \
            modelParams=ModelParams()):
        self.set_symVecFile(modelParams.get_aaVec_file())
        self.set_gmmModelFile(modelParams.get_gmm_file())
        self.set_epiModelFile(modelParams.get_epitopes_file())
        # Deferred: models are loaded on the first get_epitopes() call.
        self._models_loaded = False

    def _load_models(self):
        # Build the GMM first; its n-mer length/span configure the
        # amino-acid vector space.
        if self._symVecFile == None or self._gmmModelFile == None or self._epiModelFile == None:
            raise IOError("Not all model files specified properly")
        self._gmmModel = data_transform.nMer.Gmm(modelFile=self._gmmModelFile)
        self._nMerLen = self._gmmModel.nMerLen
        self._nMerSpan = self._gmmModel.nMerSpan
        self._aaVecSpace = data_transform.nMer.AaVecSpace( \
            symVecFile=self._symVecFile, \
            nMerLen=self._nMerLen, \
            nMerSpan=self._nMerSpan)
        self._epiModel = classify.per_epitope.Model(self._epiModelFile, self._gmmModel.num_mix())
        self._models_loaded = True

    def set_symVecFile(self, symVecFile=None):
        # Path is validated eagerly; None clears the setting.
        if symVecFile == None:
            self._symVecFile = None
        else:
            if os.path.isfile(symVecFile):
                self._symVecFile = symVecFile
            else:
                self._symVecFile = None
                raise IOError("symVecFile not found.")

    def set_gmmModelFile(self, gmmModelFile=None):
        if gmmModelFile == None:
            self._gmmModelFile = None
        else:
            if os.path.isfile(gmmModelFile):
                self._gmmModelFile = gmmModelFile
            else:
                self._gmmModelFile = None
                raise IOError("gmmModelFile not found.")

    def set_epiModelFile(self, epiModelFile=None):
        if epiModelFile == None:
            self._epiModelFile = None
        else:
            if os.path.isfile(epiModelFile):
                self._epiModelFile = epiModelFile
            else:
                self._epiModelFile = None
                raise IOError("epiModelFile not found.")

    def get_epitopes(self, cdrSeq):
        """Score *cdrSeq*: returns (epitope names, their posteriors, and
        per-residue weights for the top epitope)."""
        if not self._models_loaded:
            self._load_models()
        nMerMat = self._aaVecSpace.get_cdr_nmer_matrix(cdrSeq)
        (postVec, postMat) = self._gmmModel.mat_2_cdrPosteriors(nMerMat)
        (epiNames, epiPosts) = self._epiModel.get_posteriors(postVec)
        aaWeights = self._epiModel.get_aa_weights(cdrSeq=cdrSeq, nMerMat=nMerMat, postMat=postMat, gmmModel=self._gmmModel, epiName=epiNames[0])
        return (epiNames, epiPosts, aaWeights)
|
999,147 | 9984f4b14db8416c3d69d9b2b635e87e17675918 | ../hashUtils.py |
999,148 | f524bebbb0db85ba79d864dae83ae08063a310c6 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import spacy
import os
import pickle
from train_model import add_vectors, evaluate, main, train_spacy_model_from_zero
from train_model import preprocess_annotations, fit_dataframe
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
def show_model_stats(test_results, num_iter=100):
    """Collect per-iteration f-score/precision/recall into a DataFrame.

    Also draws (and immediately closes) a line plot of the three curves.
    """
    key_map = {"ents_f": "f-score", "ents_p": "precision", "ents_r": "recall"}
    collected = {"f-score": [], "precision": [], "recall": []}
    for result in test_results:
        for score, value in result.items():
            if score in key_map:
                collected[key_map[score]].append(value)
    stats_df = pd.DataFrame(data=collected)
    fig = plt.figure(figsize=(14, 8))
    ax = sns.lineplot(data=stats_df)
    ax.set_title("Statistics of de_core_news_md in {} iterations".format(num_iter), fontsize=22)
    ax.set_xlabel("Iteration", fontsize=18)
    ax.set_ylabel("Value of score in %", fontsize=18)
    ax.set_ylim(0, 100)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.grid()
    # plt.show()
    plt.close()
    return stats_df
# Hyperparameters
epochs = 30      # training iterations per model run
dropout = 0.35   # dropout rate passed to the spaCy trainer

# In[4]:

# Pre-split annotated train/test data pickled by an earlier step.
TRAIN_DATA, TEST_DATA = pickle.load(open("train_test_data_for_all.pickle", 'rb'))

# In[5]:

# Read test document
with open("../../Daten/eval_docs/Doc_000.txt", 'r') as test_file:
    test_doc = test_file.read()

# In[6]:

# Entity labels for the medication-NER task.
LABELS = [
    "DrugName",
    "Strength",
    "Route",
    "Frequency",
    "Form",
    "Dose_Amount",
    "IntakeTime",
    "Duration",
    "Dispense_Amount",
    "Refill",
    "Necessity",
]
for n in range(10):
native_model_dir = "./models/de_core_news_md_lemma_{}/".format(n)
# ## Spacy de_core_news_md vectors
# In[7]:
# Train the NER of the de_core_news_md model
test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model="de_core_news_md",
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[8]:
native_df = show_model_stats(test_results=test_results)
pickle.dump(native_df, open("data/dataframes/stats_of_de_core_news_md_lemma_{}.pickle".format(n), 'wb'))
pickle.dump(test_results, open("data/scorer/scorer_de_core_news_md_lemma_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors without german stopwords from nltk + de_core_news_md vectors
# In[10]:
word_vec_model = './models/slemmas_nltksw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_nltksw_spacy_lemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[11]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_spacy_lemma_nltksw_plus_{}.pickle".format(n), 'wb'))
pickle.dump(wv_test_results, open("data/scorer/scorer_spacy_lemma_nltksw_plus_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors without german stopwords from nltk
# In[13]:
word_vec_model = './models/slemmas_nltksw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_nltksw_spacy_lemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=only_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[14]:
del only_model
only_df = show_model_stats(test_results=only_test_results)
pickle.dump(only_df, open("data/dataframes/stats_of_spacy_lemma_nltksw_alone_{}.pickle".format(n), 'wb'))
pickle.dump(only_test_results, open("data/scorer/scorer_spacy_lemma_nltksw_alone_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors without german stopwords from spacy + de_core_news_md vectors
# In[16]:
word_vec_model = './models/slemmas_spacysw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wgsw_spacy_lemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[17]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_spacy_lemma_spacysw_plus_{}.pickle".format(n), 'wb'))
pickle.dump(wv_test_results, open("data/scorer/scorer_spacy_lemma_spacysw_plus_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors without german stopwords from spacy
# In[19]:
word_vec_model = './models/slemmas_spacysw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wgsw_spacy_lemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=only_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[20]:
del only_model
only_df = show_model_stats(test_results=only_test_results)
pickle.dump(only_df, open("data/dataframes/stats_of_spacy_lemma_spacysw_alone_{}.pickle".format(n), 'wb'))
pickle.dump(only_test_results, open("data/scorer/scorer_spacy_lemma_spacysw_alone_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors with german stopwords + de_core_news_md vectors
# In[22]:
word_vec_model = './models/slemmas_wsw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wsw_spacy_lemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[23]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_spacy_lemma_wsw_plus_{}.pickle".format(n), 'wb'))
pickle.dump(wv_test_results, open("data/scorer/scorer_spacy_lemma_wsw_plus_{}.pickle".format(n), 'wb'))
# ## spacy lemmas vectors with german stopwords
# In[25]:
word_vec_model = './models/slemmas_wsw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wsw_spacy_lemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=only_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[26]:
del only_model
only_df = show_model_stats(test_results=only_test_results)
pickle.dump(only_df, open("data/dataframes/stats_of_spacy_lemma_wsw_alone_{}.pickle".format(n), 'wb'))
pickle.dump(only_test_results, open("data/scorer/scorer_spacy_lemma_wsw_alone_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors without german stopwords from nltk + de_core_news_md vectors
# In[28]:
word_vec_model = './models/germalemma_nltksw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_nltksw_germalemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[29]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_germalemma_nltksw_plus_{}.pickle".format(n), 'wb'))
pickle.dump(wv_test_results, open("data/scorer/scorer_germalemma_nltksw_plus_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors without german stopwords from nltk
# In[31]:
word_vec_model = './models/germalemma_nltksw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_nltksw_germalemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=only_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[32]:
del only_model
only_df = show_model_stats(test_results=only_test_results)
pickle.dump(only_df, open("data/dataframes/stats_of_germalemma_nltksw_alone_{}.pickle".format(n), 'wb'))
pickle.dump(only_test_results, open("data/scorer/scorer_germalemma_nltksw_alone_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors without german stopwords from spacy + de_core_news_md vectors
# In[34]:
word_vec_model = './models/germalemma_spacysw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wgsw_germalemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[35]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_germalemma_spacysw_plus_{}.pickle".format(n), 'wb'))
pickle.dump(wv_test_results, open("data/scorer/scorer_germalemma_spacysw_plus_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors without german stopwords from spacy
# In[37]:
word_vec_model = './models/germalemma_spacysw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wgsw_germalemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=only_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[38]:
del only_model
only_df = show_model_stats(test_results=only_test_results)
pickle.dump(only_df, open("data/dataframes/stats_of_germalemma_spacysw_alone_{}.pickle".format(n), 'wb'))
pickle.dump(only_test_results, open("data/scorer/scorer_germalemma_spacysw_alone_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors with german stopwords + de_core_news_md vectors
# In[40]:
word_vec_model = './models/germalemma_wsw_plus_{}/'.format(n)
wordvec_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wsw_germalemma_2.txt', lang=1, pipe=spacy.load("de_core_news_md"))
wv_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
model=wordvec_model,
validation=TEST_DATA, n_iter=epochs, dropout=dropout)
# In[41]:
del wordvec_model
wv_df = show_model_stats(test_results=wv_test_results)
pickle.dump(wv_df, open("data/dataframes/stats_of_germalemma_wsw_plus_{}.pickle".format(n), 'wb'))
# ## germalemmas vectors with german stopwords

# In[43]:

word_vec_model = './models/germalemma_wsw_alone_{}/'.format(n)
only_model = spacy.load("de_core_news_md")
only_model.vocab.reset_vectors(width=300)
# NOTE(review): this section is named "germalemma_wsw_alone" but loads the
# *spacy_lemma* vector file -- looks like a copy-paste slip; confirm whether
# 'spacy_preprocessed_wsw_germalemma_2.txt' was intended.
only_model = add_vectors('../../Word2Vec-Versuche/preprocessing/lemma/word_vectors/spacy_preprocessed_wsw_spacy_lemma_2.txt', lang=1, pipe=only_model)
only_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
                                                model=only_model,
                                                validation=TEST_DATA, n_iter=epochs, dropout=dropout)

# In[44]:

del only_model
only_df = show_model_stats(test_results=only_test_results)
# with-blocks close the pickle handles (the original open(...) leaked them)
with open("data/dataframes/stats_of_germalemma_wsw_alone_{}.pickle".format(n), 'wb') as fh:
    pickle.dump(only_df, fh)
with open("data/scorer/scorer_germalemma_wsw_alone_{}.pickle".format(n), 'wb') as fh:
    pickle.dump(only_test_results, fh)
# ## without vectors spacy model => Baseline

# In[46]:

# Baseline: identical pipeline but with an empty (reset) vector table.
word_vec_model = './models/lemma_baseline_{}/'.format(n)
wo_model = spacy.load("de_core_news_md")
wo_model.vocab.reset_vectors(width=300)
wo_test_results = train_spacy_model_from_zero(train_data=TRAIN_DATA, labels=LABELS,
                                              model=wo_model,
                                              validation=TEST_DATA, n_iter=epochs, dropout=dropout)

# In[47]:

del wo_model
wo_df = show_model_stats(test_results=wo_test_results)
# with-blocks close the pickle handles (the original open(...) leaked them)
with open("data/dataframes/stats_of_lemma_baseline_{}.pickle".format(n), 'wb') as fh:
    pickle.dump(wo_df, fh)
with open("data/scorer/scorer_lemma_baseline_{}.pickle".format(n), 'wb') as fh:
    pickle.dump(wo_test_results, fh)

# In[48]:

wo_df.describe()
# ## create pandas dataframe by concatenating all previous test dataframes

# In[2]:

# (pickle-file key, plot label) for every experiment persisted above.  The
# list order drives the column order of both comparison frames and the plot
# legends, and replaces the fourteen copy-pasted load/concat statements.
_experiments = [
    ("de_core_news_md_lemma", "de_core_news_md"),
    ("spacy_lemma_nltksw_plus", "spacy_lemma_nltksw_plus"),
    ("spacy_lemma_nltksw_alone", "spacy_lemma_nltksw_alone"),
    ("spacy_lemma_spacysw_plus", "spacy_lemma_spacysw_plus"),
    ("spacy_lemma_spacysw_alone", "spacy_lemma_spacysw_alone"),
    ("spacy_lemma_wsw_plus", "spacy_lemma_wsw_plus"),
    ("spacy_lemma_wsw_alone", "spacy_lemma_wsw_alone"),
    ("germalemma_nltksw_plus", "germalemma_nltksw_plus"),
    ("germalemma_nltksw_alone", "germalemma_nltksw_alone"),
    ("germalemma_spacysw_plus", "germalemma_spacysw_plus"),
    ("germalemma_spacysw_alone", "germalemma_spacysw_alone"),
    ("germalemma_wsw_plus", "germalemma_wsw_plus"),
    ("germalemma_wsw_alone", "germalemma_wsw_alone"),
    ("lemma_baseline", "baseline"),
]
_frames = []
for _file_key, _ in _experiments:
    # with-block closes each handle (the original pickle.load(open(...))
    # calls leaked all fourteen file handles)
    with open("data/dataframes/stats_of_{}_{}.pickle".format(_file_key, n), 'rb') as fh:
        _frames.append(pickle.load(fh))

# In[3]:

# summary statistics (describe) of the f-score, one column per experiment
comp_df = pd.concat([df.describe()["f-score"] for df in _frames], axis=1)
# raw per-iteration f-scores, one column per experiment
merged = pd.concat([df["f-score"] for df in _frames], axis=1)
dia_labels = ["{}_{}".format(_label, n) for _, _label in _experiments]
comp_df.columns = dia_labels
merged.columns = dia_labels

fig = plt.figure(figsize=(14, 8))
ax = sns.lineplot(data=merged, dashes=False, palette=sns.color_palette("RdYlGn", n_colors=14))
ax.set_title("Vorverarbeitung: Lemmatisierung", fontsize=22)
ax.set_xlabel("Iteration des Trainingprozesses", fontsize=18)
ax.set_ylabel("F1-Score in %", fontsize=18)
ax.set_ylim(0, 100)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig("data/figures/lemma_train_{}.png".format(n), format='png')
# plt.show()
plt.close()
# In[7]:

# Same line plot, but run through fit_dataframe() first -- presumably a
# fitted/smoothed version of the curves (helper defined earlier in the
# notebook); TODO confirm.
fig2 = plt.figure(figsize=(14, 8))
ax2 = sns.lineplot(data=fit_dataframe(merged), dashes=False, palette=sns.color_palette("RdYlGn", n_colors=14))
ax2.set_title("Vorverarbeitung: Lemmatisierung fitted", fontsize=22)
ax2.set_xlabel("Iteration des Trainingprozesses", fontsize=18)
ax2.set_ylabel("F1-Score in %", fontsize=18)
ax2.set_ylim(0, 100)
plt.grid()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.savefig("data/figures/lemma_fitted_train_{}.png".format(n), format='png')
# plt.show()
plt.close()
# In[8]:

# Bar plot of the best ('max' row of describe()) f-score per experiment,
# columns sorted ascending so the best model ends up rightmost.
bar_df = comp_df[comp_df.index == 'max']
bar_df = bar_df.sort_values(by='max', axis=1)
fig = plt.figure(figsize=(14, 8))
ax = sns.barplot(data=bar_df, palette=sns.color_palette("RdYlGn", n_colors=14))
# annotate each bar with its value rounded to one decimal
for i, cty in enumerate(bar_df.values.tolist()[0]):
    ax.text(i, cty+0.5, round(cty, 1), horizontalalignment='center')
ax.set_title("Vorverarbeitung: Lemmatisierung Barplot", fontsize=22)
ax.set_xlabel("Wortvektormodell", fontsize=18)
# NOTE(review): the bars show the *maximum* f-score but the axis label says
# "Mittelwert" (mean) -- confirm which is intended.
ax.set_ylabel("Mittelwert F1-Score in %", fontsize=18)
ax.set_ylim(0, 100)
plt.xticks(fontsize=14, rotation=90)
plt.yticks(fontsize=14)
plt.savefig("data/figures/lemma_barplot_{}.png".format(n), format='png')
# plt.show()
plt.close()

# pickle.dump((TRAIN_DATA, TEST_DATA), open("train_test_data_for_all.pickle", 'wb'))
|
999,149 | 40c0f31f4137be2c8ffbc3565e2884a635616a22 | import sys
import re
import os
import shutil
import atexit
import json
import sys
import gc
import write_to_file
import first
import follow
import LL1
import Queue
import copy
import SLR_1_parser_table
import LR_0_parser_table
#import matplotlib.pyplot as plt
result_str = ""
visited = []
# Grammar / parser state shared across the module; populated by
# get_data_parser_table() and read by the parsing routines.  Kept global to
# mirror the legacy tool this file was adapted from.
graph = {}
set_terminal = ()
set_non_terminal = ()
first_table = {}
follow_table = {}
follow_computed = {}
reverse_relation = {}
serial_productions = {}
serial_productions_2 = {}
action_table = {}
goto = {}
root = ""
# accumulated human-readable trace of the parse; written out at the end
log = ''
'''
Objective of the code :
    1. Read the grammar input and build the LR parser tables.
    2. Run the shift-reduce parser over the given input string.
    3. Log every parsing step (shift / reduce / accept / error) to a file.
(The original comment referred to DOT graphs, dominator graphs and cycle
finding; this file was adapted from that legacy tool and no longer does so.)
'''
def read_file(input_file) :
    ''' Read the whole of *input_file* and return its contents as a string. '''
    # with-statement guarantees the handle is closed even if read() raises
    # (the original open/read/close leaked the handle on error)
    with open(input_file, 'r') as fHandle:
        return fHandle.read()
#input file ends
def get_data_parser_table(input_file) :
    ''' Populate the module-level grammar/parser tables from *input_file*
    via LR_0_parser_table, then build serial_productions_2 from them. '''
    global graph, set_terminal, set_non_terminal, first_table, follow_table, root, action_table, goto, serial_productions, serial_productions_2
    graph, set_terminal, set_non_terminal, first_table, follow_table, root, action_table, goto, serial_productions = LR_0_parser_table.get_data_parser_table(input_file)
    # debug dumps of every constructed table
    print "******************"
    print first_table
    print "*****first*****"
    print follow_table
    print "*****follow*****"
    print action_table
    print "*****action******"
    print goto
    print "********goto*****"
    print serial_productions
    print "******serial_productions*****"
    preprocess_serial_production()
    print serial_productions_2
#definition ends here
def update_aug(lists) :
    ''' Return a shallow copy of *lists* so the caller's original is kept. '''
    duplicate = copy.copy(lists)
    return duplicate
#end
def update_log(stack_top, stack, inp_top, inp, action) :
    ''' Append one tab-separated trace row (stack contents, remaining input,
    action taken) to the module-level log string. '''
    global log
    # Legacy interface: the *_top arguments are deliberately blanked out
    # (kept only for signature compatibility with the old tool).
    stack_column = "".join(map(str, list(stack)))
    log += "%s\t\t\t\t%s\t\t\t\t%s\n" % (stack_column, inp, action)
#end update log
def maintain_stack(inp_top, stack) :
    ''' Perform the stack surgery for a Reduce action: pop 2 * len(rhs)
    entries (symbol/state pairs), then push the production's non-terminal
    and the goto state.  Returns 1 on success, -1 if the stack empties
    prematurely, -2 if the exposed top is not a state number, -3 if the
    goto table has no entry for the non-terminal. '''
    global action_table, goto, serial_productions_2, set_terminal, set_non_terminal
    stack_top = stack[-1]
    prod_no = action_table[stack_top][inp_top][1] # contains the production
    # prod_elem_2_tuple = [non_terminal, rhs string] for that production
    prod_elem_2_tuple = serial_productions_2[prod_no]
    # each rhs symbol occupies two stack slots: the symbol and a state
    for i in range(len(prod_elem_2_tuple[1]) * 2) :
        if stack == [] :
            return -1 # that is the stack has become prematurely empty
        stack.pop()
    #end loop
    if stack == [] :
        return -1
    stack_top = stack[-1]
    # after popping the handle, the top must be a state number
    if not type(stack_top) == int :
        return -2
    non_terminal_of_prod = prod_elem_2_tuple[0]
    if not non_terminal_of_prod in goto[stack_top] :
        return -3
    goto_var = goto[stack_top][non_terminal_of_prod]
    stack.append(non_terminal_of_prod)
    stack.append(goto_var)
    return 1
#end method
def preprocess_serial_production():
    ''' Invert serial_productions into serial_productions_2:
    production number -> [non-terminal, rhs]. '''
    global serial_productions, serial_productions_2
    for non_terminal in serial_productions :
        productions = serial_productions[non_terminal]
        for rhs in productions :
            # productions[rhs] is the serial number assigned to this rule
            serial_productions_2[productions[rhs]] = [non_terminal, rhs]
# end method
def parse_input(data) :
    ''' Drive the shift-reduce (LR) parser over *data* using the global
    action/goto tables.  Appends '$' as end marker, logs every step via
    update_log, and returns True iff the input is accepted. '''
    global LL1_table, log, set_terminal, set_non_terminal, root, log, action_table, goto, serial_productions_2, serial_productions
    action = ""
    log = ""
    preprocess_serial_production()
    # stack alternates state numbers and grammar symbols; state 0 is start
    stack = [0]
    stack_top = 0
    inp = data + '$'
    #stack.append(root)
    print stack
    print inp
    accept = False
    while True :
        inp_top = inp[0]
        stack_top = stack[-1]
        print inp_top
        print stack_top
        # NOTE(review): this debug print indexes action_table directly and
        # will raise KeyError before the error branches below can fire.
        print action_table[stack_top][inp_top][0]
        if not stack_top in action_table : # that is parsing error encounterd
            accept = False
            action = 'error111'
            update_log(stack_top, stack, inp_top, inp, action)
            break
        elif not inp_top in action_table[stack_top] : # that is parsing error encounterd
            accept = False
            action = 'error222'
            update_log(stack_top, stack, inp_top, inp, action)
            break
        elif inp_top in set_terminal and action_table[stack_top][inp_top][0] == 'A':
            # Accept entry: parsing finished successfully
            action = 'Accepted'
            update_log(stack_top, stack, inp_top, inp, action)
            accept = True
            break
        elif inp_top in set_terminal and action_table[stack_top][inp_top][0] == 'S' :
            # Shift: push terminal and next state, consume one input char
            action = 'Shift'
            update_log(stack_top, stack, inp_top, inp, action)
            stack.append(inp_top)
            stack.append(action_table[stack_top][inp_top][1])
            inp = inp[1:]
            inp_top = inp[0]
        elif inp_top in set_terminal and action_table[stack_top][inp_top][0] == 'R' :
            # Reduce: pop the handle and push non-terminal + goto state
            action = 'Reduce'
            prod_no = action_table[stack_top][inp_top][1]
            prod_elem_2_tuple = serial_productions_2[prod_no]
            action += " " + prod_elem_2_tuple[0] + " -> " + prod_elem_2_tuple[1]
            update_log(stack_top, stack, inp_top, inp, action)
            ret_val = maintain_stack(inp_top, stack)
            if not ret_val == 1 :
                # maintain_stack signalled a malformed stack / missing goto
                print ret_val
                action = 'error'
                update_log(stack_top, stack, inp_top, inp, action)
                break
    return accept
#end method
def executer(input_file, input_string, output_file) :
    ''' End-to-end driver: build the parser tables from *input_file*, parse
    the string stored in the *input_string* file, print the verdict and
    write the step-by-step log to *output_file*. '''
    global graph, set_terminal, set_non_terminal, first_table, follow_table, root, log, serial_productions, action_table, goto
    get_data_parser_table(input_file)
    data = read_file(input_string)
    data = data.strip()
    print data
    parse_result = parse_input(data)
    print log
    #print ok
    #result_str = prepare_output()
    if parse_result == True :
        print "Accepted"
    else :
        print "Error in parsing!!"
    # persist the trace via the project's writer helper
    write_to_file.write_output(filename = output_file, inp = log, typer="str")
#end method
def main() :
    ''' Parse the --input_file / --input_string / --output_file arguments
    (expected in exactly that order) and run the parser.  The usage text
    still mentions DOT files -- wording inherited from the legacy tool. '''
    args = sys.argv[1:]
    if not args:
        print 'usage1: [--input_file <DOT_file>] [--input_string <DOT_file>] [--output_file <cycles>]'
        #print 'usage2: [old_filename]'
        sys.exit(1)
    #-----------------------------------------------------------------------------------------------------------------------------------
    #INPUT filename
    if args[0] == "--input_file" :
        input_file = args[1]
        del args[0:2]
    else :
        print 'usage1: [--input_file <DOT_file>] [--input_string <DOT_file>] [--output_file <cycles>]'
        sys.exit(1)
    #-----------------------------------------------------------------------------------------------------------------------------------
    #INPUT string file
    if args[0] == "--input_string" :
        input_string = args[1]
        del args[0:2]
    else :
        print 'usage1: [--input_file <DOT_file>] [--input_string <DOT_file>] [--output_file <cycles>]'
        sys.exit(1)
    #-----------------------------------------------------------------------------------------------------------------------------------
    #OUTPUT filename
    if args[0] == "--output_file" :
        output_file = args[1]
        del args[0:2]
    else :
        print 'usage1: [--input_file <DOT_file>] [--input_string <DOT_file>] [--output_file <cycles>]'
        sys.exit(1)
    #-------------------------------------------------------------------------------------------------------------------------------------
    executer(input_file = input_file, input_string = input_string, output_file = output_file)

if __name__ == '__main__' :
    main()
999,150 | c50c2d1c6ac773a25a01c58cde373a9c935b38a7 | import datetime
from sqlalchemy import Column, String, Integer, DateTime, Table, ForeignKey
from .constants import STRING_SIZE
class CustomBase(object):
    ''' Define base attributes shared by all tables. '''
    # surrogate primary key
    id = Column(Integer, primary_key=True, autoincrement=True)
    # creation timestamp (naive UTC) and last-modification timestamp
    create_date = Column(DateTime(timezone=False), default=datetime.datetime.utcnow)
    modify_date = Column(DateTime(timezone=False))
    # audit columns: who created / last modified the row
    create_by = Column(String(STRING_SIZE), default="system")
    modify_by = Column(String(STRING_SIZE))
|
999,151 | a857acf390c0de60415ae500804b33efa31e943f |
# waiting for imports_1, please do not remove this line
from flask import Flask
# waiting for imports_2, please do not remove this line

# Flask application object; the view module imports it from here.
app = Flask(__name__)

# Imported at the bottom on purpose: mfm.routes imports `app` from this
# module, so a top-of-file import would be circular.
from mfm import routes
|
999,152 | c5179bea04cc56131bf1e76756e279ecbb4e0d6e | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Literal, Optional
import numpy as np
from great_expectations.compatibility.typing_extensions import override
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric_provider import (
ColumnAggregateMetricProvider,
)
from great_expectations.expectations.metrics.metric_provider import metric_value
from great_expectations.util import convert_ndarray_to_datetime_dtype_best_effort
from great_expectations.validator.metric_configuration import MetricConfiguration
if TYPE_CHECKING:
import numpy.typing as npt
from great_expectations.core import ExpectationConfiguration
class ColumnPartition(ColumnAggregateMetricProvider):
    """Provider for the "column.partition" metric: the bin edges used to
    histogram a column.

    The three engine implementations are identical because the actual edge
    computation only consumes other, already-resolved metrics (min/max,
    quantiles, non-null count) via ``_get_column_partition_using_metrics``.
    """

    metric_name = "column.partition"
    value_keys = ("bins", "n_bins", "allow_relative_error")
    # "bins" selects the binning strategy, "n_bins" the number of bins
    default_kwarg_values = {
        "bins": "uniform",
        "n_bins": 10,
        "allow_relative_error": False,
    }

    @metric_value(engine=PandasExecutionEngine)
    def _pandas(  # noqa: PLR0913
        cls,
        execution_engine: PandasExecutionEngine,
        metric_domain_kwargs: dict,
        metric_value_kwargs: dict,
        metrics: Dict[str, Any],
        runtime_configuration: dict,
    ):
        """Compute the partition from dependency metrics (pandas engine)."""
        bins = metric_value_kwargs.get("bins", cls.default_kwarg_values["bins"])
        n_bins = metric_value_kwargs.get("n_bins", cls.default_kwarg_values["n_bins"])
        return _get_column_partition_using_metrics(
            bins=bins, n_bins=n_bins, _metrics=metrics
        )

    @metric_value(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(  # noqa: PLR0913
        cls,
        # annotation fixed: was PandasExecutionEngine (copy-paste slip)
        execution_engine: SqlAlchemyExecutionEngine,
        metric_domain_kwargs: dict,
        metric_value_kwargs: dict,
        metrics: Dict[str, Any],
        runtime_configuration: dict,
    ):
        """Compute the partition from dependency metrics (SQLAlchemy engine)."""
        bins = metric_value_kwargs.get("bins", cls.default_kwarg_values["bins"])
        n_bins = metric_value_kwargs.get("n_bins", cls.default_kwarg_values["n_bins"])
        return _get_column_partition_using_metrics(
            bins=bins, n_bins=n_bins, _metrics=metrics
        )

    @metric_value(engine=SparkDFExecutionEngine)
    def _spark(  # noqa: PLR0913
        cls,
        # annotation fixed: was PandasExecutionEngine (copy-paste slip)
        execution_engine: SparkDFExecutionEngine,
        metric_domain_kwargs: dict,
        metric_value_kwargs: dict,
        metrics: Dict[str, Any],
        runtime_configuration: dict,
    ):
        """Compute the partition from dependency metrics (Spark engine)."""
        bins = metric_value_kwargs.get("bins", cls.default_kwarg_values["bins"])
        n_bins = metric_value_kwargs.get("n_bins", cls.default_kwarg_values["n_bins"])
        return _get_column_partition_using_metrics(
            bins=bins, n_bins=n_bins, _metrics=metrics
        )

    @classmethod
    @override
    def _get_evaluation_dependencies(
        cls,
        metric: MetricConfiguration,
        configuration: Optional[ExpectationConfiguration] = None,
        execution_engine: Optional[ExecutionEngine] = None,
        runtime_configuration: Optional[dict] = None,
    ):
        """Declare which metrics must be resolved first, per bins strategy."""
        bins = metric.metric_value_kwargs.get("bins", cls.default_kwarg_values["bins"])
        n_bins = metric.metric_value_kwargs.get(
            "n_bins", cls.default_kwarg_values["n_bins"]
        )
        # NOTE(review): direct indexing (no .get with default) -- raises
        # KeyError if "allow_relative_error" is absent; confirm callers
        # always supply it.
        allow_relative_error = metric.metric_value_kwargs["allow_relative_error"]

        dependencies: dict = super()._get_evaluation_dependencies(
            metric=metric,
            configuration=configuration,
            execution_engine=execution_engine,
            runtime_configuration=runtime_configuration,
        )
        if bins == "uniform":
            # uniform bins only need the column extrema
            dependencies["column.min"] = MetricConfiguration(
                metric_name="column.min",
                metric_domain_kwargs=metric.metric_domain_kwargs,
            )
            dependencies["column.max"] = MetricConfiguration(
                metric_name="column.max",
                metric_domain_kwargs=metric.metric_domain_kwargs,
            )
        elif bins in ["ntile", "quantile", "percentile"]:
            # quantile-based bins: n_bins + 1 evenly spaced quantiles
            dependencies["column.quantile_values"] = MetricConfiguration(
                metric_name="column.quantile_values",
                metric_domain_kwargs=metric.metric_domain_kwargs,
                metric_value_kwargs={
                    "quantiles": np.linspace(start=0, stop=1, num=n_bins + 1).tolist(),
                    "allow_relative_error": allow_relative_error,
                },
            )
        elif bins == "auto":
            # "auto" needs the non-null count plus box-plot quantiles for
            # the Sturges / Freedman-Diaconis heuristic
            dependencies["column_values.nonnull.count"] = MetricConfiguration(
                metric_name="column_values.nonnull.count",
                metric_domain_kwargs=metric.metric_domain_kwargs,
            )
            dependencies["column.quantile_values"] = MetricConfiguration(
                metric_name="column.quantile_values",
                metric_domain_kwargs=metric.metric_domain_kwargs,
                metric_value_kwargs={
                    "quantiles": (0.0, 0.25, 0.75, 1.0),
                    "allow_relative_error": allow_relative_error,
                },
            )
        else:
            raise ValueError("Invalid parameter for bins argument")
        return dependencies
def _get_column_partition_using_metrics(
    bins: Literal["uniform", "ntile", "quantile", "percentile", "auto"],
    n_bins: int,
    _metrics: dict,
) -> list | npt.NDArray:
    """Compute histogram bin edges from already-resolved dependency metrics.

    Strategies:
      * "uniform": n_bins equal-width bins between column min and max.
      * "ntile"/"quantile"/"percentile": edges are the precomputed quantiles.
      * "auto": numpy histogram_bin_edges-style heuristic -- the larger of
        Sturges' and Freedman-Diaconis' bin counts from min/25%/75%/max.
    """
    result_bins: list | npt.NDArray
    if bins == "uniform":
        min_ = _metrics["column.min"]
        max_ = _metrics["column.max"]

        original_ndarray_is_datetime_type: bool
        conversion_ndarray_to_datetime_type_performed: bool
        min_max_values: npt.NDArray | list
        # best-effort parse of string-typed min/max into datetimes so the
        # bin arithmetic can stay in native datetime units
        (
            original_ndarray_is_datetime_type,
            conversion_ndarray_to_datetime_type_performed,
            min_max_values,
        ) = convert_ndarray_to_datetime_dtype_best_effort(
            data=[min_, max_],  # type: ignore[arg-type] # expects NDArray
            parse_strings_as_datetimes=True,
        )
        ndarray_is_datetime_type: bool = (
            original_ndarray_is_datetime_type
            or conversion_ndarray_to_datetime_type_performed
        )

        min_ = min_max_values[0]
        max_ = min_max_values[1]

        result_bins = _determine_bins_using_proper_units(  # type: ignore[assignment] # TODO: ensure not None
            ndarray_is_datetime_type=ndarray_is_datetime_type,
            n_bins=n_bins,
            min_=min_,
            max_=max_,
        )
    elif bins in ["ntile", "quantile", "percentile"]:
        # the quantiles themselves are the edges
        result_bins = _metrics["column.quantile_values"]
    elif bins == "auto":
        # Use the method from numpy histogram_bin_edges
        nonnull_count = _metrics["column_values.nonnull.count"]
        # Sturges' rule: ~log2(n) bins
        sturges = np.log2(1.0 * nonnull_count + 1.0)
        min_, _25, _75, max_ = _metrics["column.quantile_values"]

        box_plot_values: npt.NDArray
        (
            original_ndarray_is_datetime_type,
            conversion_ndarray_to_datetime_type_performed,
            box_plot_values,
        ) = convert_ndarray_to_datetime_dtype_best_effort(
            data=[min_, _25, _75, max_],  # type: ignore[arg-type] # expects NDArray
            parse_strings_as_datetimes=True,
        )
        ndarray_is_datetime_type = (
            original_ndarray_is_datetime_type
            or conversion_ndarray_to_datetime_type_performed
        )

        min_ = box_plot_values[0]
        _25 = box_plot_values[1]
        _75 = box_plot_values[2]
        max_ = box_plot_values[3]

        # datetimes are reduced to POSIX timestamps for the IQR arithmetic
        if ndarray_is_datetime_type:
            iqr = _75.timestamp() - _25.timestamp()
            min_as_float_ = min_.timestamp()
            max_as_float_ = max_.timestamp()
        else:
            iqr = _75 - _25
            min_as_float_ = min_
            max_as_float_ = max_
        if (
            iqr < 1.0e-10  # noqa: PLR2004
        ):  # Consider IQR 0 and do not use variance-based estimator
            n_bins = int(np.ceil(sturges))
        else:  # noqa: PLR5501
            if nonnull_count == 0:
                n_bins = 0
            else:
                # Freedman-Diaconis bin width: 2 * IQR / n^(1/3)
                fd = (2 * float(iqr)) / (nonnull_count ** (1.0 / 3.0))
                n_bins = max(
                    int(np.ceil(sturges)),
                    int(np.ceil(float(max_as_float_ - min_as_float_) / fd)),
                )
        result_bins = _determine_bins_using_proper_units(  # type: ignore[assignment] # need overloads to ensure not None
            ndarray_is_datetime_type=ndarray_is_datetime_type,
            n_bins=n_bins,
            min_=min_,
            max_=max_,
        )
    else:
        raise ValueError("Invalid parameter for bins argument")
    return result_bins
def _determine_bins_using_proper_units(
ndarray_is_datetime_type: bool, n_bins: int, min_: Any, max_: Any
) -> list | npt.NDArray | None:
if ndarray_is_datetime_type:
if n_bins == 0:
bins = [min_]
else:
delta_t = (max_ - min_) / n_bins
bins = []
for idx in range(n_bins + 1):
bins.append(min_ + idx * delta_t)
else:
# PRECISION NOTE: some implementations of quantiles could produce
# varying levels of precision (e.g. a NUMERIC column producing
# Decimal from a SQLAlchemy source, so we cast to float for numpy)
if min_ is None or max_ is None:
return None
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1).tolist()
return bins
|
999,153 | 90324a4b0f808253e5ee37784baa772c7bb40f11 | # coding: utf-8
# This function computes the body mass index (BMI),
# given the height (in meter) and weight (in kg) of a person.
def bodyMassIndex(height, weight):
    # BMI = mass (kg) divided by the square of the height (m)
    bmi = weight / height ** 2
    return bmi
# This function returns the BMI category acording to this table:
# BMI:       <18.5        [18.5, 25[     [25, 30[    30 or greater
# Category:  Underweight  Normal weight  Overweight  Obesity
def bmiCategory(bmi):
    # intervals are half-open on the right, matching the table above
    if bmi < 18.5:
        return "Underweight"
    elif bmi < 25:
        return "Normal weight"
    elif bmi < 30:
        return "Overweight"
    else:
        return "Obesity"
# This is the main function
def main():
    # Ask for height and weight, then report the BMI and its category.
    print("Índice de Massa Corporal")
    altura = float(input("Altura (m)? "))
    peso = float(input("Peso (kg)? "))
    imc = bodyMassIndex(altura, peso)
    cat = bmiCategory(imc)
    print("BMI:", imc, "kg/m2")
    print("BMI category:", cat)
# Program starts executing here.  Guarded so importing this module (e.g. for
# testing bodyMassIndex/bmiCategory) does not trigger interactive input().
if __name__ == "__main__":
    main()
|
999,154 | 2fd34374181728550ac2d8207fbc1c3949228b10 | import pygame, os, sys, random
# Named RGB colour constants
black = ( 0, 0, 0)
white = ( 255, 255, 255)
red = ( 255, 0, 0)
lime = ( 0, 255, 0)
blue = ( 0, 0, 255)
yellow = ( 255, 255, 0)
cyan = ( 0, 255, 255)
magenta = ( 255, 0, 255)
silver = ( 192, 192, 192)
gray = ( 128, 128, 128)
darkred = ( 128, 0, 0)
olive = ( 128, 128, 0)
green = ( 0, 128, 0)
purple = ( 128, 0, 128)
darkaqua = ( 0, 128, 128)
navyblue = ( 0, 0, 128)

# Window setup; note the display window is created at import time
screenSize = (480, 640)
title = 'Platformer'
surface = pygame.display.set_mode(screenSize)
pygame.display.set_caption(title)
clock = pygame.time.Clock()
FPS = 60

# Asset directories, resolved relative to this file
gameFolder = os.path.dirname(__file__)
imgFolder = os.path.join(gameFolder, 'img')
sndFolder = os.path.join(gameFolder, 'snd')

#camera
cameraSpeed = 2

#playerattributes (gravity per frame, initial jump velocity)
gravity = 0.65
jumpPower = 10

#pipeattributes (all sizes/distances in pixels)
pipeWidth = 100
pipeGapSize = 200
spaceBetweenPipePairs = 300
firstPipePair = 300
def waitForPlayerInput():
    ''' Block until the player releases any key; a window-close event
    quits the game instead. '''
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                terminate()
            if event.type == pygame.KEYUP:
                return
def terminate():
    ''' Shut pygame down cleanly and exit the process. '''
    pygame.quit()
    sys.exit()
def drawText(text, color, x, y, pos, size = 32):
    ''' Render *text* onto the global surface, anchoring the part of the
    text rect named by *pos* (e.g. 'topleft', 'center', 'midright') at
    (x, y).  Unknown *pos* values are reported and nothing is anchored. '''
    # NOTE: the font is looked up and created on every call; cache it if
    # this ever shows up in profiling.
    font = pygame.font.match_font('arial')
    font = pygame.font.Font(font, size)
    textobj = font.render(text, True, color)
    textRect = textobj.get_rect()
    if pos == 'topleft':
        textRect.topleft = (x,y)    # fixed: was misspelled 'textRext' (NameError)
    elif pos == 'topright':
        textRect.topright = (x,y)
    elif pos == 'topmid':
        textRect.centerx = x
        textRect.top = y
    elif pos == 'bottomleft':
        textRect.bottomleft = (x,y)
    elif pos == 'bottomright':
        textRect.bottomright = (x,y)    # fixed: was misspelled 'textRext' (NameError)
    elif pos == 'bottommid':
        textRect.centerx = x
        textRect.bottom = y
    elif pos == 'midleft':
        textRect.left = x
        textRect.centery = y
    elif pos == 'center':
        textRect.center = (x,y)
    elif pos == 'midright':
        textRect.centery = y
        textRect.right = x
    else:
        print('something went wrong! your input was ' + pos)
    surface.blit(textobj, textRect)
|
999,155 | 72a409dda74a302b76fe8111fca0b28455e58a8a | import numpy as np
from Galois_Field_mod_2.functions import strip_zeros, check_type
def xor(a, b):
    """Element-wise XOR of two GF(2) coefficient arrays, returned as uint8 0/1."""
    truth = np.logical_xor(a, b)
    return truth.astype("uint8")
def gf2_add(a, b):
    """Add two GF(2) polynomials given as coefficient arrays.

    Addition in GF(2) is XOR of the overlapping coefficients; the tail of
    the longer polynomial is carried over unchanged.  Inputs are validated
    and trailing zeros stripped, as is the result.
    """
    a, b = check_type(a, b)
    a, b = strip_zeros(a), strip_zeros(b)

    # order the operands so the shorter one is XORed into the longer one's head
    shorter, longer = (a, b) if len(a) <= len(b) else (b, a)
    overlap = len(shorter)
    summed = np.concatenate((xor(longer[:overlap], shorter), longer[overlap:]))
    return strip_zeros(summed)
|
999,156 | 04164c3107e662d651802db5247909ac39d78282 | '''
Estimate the P matrix:
Form the A matrix : 2N x 12 matrix
Estimate P with the right singular vector of V in svd(A)
Update with a nonlinear estimation routine
Predict on still balls on the table
'''
import pickle
import numpy as np
import scipy.linalg as linalg
import json
import os
import sys
#from sklearn import linear_model
import find_balls as fball
import cv2
import calibrate_nonlin as cal_non
sys.path.append('./python')
def load_pixels_and_pos(dataset, img_range, cam_range):
    ''' Load pixels of both cameras and 3d positions from pickled dictionary.

    The pickle maps keys to ((u0, v0, u1, v1), (x, y, z)) tuples; returns
    three arrays: Nx2 pixels of cam 0, Nx2 pixels of cam 1, Nx3 positions. '''
    pickle_file = "python/ball_locs_" + dataset + '_' + \
        str(img_range) + "_" + str(cam_range) + ".pickle"
    # binary mode ('rb') is required for pickle payloads under Python 3;
    # the with-block also closes the handle (the original leaked it)
    with open(pickle_file, 'rb') as file_obj:
        ball_locs = pickle.load(file_obj)

    pixels_0 = np.zeros((len(ball_locs), 2))
    pixels_1 = np.zeros((len(ball_locs), 2))
    pos3d = np.zeros((len(ball_locs), 3))
    for i, tuples in enumerate(ball_locs.values()):
        pixel = tuples[0]
        pos = tuples[1]
        # first two pixel coords belong to camera 0, last two to camera 1
        pixels_0[i, :] = np.array(pixel[0:2])
        pixels_1[i, :] = np.array(pixel[2:])
        pos3d[i, :] = np.array(pos)
    return pixels_0, pixels_1, pos3d
def normalize_pixels_and_pos3d(pixels, pos3d):
''' normalize the matrices before SVD '''
N = pixels.shape[0]
# normalize the images
mean_pix = np.sum(pixels, axis=0)/N
d_bar = np.sum(
np.sqrt((pixels[:, 0]-mean_pix[0])**2 + (pixels[:, 1]-mean_pix[1])**2))
T = np.zeros((3, 3))
T[0, 0] = np.sqrt(2)/d_bar
T[1, 1] = T[0, 0]
T[2, 2] = 1.0
T[0, 2] = -np.sqrt(2) * mean_pix[0]/d_bar
T[1, 2] = -np.sqrt(2) * mean_pix[1]/d_bar
# normalize the 3d positions
mean_pos = np.sum(pos3d, axis=0)/N
D_bar = np.sum(np.sqrt((pos3d[:, 0]-mean_pos[0])**2 +
(pos3d[:, 1]-mean_pos[1])**2 + (pos3d[:, 2]-mean_pos[2])**2))
U = np.zeros((4, 4))
U[0, 0] = U[1, 1] = U[2, 2] = np.sqrt(3)/D_bar
U[3, 3] = 1.0
U[0, 3] = -np.sqrt(3)*mean_pos[0]/D_bar
U[1, 3] = -np.sqrt(3)*mean_pos[1]/D_bar
U[2, 3] = -np.sqrt(3)*mean_pos[2]/D_bar
# form the A matrices
pixels = np.dot(T, np.vstack((pixels.T, np.ones((1, N)))))
pos3d = np.dot(U, np.vstack((pos3d.T, np.ones((1, N)))))
return pixels, pos3d, T, U
def estimate_proj_mat_linear(pixels, pos3d):
    ''' Linear estimation of P matrix using SVD decomposition of
    A matrix (normalized DLT: 2 equations per correspondence). '''
    N = pixels.shape[0]
    # normalize both point sets; T and U are undone again at the end
    pixels, pos3d, T, U = normalize_pixels_and_pos3d(pixels, pos3d)
    A = np.zeros((2*N, 12))
    for i in range(N):
        a = pos3d[:, i]  # a = np.hstack((pos3d[:, i], 1.0))
        A[2*i, 0:4] = a
        A[2*i, 8:] = -pixels[0, i]*a
        A[2*i+1, 4:8] = a
        A[2*i+1, 8:] = -pixels[1, i]*a
    _, S, Vh = np.linalg.svd(A, full_matrices=True)
    # the solution is the right singular vector of the smallest singular value
    P = Vh[-1, :]
    P = P.reshape((3, 4), order='C')
    # renormalize: undo the similarity transforms, P <- inv(T) P U
    P = np.linalg.solve(T, P.dot(U))
    return P
def test_score_over_table(loc_pred):
'''
test prediction accuracy over table by calculating
z- score: total deviation
y score: average distance of points on robot court [0-8 inclusive]
should be roughly table_length/4
x score: balls are on the edge always so, [-x,0,x] mesh
Geometry of placed balls
[0 1 2
5 4 3
6 7 8 (close to net)
----- [net]
10 9 (close to net)
'''
table_length = 2.76
table_width = 1.525
x_edge = table_width/2.0
x_center = 0.0
center_balls = [1, 4, 7]
left_balls = [0, 5, 6]
right_balls = [2, 3, 8]
xdifs = loc_pred[center_balls, 0] - x_center + \
loc_pred[left_balls, 0] - (-x_edge) + \
loc_pred[right_balls, 0] - x_edge
ydifs = np.sum(
np.abs(loc_pred[[0, 1, 2], 1] - loc_pred[[5, 4, 3], 1]) - table_length/4.0 +
np.abs(loc_pred[[5, 4, 3], 1] - loc_pred[[6, 7, 8], 1]) - table_length/4.0)
zdifs = np.diff(loc_pred[:, -1])
return np.sum(zdifs*zdifs) + np.sum(xdifs*xdifs) + np.sum(ydifs*ydifs)
def eval_proj_error(P, pts2d, pts3d):
    ''' Print and return the sum of squared residuals between the observed
    pixels and the projection of the 3d points through P.

    NOTE(review): pixels are compared against the first two rows of P*X
    without dividing by the homogeneous third row -- exact only for affine
    P; confirm this is intended for a general projective matrix. '''
    N = pts3d.shape[0]
    pts4d = np.vstack((pts3d.T, np.ones((1, N))))
    proj_pts = np.dot(P, pts4d)
    difs = pts2d.T - proj_pts[0:-1, :]
    res = np.sum(difs*difs)
    print('residual:', res)
    # fix: the residual was computed but never returned; return it so
    # callers can use the value programmatically
    return res
def eval_on_still_balls(P0, P1):
    ''' Evaluate camera models by triangulating to predict still balls.
    Locates the balls in both cameras' still images, triangulates them with
    P0/P1 and prints the 3d predictions plus the table-geometry score. '''
    # predict 3d ball pos on still balls
    # find ball locations for cameras 0 and 1
    # check if predictions make sense
    # for instance table_length = 2.74 m
    img_path = os.environ['HOME'] + '/Dropbox/capture_train/still'
    ball_dict = fball.find_balls(
        img_path, ranges=[1, 11], cams=[0, 1], prefix='cam')
    # NOTE(review): under Python 3 this needs list(ball_dict.values()) --
    # np.array of a dict_values view yields a 0-d object array; confirm.
    pixels = np.array(ball_dict.values())
    print('pixels for predicting 3d points:')
    print(pixels)
    # cv2.triangulatePoints requires float inputs
    P0 = P0.astype(float)
    P1 = P1.astype(float)
    pixels = pixels.astype(float)
    points4d = cv2.triangulatePoints(
        P0, P1, pixels[:, 0:2].T, pixels[:, 2:].T)
    # normalize
    points3d = points4d[0:-1, :] / points4d[-1, :]

    # LINEAR ALGEBRAIC TRIANGULATION
    # is exactly the same as cv2.triangulatePoints
    '''
    A = np.zeros((4, 4))
    points4d_mine = np.zeros(points4d.shape)
    for i in range(11):
        A[0, :] = pixels[i, 0]*P0[-1, :] - P0[0, :]
        A[1, :] = pixels[i, 1]*P0[-1, :] - P0[1, :]
        A[2, :] = pixels[i, 2]*P1[-1, :] - P1[0, :]
        A[3, :] = pixels[i, 3]*P1[-1, :] - P1[1, :]
        U, S, Vh = np.linalg.svd(A, full_matrices=True)
        points4d_mine[:, i] = Vh[-1, :]
    points3d_mine = points4d_mine[0:-1, :] / points4d_mine[-1, :]
    print(points3d_mine.T)
    '''
    print('pred 3d points:')
    print(points3d.T)
    print('score over table:', test_score_over_table(points3d.T))
def decompose_proj_mat(P):
    ''' Decompose a 3x4 projection matrix into intrinsics K, rotation R and
    translation t via an RQ factorization, so that P = K [R | t].
    (An earlier hand-rolled Cholesky-style decomposition was buggy and has
    been removed in favour of scipy's RQ transform.) '''
    intrinsics, rotation = linalg.rq(P[:, :-1])
    translation = linalg.solve(intrinsics, P[:, -1])
    # scale = intrinsics[-1, 1]
    return intrinsics, rotation, translation
def form_rot_matrix(euler_angles):
    ''' Form the rotation matrix from 3 Euler angles (elemental rotations
    about x, y and z); the product is transposed to match the convention
    used elsewhere in this module. '''
    c0, s0 = np.cos(euler_angles[0]), np.sin(euler_angles[0])
    c1, s1 = np.cos(euler_angles[1]), np.sin(euler_angles[1])
    c2, s2 = np.cos(euler_angles[2]), np.sin(euler_angles[2])
    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, c0, s0],
                      [0.0, -s0, c0]])
    rot_y = np.array([[c1, 0.0, -s1],
                      [0.0, 1.0, 0.0],
                      [s1, 0.0, c1]])
    rot_z = np.array([[c2, s2, 0.0],
                      [-s2, c2, 0.0],
                      [0.0, 0.0, 1.0]])
    return np.dot(rot_x, np.dot(rot_y, rot_z)).T
def form_extrinsic_mat(extrinsic_dict):
    ''' Assemble the 3x4 extrinsic matrix [R | t] from a dict with keys
    'euler_angles' (three angles) and 'trans_vector' (length-3 ndarray). '''
    rotation = form_rot_matrix(extrinsic_dict['euler_angles'])
    translation = np.reshape(extrinsic_dict['trans_vector'], (3, 1))
    return np.hstack((rotation, translation))
def form_intrinsic_mat(intrinsic_dict):
    """Assemble the 3x3 camera (intrinsic) matrix from named parameters.

    Layout: [[fx, shear, u0], [0, fy, v0], [0, 0, scale]].
    """
    return np.array([
        [intrinsic_dict['fx'], intrinsic_dict['shear'], intrinsic_dict['u0']],
        [0.0, intrinsic_dict['fy'], intrinsic_dict['v0']],
        [0.0, 0.0, intrinsic_dict['scale']],
    ])
# Load tracked ball pixel coordinates (both cameras) and ground-truth 3D
# positions for the two recording sessions.
pixels_0_red, pixels_1_red, pos3d_red = load_pixels_and_pos(
    dataset='red', img_range=(1510, 6080), cam_range=(0, 1))
pixels_0_black, pixels_1_black, pos3d_black = load_pixels_and_pos(
    dataset='black', img_range=(750, 7380), cam_range=(0, 1))
# Fit on the 'red' session only; 'black' serves as a held-out check below.
pixels_0 = pixels_0_red
pixels_1 = pixels_1_red
pos3d = pos3d_red
# Estimate each camera's projection matrix with the linear method.
P0 = estimate_proj_mat_linear(pixels_0, pos3d)
P1 = estimate_proj_mat_linear(pixels_1, pos3d)
eval_on_still_balls(P0, P1)
# Reprojection error on the fitting session...
eval_proj_error(P0, pixels_0_red, pos3d_red)
eval_proj_error(P1, pixels_1_red, pos3d_red)
# ...and on the held-out session.
eval_proj_error(P0, pixels_0_black, pos3d_black)
eval_proj_error(P1, pixels_1_black, pos3d_black)
# evaluate on the other (black/red) dataset
# PREPARE FOR A NONLINEAR LEAST SQUARES BASED CALIB UPDATE
A0, R0, t0 = decompose_proj_mat(P0)
A1, R1, t1 = decompose_proj_mat(P1)
'''
# USING SOLVEPNP TO REFINE DOESNT HELP
R0_vec = cv2.Rodrigues(R0)[0]
t0_new = t0.copy()
_ = cv2.solvePnPRansac(pos3d, pixels_0, cameraMatrix=A0, rvec=R0_vec, tvec=t0_new,
distCoeffs=np.zeros((4,
1)),
flags=cv2.SOLVEPNP_EPNP, # cv2.SOLVEPNP_ITERATIVE,
useExtrinsicGuess=True)
R0_new, _ = cv2.Rodrigues(R0_vec)
P0_new = A0.dot(np.hstack((R0_new, t0_new.reshape((3, 1)))))
eval_proj_error(P0_new, pixels_0, pos3d)
R1_vec = cv2.Rodrigues(R1)[0]
t1_new = t1.copy()
_ = cv2.solvePnPRansac(pos3d, pixels_1, cameraMatrix=A1, rvec=R1_vec, tvec=t1_new,
distCoeffs=np.zeros((4,
1)),
flags=cv2.SOLVEPNP_EPNP, # cv2.SOLVEPNP_ITERATIVE,
useExtrinsicGuess=True)
R1_new, _ = cv2.Rodrigues(R1_vec)
P1_new = A1.dot(np.hstack((R1_new, t1_new.reshape((3, 1)))))
eval_proj_error(P1_new, pixels_1, pos3d)
eval_on_still_balls(P0_new, P1_new)
'''
# CUSTOM NONLINEAR LS CODE
'''
cam_mat_0 = A0.copy()
cam_mat_1 = A1.copy()
out0 = cv2.decomposeProjectionMatrix(P0)
out1 = cv2.decomposeProjectionMatrix(P1)
intrinsic_dict_0 = {'fx': cam_mat_0[0, 0], 'fy': cam_mat_0[1, 1],
'shear': cam_mat_0[0, 1], 'u0': cam_mat_0[0, 2],
'v0': cam_mat_0[1, 2], 'scale': cam_mat_0[2, 2]}
intrinsic_dict_1 = {'fx': cam_mat_1[0, 0], 'fy': cam_mat_1[1, 1], 'shear':
cam_mat_1[0, 1], 'u0': cam_mat_1[0, 2], 'v0':
cam_mat_1[1, 2], 'scale': cam_mat_1[2, 2]}
extrinsic_dict_0 = {'euler_angles': out0[-1] * 2*np.pi/360,
'trans_vector': t0}
extrinsic_dict_1 = {'euler_angles': out1[-1] * 2*np.pi/360,
'trans_vector': t1}
params_0 = cal_non.est_calib_params_nonlin(distortion_dict=None,
intrinsic_dict=intrinsic_dict_0,
extrinsic_dict=extrinsic_dict_0,
pts3d=pos3d.T,
pts2d=pixels_0.T,
num_iter_max=10000,
debug=False)
params_1 = cal_non.est_calib_params_nonlin(distortion_dict=None,
intrinsic_dict=intrinsic_dict_1,
extrinsic_dict=extrinsic_dict_1,
pts3d=pos3d.T,
pts2d=pixels_1.T,
num_iter_max=10000,
debug=False)
# form P0_new and P1_new
A0_new = form_intrinsic_mat(params_0['intrinsic'])
E0_new = form_extrinsic_mat(params_0['extrinsic'])
P0_new = A0_new.dot(E0_new)
A1_new = form_intrinsic_mat(params_1['intrinsic'])
E1_new = form_extrinsic_mat(params_1['extrinsic'])
P1_new = A1_new.dot(E1_new)
eval_proj_error(P0_new, pixels_0, pos3d)
eval_proj_error(P1_new, pixels_1, pos3d)
eval_on_still_balls(P0_new, P1_new)
'''
# undistort the pixels
'''
dist_coeffs_0 = np.array(params_0['dist'].values()).astype(np.float32)
dist_coeffs_1 = np.array(params_1['dist'].values()).astype(np.float32)
pixels_0_undistort = np.zeros(pixels_0.shape)
pixels_1_undistort = np.zeros(pixels_0.shape)
# dist_coeffs_0 = np.ones((1, 8), dtype=np.float32)
# dist_coeffs_1 = np.ones((1, 8), dtype=np.float32)
pixels_0_undistort = cv2.undistortPoints(pixels_0[:, np.newaxis, :].astype(
np.float32), cam_mat_0.astype(np.float32), dist_coeffs_0)
pixels_1_undistort = cv2.undistortPoints(pixels_1[:, np.newaxis, :].astype(
np.float32), cam_mat_1.astype(np.float32), dist_coeffs_1)
'''
# compare with old calibration
'''
print('Comparing with old calibration...')
json_file = os.environ['HOME'] + \
"/table-tennis/json/server_3d_conf_ping.json"
with open(json_file, 'r') as f:
old_calib_file = json.load(f)
calibs = old_calib_file["stereo"]["calib"]
calib0 = np.array(calibs[0]['val'])
calib1 = np.array(calibs[1]['val'])
# the robot loc changed since we calibrated last time
dx = np.array([0.0, 0.0, 0.0])
R0 = calib0[:, 0:-1]
R1 = calib1[:, 0:-1]
dt0 = -np.dot(R0, dx)
dt1 = -np.dot(R1, dx)
calib0[:, -1] = calib0[:, -1] + dt0
calib1[:, -1] = calib1[:, -1] + dt1
eval_proj_error(calib0, pixels_0, pos3d)
eval_proj_error(calib1, pixels_1, pos3d)
eval_on_still_balls(calib0, calib1)
'''
|
999,157 | c80910b0c472dbd65479834c97eac0f0a05df648 | # -*- coding: utf-8 -*-
'''
This module holds unit tests. It has nothing to do with the grader tests.
'''
from django.conf import settings
from django.test import TestCase
from access.config import ConfigParser
from util.shell import invoke_script
class ConfigTestCase(TestCase):
    """Unit tests for access.config parsing/loading and the shell helper."""

    def setUp(self):
        # Fresh parser per test so course caches don't leak between tests.
        self.config = ConfigParser()

    def test_parsing(self):
        from access.config import get_rst_as_html
        # RST markup is rendered to an HTML fragment.
        self.assertEqual(get_rst_as_html('A **foobar**.'), '<p>A <strong>foobar</strong>.</p>\n')
        import re
        from access.config import iterate_kvp_with_dfs
        # NOTE(review): `re` and `iterate_kvp_with_dfs` are imported but unused here.
        # '|i18n' suffixed keys hold per-language values; '|rst' values are rendered.
        data = {
            'title|i18n': {'en': 'A Title', 'fi': 'Eräs otsikko'},
            'text|rst': 'Some **fancy** text with ``links <http://google.com>`` and code like ``echo "moi"``.'
        }
        self.config._process_exercise_data({ "lang": "en" }, data)
        # Non-i18n text is shared across languages; i18n titles differ per language.
        self.assertEqual(data["en"]["text"], data["fi"]["text"])
        self.assertEqual(data["en"]["title"], "A Title")
        self.assertEqual(data["fi"]["title"], "Eräs otsikko")

    def test_loading(self):
        # At least one course must be configured for this suite to pass.
        courses = self.config.courses()
        self.assertGreater(len(courses), 0, "No courses configured")
        course_key = courses[0]["key"]
        root = self.config._course_root(course_key)
        ptime = root["ptime"]
        # Ptime changes if cache is missed.
        root = self.config._course_root(course_key)
        self.assertEqual(ptime, root["ptime"])

    def test_shell_invoke(self):
        # With no arguments the prepare script is expected to fail (exit code 1)...
        r = invoke_script(settings.PREPARE_SCRIPT, {})
        self.assertEqual(1, r["code"])
        # ...and to succeed (exit code 0) when given a course key and work dir.
        r = invoke_script(settings.PREPARE_SCRIPT, { "course_key": "foo", "dir": settings.SUBMISSION_PATH })
        self.assertEqual(0, r["code"])
|
999,158 | 4baa497c9ef6ccb7c46c2c241f48335bc6350342 | import re
grammar = """
value : lst | var | pair
lst : "[" [value ("," value)*] "]"
pair : "(" value ":" value ")"
var : /[a-z]+/
"""
class Token(str):
    """A string subclass tagged with the lexical category it came from."""

    kind: str  # lexical category: "var", "op", "ws" or "erro"

    def __new__(cls, value, kind):
        token = super().__new__(cls, value)
        token.kind = kind
        return token

    def __repr__(self):
        # Render exactly like the underlying plain string.
        return super().__repr__()
# Token categories, each mapped to its regex.  Order matters: "erro" is a
# one-character catch-all and must stay last so real tokens win first.
REGEX_MAP = {
    "var" : r"[a-z]+",
    "op" : r"[\[\],():]",
    "ws" : r"\s+",
    "erro": r"."
}
# Single alternation with one named group per category; Match.lastgroup
# then identifies the token kind during lexing.
REGEX = re.compile("|".join(f"(?P<{k}>{v})" for k, v in REGEX_MAP.items()))
def lex(str):
    """Split the input into a list of Token objects.

    Whitespace tokens are dropped; any character matching no real token
    pattern raises SyntaxError.

    NOTE: the parameter shadows the builtin ``str``; kept as-is so the
    call signature does not change for existing callers.
    """
    tokens = []
    for m in REGEX.finditer(str):
        kind = m.lastgroup
        value = str[m.start():m.end()]
        tk = Token(value, kind)
        if kind == "ws":
            continue
        elif kind == "erro":
            # BUG FIX: this was a raw string (r"Bad token: {tk}") so the
            # offending token was never interpolated into the message.
            raise SyntaxError(f"Bad token: {tk}")
        else:
            tokens.append(tk)
    return tokens
def expect(tk, tokens):
    """Consume and return the next token, requiring it to equal *tk*."""
    head = tokens[0]
    if head == tk:
        return tokens.pop(0)
    raise SyntaxError(f"Bad token: {head}")
def parse(str):
    """Tokenize the input and parse a single value.

    The whole input must be consumed; anything left before the end
    sentinel is a syntax error.
    """
    tokens = lex(str)
    tokens.append("$")  # end-of-input sentinel
    parsed = value(tokens)
    if tokens != ["$"]:
        raise SyntaxError("espera o fim do arquivo")
    return parsed
def value(tokens):
    """Parse one value: a bracketed list, a parenthesized pair, or a variable.

    Raises SyntaxError on any other token.
    """
    if tokens[0] == "[":
        return lst(tokens)
    elif tokens[0] == "(":
        return pair(tokens)
    tk = tokens.pop(0)
    if tk.kind == "var":
        return tk
    # ROBUSTNESS FIX: previously this fell through and returned None,
    # silently corrupting the parse; reject unexpected tokens explicitly.
    raise SyntaxError(f"Bad token: {tk}")
def lst(tokens):
    """Parse a bracketed, comma-separated list of values into a Python list."""
    expect("[", tokens)
    if tokens[0] == "]":
        tokens.pop(0)  # empty list: consume the closing bracket
        return []
    items = [value(tokens)]
    sep = tokens.pop(0)
    while sep == ",":
        items.append(value(tokens))
        sep = tokens.pop(0)
    if sep != "]":
        raise SyntaxError(f"Bad token: {sep}")
    return items
def pair(tokens):
    """Parse a parenthesized "( left : right )" pair into a 2-tuple."""
    expect("(", tokens)
    first = value(tokens)
    expect(":", tokens)
    second = value(tokens)
    expect(")", tokens)
    return first, second
src = "[a,b,(c:d),[ab,cd,ef]]"
assert parse(src) == ["a", "b", ("c", "d"), ["ab", "cd", "ef"]] |
999,159 | 7f14711244619ae9057ce85058e0609fa2975af9 | number = range(1,10)
numbers = list(number)
print(numbers)
# Label each slice of interest and print it in the original format.
labelled_slices = [
    ('The first three items in the list are:', numbers[:3]),
    ('Three items from the middle of the list are:', numbers[3:6]),
    ('The last three items in the list are:', numbers[-3:]),
]
for label, chunk in labelled_slices:
    print(label + str(chunk))
|
999,160 | 2a6b39c05c7e64d4b30bca80220d89393080131a | from aiohttp import web
from openapi_core.validation.request.validators import RequestValidator
from openapi_core.validation.response.validators import ResponseValidator
from .openapi_wrappers import (
PATH_KEY,
QUERY_KEY,
AiohttpOpenAPIRequest,
AiohttpOpenAPIResponse,
)
from .rest_oas import OpenApiSpec, get_specs
from .rest_responses import create_error_response
class OpenApiValidator:
    """
    Used to validate data in the request->response cycle against openapi specs
    """

    @classmethod
    def create(cls, app: web.Application, _version=""):
        """Build a validator from the OpenAPI specs registered on *app*.

        ``_version`` is currently unused (see TODO below).
        """
        specs = get_specs(app)
        # TODO: one per version!
        return cls(specs)

    def __init__(self, spec: OpenApiSpec):
        # Separate validators for the request and response halves of the cycle.
        self._reqvtor = RequestValidator(spec, custom_formatters=None)
        self._resvtor = ResponseValidator(spec, custom_formatters=None)
        # Wrapped request kept between check_request and check_response so the
        # response can be validated against the operation that produced it.
        self.current_request = None  # wrapper request

    async def check_request(self, request: web.Request):
        """Validate an incoming request against the specs.

        Returns (path_params, query_params, body) on success; raises the
        HTTP 400 error response built by create_error_response on failure.
        """
        self.current_request = None
        rq = await AiohttpOpenAPIRequest.create(request)
        result = self._reqvtor.validate(rq)
        # keeps current request and reuses in response
        self.current_request = rq
        if result.errors:
            err = create_error_response(
                result.errors,
                "Failed request validation against API specs",
                web.HTTPBadRequest,
            )
            raise err
        path, query = [result.parameters[k] for k in (PATH_KEY, QUERY_KEY)]
        return path, query, result.body

    def check_response(self, response: web.Response):
        """Validate an outgoing response against the specs.

        Relies on ``current_request`` set by a preceding check_request call;
        raises an HTTP 503 error response on validation failure.
        """
        req = self.current_request
        res = AiohttpOpenAPIResponse(
            response, response.text
        )  # FIXME:ONLY IN SERVER side. Async in client!
        result = self._resvtor.validate(req, res)
        if result.errors:
            err = create_error_response(
                result.errors,
                "Failed response validation against API specs",
                web.HTTPServiceUnavailable,
            )
            raise err
|
999,161 | 088fd5dd7956d41e6f024a10d58e5644fba3defc | import math
def HitungLuasSegitiga(alas, tinggi):
    """Return the area of a triangle: half the base (alas) times the height (tinggi)."""
    half_base = 0.5 * alas
    return half_base * tinggi
def HitungLuasLingkaran(jari):
    """Return the area of a circle of radius *jari* (pi * r * r)."""
    area = math.pi * jari * jari
    return area
|
999,162 | db55a4e346527b8fa3bf90b63b4b54badef0a817 | #
# PySNMP MIB module RADLAN-HWENVIROMENT (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RADLAN-HWENVIROMENT
# Produced by pysmi-0.3.4 at Wed May 1 13:01:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
rnd, = mibBuilder.importSymbols("RADLAN-MIB", "rnd")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, iso, TimeTicks, Gauge32, MibIdentifier, ObjectIdentity, ModuleIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, IpAddress, Bits, Integer32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "TimeTicks", "Gauge32", "MibIdentifier", "ObjectIdentity", "ModuleIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "IpAddress", "Bits", "Integer32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
DisplayString, = mibBuilder.importSymbols("SNMPv2-TC-v1", "DisplayString")
rlEnv = ModuleIdentity((1, 3, 6, 1, 4, 1, 89, 83))
rlEnv.setRevisions(('2003-09-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rlEnv.setRevisionsDescriptions(('Added this MODULE-IDENTITY clause.',))
if mibBuilder.loadTexts: rlEnv.setLastUpdated('200309210000Z')
if mibBuilder.loadTexts: rlEnv.setOrganization('Radlan Computer Communications Ltd.')
if mibBuilder.loadTexts: rlEnv.setContactInfo('radlan.com')
if mibBuilder.loadTexts: rlEnv.setDescription('The private MIB module definition for environment of Radlan devices.')
class RlEnvMonState(TextualConvention, Integer32):
    # Generated by pysmi from the RADLAN-HWENVIROMENT MIB: an enumerated
    # INTEGER textual convention describing the state of a monitored device
    # (fan / power supply).  Values 1-6 map to the namedValues below.
    description = 'Represents the state of a device being monitored. Valid values are: normal(1): the environment is good, such as low temperature. warning(2): the environment is bad, such as temperature above normal operation range but not too high. critical(3): the environment is very bad, such as temperature much higher than normal operation limit. shutdown(4): the environment is the worst, the system should be shutdown immediately. notPresent(5): the environmental monitor is not present, such as temperature sensors do not exist. notFunctioning(6): the environmental monitor does not function properly, such as a temperature sensor generates a abnormal data like 1000 C. '
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("normal", 1), ("warning", 2), ("critical", 3), ("shutdown", 4), ("notPresent", 5), ("notFunctioning", 6))
rlEnvPhysicalDescription = MibIdentifier((1, 3, 6, 1, 4, 1, 89, 83, 1))
rlEnvMonFanStatusTable = MibTable((1, 3, 6, 1, 4, 1, 89, 83, 1, 1), )
if mibBuilder.loadTexts: rlEnvMonFanStatusTable.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonFanStatusTable.setDescription('The table of fan status maintained by the environmental monitor.')
rlEnvMonFanStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 83, 1, 1, 1), ).setIndexNames((0, "RADLAN-HWENVIROMENT", "rlEnvMonFanStatusIndex"))
if mibBuilder.loadTexts: rlEnvMonFanStatusEntry.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonFanStatusEntry.setDescription('An entry in the fan status table, representing the status of the associated fan maintained by the environmental monitor.')
rlEnvMonFanStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: rlEnvMonFanStatusIndex.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonFanStatusIndex.setDescription('Unique index for the fan being instrumented. This index is for SNMP purposes only, and has no intrinsic meaning.')
rlEnvMonFanStatusDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvMonFanStatusDescr.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonFanStatusDescr.setDescription('Textual description of the fan being instrumented. This description is a short textual label, suitable as a human-sensible identification for the rest of the information in the entry.')
rlEnvMonFanState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 1, 1, 3), RlEnvMonState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvMonFanState.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonFanState.setDescription('The mandatory state of the fan being instrumented.')
rlEnvMonSupplyStatusTable = MibTable((1, 3, 6, 1, 4, 1, 89, 83, 1, 2), )
if mibBuilder.loadTexts: rlEnvMonSupplyStatusTable.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplyStatusTable.setDescription('The table of power supply status maintained by the environmental monitor card.')
rlEnvMonSupplyStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 89, 83, 1, 2, 1), ).setIndexNames((0, "RADLAN-HWENVIROMENT", "rlEnvMonSupplyStatusIndex"))
if mibBuilder.loadTexts: rlEnvMonSupplyStatusEntry.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplyStatusEntry.setDescription('An entry in the power supply status table, representing the status of the associated power supply maintained by the environmental monitor card.')
rlEnvMonSupplyStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: rlEnvMonSupplyStatusIndex.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplyStatusIndex.setDescription('Unique index for the power supply being instrumented. This index is for SNMP purposes only, and has no intrinsic meaning.')
rlEnvMonSupplyStatusDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvMonSupplyStatusDescr.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplyStatusDescr.setDescription('Textual description of the power supply being instrumented. This description is a short textual label, suitable as a human-sensible identification for the rest of the information in the entry.')
rlEnvMonSupplyState = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 2, 1, 3), RlEnvMonState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvMonSupplyState.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplyState.setDescription('The mandatory state of the power supply being instrumented.')
rlEnvMonSupplySource = MibTableColumn((1, 3, 6, 1, 4, 1, 89, 83, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("ac", 2), ("dc", 3), ("externalPowerSupply", 4), ("internalRedundant", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlEnvMonSupplySource.setStatus('current')
if mibBuilder.loadTexts: rlEnvMonSupplySource.setDescription('The power supply source. unknown - Power supply source unknown ac - AC power supply dc - DC power supply externalPowerSupply - External power supply internalRedundant - Internal redundant power supply ')
mibBuilder.exportSymbols("RADLAN-HWENVIROMENT", PYSNMP_MODULE_ID=rlEnv, rlEnvMonSupplyStatusDescr=rlEnvMonSupplyStatusDescr, rlEnvMonFanStatusEntry=rlEnvMonFanStatusEntry, rlEnvMonFanState=rlEnvMonFanState, rlEnvMonSupplyStatusIndex=rlEnvMonSupplyStatusIndex, rlEnvMonFanStatusDescr=rlEnvMonFanStatusDescr, rlEnvMonSupplyStatusTable=rlEnvMonSupplyStatusTable, rlEnvMonSupplySource=rlEnvMonSupplySource, rlEnvPhysicalDescription=rlEnvPhysicalDescription, rlEnvMonSupplyState=rlEnvMonSupplyState, rlEnvMonFanStatusTable=rlEnvMonFanStatusTable, RlEnvMonState=RlEnvMonState, rlEnvMonFanStatusIndex=rlEnvMonFanStatusIndex, rlEnv=rlEnv, rlEnvMonSupplyStatusEntry=rlEnvMonSupplyStatusEntry)
|
999,163 | e7a339b8496a3d69f9f65ae35f74dfe4a931eb97 | # -*- coding: utf-8 -*-
import sys
from distutils.core import setup
# Stub: distutils does not support a "test" command, so exit early when
# invoked as `setup.py test` instead of letting setup() fail.
# BUG FIX: guard len(sys.argv) so a plain `python setup.py` (no arguments)
# no longer dies with an IndexError before setup() can print its usage.
if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] == 'test':
    exit()

with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='sentry-server',
    version='',
    author='Valery Sukhomlinov',
    author_email='good-guy@good-guy.me',
    packages=[],
    license='BSD',
    description='Sentry server',
    long_description=long_description,
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
    ],
)
|
999,164 | 88762bbb4c10153c5f0ddd8671e74bfacb40edb9 | """ Object detection using Pretrained Tensorflow Model
Credits: This code is modified from the online TensorFlow object detection tutorial available here:
https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
"""
#Imports
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import io
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from PIL import Image
import base64
from base64 import decodestring
# Used to run RPC server
import msgpackrpc
# This is needed since the code is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
# Utilities used in Object detection imports
from utils import label_map_util
from utils import visualization_utils as vis_util
if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')
"""
The list of available modes:
https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
"""
# The model used for the inference
MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('../data', 'mscoco_label_map.pbtxt')
# Downloading the model, in case it is not present
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading label map
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
# load image into numpy array
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 numpy array.

    Only `image.size` (width, height) and `image.getdata()` are used, so any
    object exposing that interface works.
    """
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
# Test images
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# TensorFlow Session
sess=tf.Session(graph=detection_graph)
ops = detection_graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#print(all_tensor_names)
tensor_dict = {}
for key in ['num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks']:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = detection_graph.get_tensor_by_name(tensor_name)
# Running inference on single image
def run_inference_for_single_image(image, graph):
    """Run the detection model on one image.

    Uses the module-level `sess` and `tensor_dict` prepared above; *graph*
    is only used to look up the input tensor by name.  Returns a dict with
    num_detections, detection_boxes, detection_scores, detection_classes
    and (if the model provides them) detection_masks, with the batch
    dimension stripped.  `image` is assumed to be an HxWx3 array — TODO
    confirm against callers.
    """
    # Get handles to input and output tensors
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    # Run inference (model expects a batch, hence expand_dims)
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: np.expand_dims(image, 0)})
    # all outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict[
        'detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
class InferenceServer(object):
    """msgpack-RPC service exposing the object detector over the network."""

    def Test(self, x, y):
        # Simple connectivity check: logs the request and returns x + y.
        print('Requst:',x,y)
        return x + y

    def push(self, data, time2):
        """Decode a base64 image payload, run detection, and save 'abc.jpg'.

        ``time2`` is accepted but unused.  Returns the string "Done".
        """
        image_data = base64.b64decode(data)
        image = Image.open(io.BytesIO(image_data))
        # Converting to numpy array
        image_np = load_image_into_numpy_array(image)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        # NOTE(review): image_np_expanded is never used; inference takes image_np.
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        output_dict = run_inference_for_single_image(image_np, detection_graph)
        # The results of a detection is visualized (boxes drawn in place)
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=8)
        # Persist the annotated image next to the server process.
        im = Image.fromarray(image_np)
        im.save('abc.jpg')
        print("Done")
        return "Done"
        # except Exception as inst:
#except Exception as inst:
# Start the msgpack-RPC server exposing InferenceServer on localhost:18800
# (blocks forever).
server = msgpackrpc.Server(InferenceServer())
server.listen(msgpackrpc.Address("localhost", 18800))
server.start()
# for image_path in TEST_IMAGE_PATHS:
# image = Image.open(image_path)
# # the array based representation of the image will be used later in order to prepare the
# # result image with boxes and labels on it.
# image_np = load_image_into_numpy_array(image)
# # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# image_np_expanded = np.expand_dims(image_np, axis=0)
# # Actual detection.
# output_dict = run_inference_for_single_image(image_np, detection_graph)
# # Visualization of the results of a detection.
# vis_util.visualize_boxes_and_labels_on_image_array(
# image_np,
# output_dict['detection_boxes'],
# output_dict['detection_classes'],
# output_dict['detection_scores'],
# category_index,
# instance_masks=output_dict.get('detection_masks'),
# use_normalized_coordinates=True,
# line_thickness=8)
# im = Image.fromarray(image_np)
# im.save('abc.jpg')
|
999,165 | 2edcfc584bcca9bca7e5c87634d45e56a0fa1188 | import os
import mysql.connector
from dotenv import load_dotenv
from Loggers import logger
load_dotenv()
class joins:
    """Demonstrates the MySQL JOIN variants (inner/left/right/full/cross/self).

    The connection is configured entirely from environment variables
    (HOST, USER1, PASSWD, AUTH_PLUGIN).  Every query's result rows are
    written to the shared logger; errors are logged rather than raised.
    """

    def __init__(self):
        host = os.getenv('HOST')
        user = os.getenv('USER1')
        passwd = os.getenv('PASSWD')
        auth_plugin = os.getenv('AUTH_PLUGIN')
        self.db_connection = mysql.connector.connect(
            host=host,
            user=user,
            passwd=passwd,
            auth_plugin=auth_plugin
        )
        self.db_cursor = self.db_connection.cursor()

    def _run_and_log(self, *queries):
        """Execute each SELECT in turn and log every fetched row.

        Shared helper extracted from the join methods, which previously
        repeated the same execute/fetchall/log/except boilerplate.
        """
        try:
            for query in queries:
                self.db_cursor.execute(query)
                for row in self.db_cursor.fetchall():
                    logger.info(row)
        except Exception as e:
            logger.error(e)

    def print_connection(self):
        """Log the live connection object (sanity check)."""
        try:
            logger.info(self.db_connection)
        except Exception as e:
            logger.error(e)

    def display(self):
        """Select joins_db and log the full contents of both tables."""
        try:
            self.db_cursor.execute("USE joins_db")
        except Exception as e:
            logger.error(e)
            return
        self._run_and_log("SELECT *FROM employee",
                          "SELECT *FROM customer")

    def innerjoin(self):
        """INNER JOIN: employee/customer name pairs whose IDs match."""
        self._run_and_log('''SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             INNER JOIN customer ON employee.ID = customer.ID''')

    def left_join(self):
        """LEFT JOIN: all employees, with customer data where IDs match."""
        self._run_and_log('''SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             LEFT JOIN customer ON employee.ID = customer.ID''',
                          '''SELECT employee.JOB_NAME, customer.SALARY FROM employee
                             LEFT JOIN customer ON employee.ID = customer.ID''')

    def right_join(self):
        """RIGHT JOIN: all customers, with employee data where IDs match."""
        self._run_and_log('''SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             RIGHT JOIN customer ON employee.ID = customer.ID''',
                          '''SELECT employee.JOB_NAME, customer.SALARY FROM employee
                             RIGHT JOIN customer ON employee.ID = customer.ID''')

    def full_join(self):
        """FULL (outer) JOIN, emulated as LEFT JOIN UNION RIGHT JOIN.

        (Docstring fixed: it previously claimed to perform a RIGHT JOIN.)
        """
        self._run_and_log('''SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             LEFT JOIN customer ON employee.ID = customer.ID
                             UNION SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             RIGHT JOIN customer ON employee.ID = customer.ID''')

    def cross_join(self):
        """CROSS JOIN: the cartesian product of the two tables."""
        self._run_and_log('''SELECT employee.EMP_NAME, customer.CUST_NAME FROM employee
                             CROSS JOIN customer ''',
                          '''SELECT employee.JOB_NAME, customer.SALARY FROM employee
                             CROSS JOIN customer ''')

    def self_join(self):
        """SELF JOIN: employee rows paired against other rows of the same table."""
        self._run_and_log('''SELECT A.ID AS ID1, B.ID AS ID2, A.job_name
                             FROM employee A, employee B
                             WHERE A.emp_name = B.emp_name''',
                          '''SELECT A.id,B.emp_name
                             FROM employee AS A, employee B
                             WHERE A.job_name = B.job_name''')
if __name__ == "__main__":
join = joins()
join.print_connection()
join.display()
join.innerjoin()
join.left_join()
join.right_join()
join.full_join()
join.cross_join()
join.self_join()
|
999,166 | e564f265b15bf29abe33554e15aeb1608ceea1de | import tensorflow as tf
import pickle
def build_estimator(model_dir, model_type="combined"):
    """Build a TF 1.x regressor for the sales-prediction feature set.

    Parameters
    ----------
    model_dir : str
        Directory where the estimator writes checkpoints and summaries.
    model_type : str
        ``"wide"`` -> LinearRegressor, ``"deep"`` -> DNNRegressor,
        anything else -> DNNLinearCombinedRegressor (the default).

    Returns
    -------
    A ``tf.contrib.learn`` estimator, ready for ``fit``/``evaluate``.
    """
    def _load_keys(path):
        # Each .txt file holds a pickled Python list of category keys.
        with open(path, 'rb') as f:
            return pickle.load(f)

    list_articles = _load_keys('list_articles.txt')
    list_productgroup = _load_keys('list_productgroup.txt')
    list_category = _load_keys('list_category.txt')
    list_sizes = _load_keys('list_sizes.txt')
    list_month = _load_keys('list_month.txt')

    # Sparse (categorical) base columns.
    sparse = tf.contrib.layers.sparse_column_with_keys
    country = sparse(column_name="country", keys=['Germany', 'Austria', 'France'])
    promo1 = sparse(column_name="promo1", keys=['1', '0'])
    promo2 = sparse(column_name="promo2", keys=["0", "1"])
    article = sparse(column_name="article", keys=list_articles)
    productgroup = sparse(column_name="productgroup", keys=list_productgroup)
    category = sparse(column_name="category", keys=list_category)
    style = sparse(column_name="style", keys=['wide', 'slim', 'regular'])
    sizes = sparse(column_name="sizes", keys=list_sizes)
    gender = sparse(column_name="gender", keys=['unisex', 'men', 'kids', 'women'])
    month = sparse(column_name="month", keys=list_month)

    # Continuous base columns.
    real = tf.contrib.layers.real_valued_column
    regular_price = real("regular_price")
    current_price = real("current_price")
    ratio = real("ratio")
    cost = real("cost")
    day = real("day")
    week = real("week")
    year = real("year")
    dayofyear = real("dayofyear")
    rgb_r_main_col = real("rgb_r_main_col")
    rgb_g_main_col = real("rgb_g_main_col")
    rgb_b_main_col = real("rgb_b_main_col")
    rgb_r_sec_col = real("rgb_r_sec_col")
    rgb_g_sec_col = real("rgb_g_sec_col")
    rgb_b_sec_col = real("rgb_b_sec_col")

    # All six RGB channels share the same 8-bucket grid (hoisted from six
    # identical inline boundary lists).
    rgb_boundaries = [32, 64, 96, 128, 160, 192, 224]

    def _rgb_buckets(col):
        return tf.contrib.layers.bucketized_column(col, boundaries=rgb_boundaries)

    # Wide part: raw categoricals plus crossed main-/secondary-colour buckets.
    wide_columns = [country, promo1, promo2, article, productgroup,
                    category, style, sizes, gender,
                    tf.contrib.layers.crossed_column(
                        [_rgb_buckets(rgb_r_main_col),
                         _rgb_buckets(rgb_g_main_col),
                         _rgb_buckets(rgb_b_main_col)],
                        hash_bucket_size=int(1e6)),
                    tf.contrib.layers.crossed_column(
                        [_rgb_buckets(rgb_r_sec_col),
                         _rgb_buckets(rgb_g_sec_col),
                         _rgb_buckets(rgb_b_sec_col)],
                        hash_bucket_size=int(1e6)),
                    day, week, month, year]

    # Deep part: embeddings for categoricals plus the raw continuous features.
    deep_columns = [
        tf.contrib.layers.embedding_column(country, dimension=2),
        tf.contrib.layers.embedding_column(promo1, dimension=2),
        tf.contrib.layers.embedding_column(promo2, dimension=2),
        tf.contrib.layers.embedding_column(article, dimension=9),
        tf.contrib.layers.embedding_column(productgroup, dimension=3),
        tf.contrib.layers.embedding_column(category, dimension=5),
        tf.contrib.layers.embedding_column(style, dimension=2),
        tf.contrib.layers.embedding_column(sizes, dimension=3),
        tf.contrib.layers.embedding_column(gender, dimension=2),
        tf.contrib.layers.embedding_column(month, dimension=4),
        regular_price, current_price, ratio, cost,
        day, week, year, dayofyear,
        rgb_r_main_col, rgb_g_main_col, rgb_b_main_col,
        rgb_r_sec_col, rgb_g_sec_col, rgb_b_sec_col
    ]

    if model_type == "wide":
        m = tf.contrib.learn.LinearRegressor(model_dir=model_dir,
                                             feature_columns=wide_columns)
    elif model_type == "deep":
        m = tf.contrib.learn.DNNRegressor(model_dir=model_dir,
                                          feature_columns=deep_columns,
                                          hidden_units=[100, 50])
    else:
        m = tf.contrib.learn.DNNLinearCombinedRegressor(
            model_dir=model_dir,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[50, 30, 10],
            # dnn_dropout=0.5,
            dnn_optimizer=tf.train.ProximalAdagradOptimizer(learning_rate=0.1
                                                            # ,
                                                            # l1_regularization_strength=0.001,
                                                            # l2_regularization_strength=0.001
                                                            ),
            fix_global_step_increment_bug=True,
            config=tf.contrib.learn.RunConfig(keep_checkpoint_max=3, save_checkpoints_secs=100))
    return m
|
999,167 | 80cf6eb92061c666a496c93336cac9a38f195e15 | from django.http.response import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import Group
from django.shortcuts import render,redirect
from vehicles import forms,models
from django.contrib.auth.decorators import login_required,user_passes_test
from django.db.models import Q,Sum
#########################################################################################################################
# HOME VIEWS BEGINS
#########################################################################################################################
def home_view(request):
    """Landing page; already-authenticated users go straight to role dispatch."""
    if not request.user.is_authenticated:
        return render(request, 'vehicles/index.html')
    return HttpResponseRedirect('afterlogin')
def afterlogin_view(request):
    """Send a freshly logged-in user to the dashboard matching their role.

    Mechanics only reach their dashboard once an admin has approved them
    (Mechanic.status == True); anyone without a role goes to /admin.
    """
    if is_customer(request.user):
        return redirect('customer-dashboard')
    if is_mechanic(request.user):
        approved = models.Mechanic.objects.all().filter(user_id=request.user.id, status=True)
        if approved:
            return redirect('mechanic-dashboard')
        return render(request, 'vehicles/mechanic/wait_for_approval.html')
    return redirect('/admin')
def aboutus_view(request):
    """Render the about-us page with the template matching the user's role."""
    groups = request.user.groups
    if groups.filter(name='CUSTOMER').exists():
        template = 'vehicles/customer/aboutus.html'
    elif groups.filter(name='MECHANIC').exists():
        template = 'vehicles/mechanic/aboutus.html'
    else:
        template = 'vehicles/aboutus.html'
    return render(request, template)
def contactus_view(request):
    """Show/process the contact form; on success the message is printed to
    the console (no mail backend) and the user is redirected."""
    sub = forms.ContactusForm()
    if request.method == 'POST':
        sub = forms.ContactusForm(request.POST)
        if sub.is_valid():
            data = sub.cleaned_data
            print(data['Email'], data['Name'], data['Message'])
            return redirect('')
    # Pick the role-specific template; an invalid POST re-renders the bound form.
    groups = request.user.groups
    if groups.filter(name='CUSTOMER').exists():
        template = 'vehicles/customer/contactus.html'
    elif groups.filter(name='MECHANIC').exists():
        template = 'vehicles/mechanic/contactus.html'
    else:
        template = 'vehicles/contactus.html'
    return render(request, template, {'form': sub})
#########################################################################################################################
# Home View Ends:
#########################################################################################################################
#########################################################################################################################
# Customers View Begins:
#########################################################################################################################
def customer_signup_view(request):
    """Register a customer: create the auth User, hash the password, attach
    the Customer profile, and add the user to the CUSTOMER group."""
    userForm = forms.CustomerUserForm()
    customerForm = forms.CustomerForm()
    context = {'userForm': userForm, 'customerForm': customerForm}
    if request.method == 'POST':
        userForm = forms.CustomerUserForm(request.POST)
        customerForm = forms.CustomerForm(request.POST, request.FILES)
        if userForm.is_valid() and customerForm.is_valid():
            user = userForm.save()
            # The form stored the raw password; re-save it hashed.
            user.set_password(user.password)
            user.save()
            customer = customerForm.save(commit=False)
            customer.user = user
            customer.save()
            group, _ = Group.objects.get_or_create(name='CUSTOMER')
            group.user_set.add(user)
            return redirect('customerlogin')
    return render(request, 'vehicles/customer/signup.html', context)
def customerclick_view(request):
    """Customer entry page; authenticated users are forwarded to role dispatch."""
    if not request.user.is_authenticated:
        return render(request, 'vehicles/customer/click.html')
    return HttpResponseRedirect('afterlogin')
def is_customer(user):
    """Return True when *user* belongs to the CUSTOMER group."""
    customer_groups = user.groups.filter(name='CUSTOMER')
    return customer_groups.exists()
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_dashboard_view(request):
    """Summary dashboard for the logged-in customer: request counts by state
    and the total bill over finished work."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    base = models.Request.objects.all().filter(customer_id=customer.id)
    finished = Q(status="Repairing Done") | Q(status="Released")
    bill = base.filter(finished).aggregate(Sum('cost'))
    if bill['cost__sum'] is None:
        # No finished work yet -> show 0 instead of None.
        bill['cost__sum'] = 0
    context = {
        'work_in_progress': base.filter(status='Repairing').count(),
        'work_completed': base.filter(finished).count(),
        'new_request_made': base.filter(Q(status="Pending") | Q(status="Approved")).count(),
        'bill': bill['cost__sum'],
        'customer': customer,
    }
    return render(request, 'vehicles/customer/dashboard.html', context=context)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_profile_view(request):
    """Show the logged-in customer's profile page."""
    profile = models.Customer.objects.get(user_id=request.user.id)
    return render(request, 'vehicles/customer/profile.html', {'customer': profile})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def edit_customer_profile_view(request):
    """Let a customer edit their account and profile.

    Bug fixes: the GET-time profile form previously received
    ``request.FILES`` as its *data* argument (binding an empty form instead
    of showing the current profile), and the POST-time form omitted
    ``request.FILES`` so uploaded profile pictures were silently dropped.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    user = models.User.objects.get(id=customer.user_id)
    userForm = forms.CustomerUserForm(instance=user)
    customerForm = forms.CustomerForm(instance=customer)  # unbound, pre-filled from the profile
    mydict = {'userForm': userForm, 'customerForm': customerForm, 'customer': customer}
    if request.method == 'POST':
        userForm = forms.CustomerUserForm(request.POST, instance=user)
        customerForm = forms.CustomerForm(request.POST, request.FILES, instance=customer)
        if userForm.is_valid() and customerForm.is_valid():
            user = userForm.save()
            # The form stored the raw password; re-save it hashed.
            user.set_password(user.password)
            user.save()
            customerForm.save()
            return HttpResponseRedirect('customer-dashboard')
    return render(request, 'vehicles/customer/edit_profile.html', context=mydict)
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_request_view(request):
    """Landing page for the customer's service-request section."""
    profile = models.Customer.objects.get(user_id=request.user.id)
    return render(request, 'vehicles/customer/request.html', {'customer': profile})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_view_request(request):
    """Show the logged-in customer's still-pending service requests.

    Fix: this was the only customer view without the login/role guards, so
    an anonymous hit crashed on the Customer lookup; the same decorators
    used by every sibling customer view are now applied.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    enquiries = models.Request.objects.all().filter(customer_id=customer.id, status="Pending")
    return render(request, 'vehicles/customer/view_request.html', {'customer': customer, 'enquiries': enquiries})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_delete_request_view(request, pk):
    """Delete a service request, but only while it is still pending.

    NOTE(review): the lookup is by pk alone — there is no check that the
    request belongs to the logged-in customer; confirm whether that is
    intended.
    """
    enquiry = models.Request.objects.get(id=pk)
    if enquiry.status == 'Pending':
        enquiry.delete()
    else:
        print("You Can Only delete Pending Request")
    return redirect('customer-view-request')
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_add_request(request):
    """Create a new service request owned by the logged-in customer.

    Fix: the customer row was fetched twice (once at the top and again
    inside the valid-POST branch); the redundant second query is removed.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    enquiry = forms.RequestForm()
    if request.method == 'POST':
        enquiry = forms.RequestForm(request.POST)
        if enquiry.is_valid():
            enquiry_x = enquiry.save(commit=False)
            enquiry_x.customer = customer  # reuse the row fetched above
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('customer-dashboard')
    return render(request, 'vehicles/customer/add-request.html', {'enquiry': enquiry, 'customer': customer})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_approved_request(request):
    """List every non-pending (i.e. already processed) request of the customer."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    processed = models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
    return render(request, 'vehicles/customer/approved_request.html', {'customer': customer, 'enquiries': processed})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_approved_request_invoice(request):
    """Invoice view over the customer's non-pending requests."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    processed = models.Request.objects.all().filter(customer_id=customer.id).exclude(status='Pending')
    return render(request, 'vehicles/customer/approved_request_invoice.html', {'customer': customer, 'enquiries': processed})
@login_required(login_url='customerlogin')
@user_passes_test(is_customer)
def customer_feedback_view(request):
    """Show/submit the feedback form; any POST redirects back to the dashboard."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    feedback = forms.FeedbackForm()
    if request.method == 'POST':
        feedback = forms.FeedbackForm(request.POST)
        if feedback.is_valid():
            feedback.save()
        else:
            print("form is invalid")
        return redirect('customer-dashboard')
    return render(request, 'vehicles/customer/feedback.html', {'feedback': feedback, 'customer': customer})
#########################################################################################################################
# Customers View End:
#########################################################################################################################
#########################################################################################################################
# Mechanic View Begins:
#########################################################################################################################
def mechanicclick_view(request):
    """Mechanic entry page; authenticated users are forwarded to role dispatch."""
    if not request.user.is_authenticated:
        return render(request, 'vehicles/mechanic/click.html')
    return HttpResponseRedirect('afterlogin')
def mechanic_signup_view(request):
    """Register a mechanic: create the auth User, hash the password, attach
    the Mechanic profile, and add the user to the MECHANIC group."""
    userForm = forms.MechanicUserForm()
    mechanicForm = forms.MechanicForm()
    context = {'userForm': userForm, 'mechanicForm': mechanicForm}
    if request.method == 'POST':
        userForm = forms.MechanicUserForm(request.POST)
        mechanicForm = forms.MechanicForm(request.POST, request.FILES)
        if userForm.is_valid() and mechanicForm.is_valid():
            user = userForm.save()
            # The form stored the raw password; re-save it hashed.
            user.set_password(user.password)
            user.save()
            mechanic = mechanicForm.save(commit=False)
            mechanic.user = user
            mechanic.save()
            group, _ = Group.objects.get_or_create(name='MECHANIC')
            group.user_set.add(user)
            return redirect('mechaniclogin')
    return render(request, 'vehicles/mechanic/signup.html', context)
def is_mechanic(user):
    """Return True when *user* belongs to the MECHANIC group."""
    mechanic_groups = user.groups.filter(name='MECHANIC')
    return mechanic_groups.exists()
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_dashboard_view(request):
    """Summary dashboard for the logged-in mechanic: assignment counts and salary."""
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    assigned = models.Request.objects.all().filter(mechanic_id=mechanic.id)
    context = {
        'work_in_progress': assigned.filter(status='Repairing').count(),
        'work_completed': assigned.filter(status='Repairing Done').count(),
        'new_work_assigned': assigned.filter(status='Approved').count(),
        'salary': mechanic.salary or 0,  # an unset salary renders as 0
        'mechanic': mechanic,
    }
    return render(request, 'vehicles/mechanic/dashboard.html', context=context)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_profile_view(request):
    """Show the logged-in mechanic's profile page."""
    profile = models.Mechanic.objects.get(user_id=request.user.id)
    return render(request, 'vehicles/mechanic/profile.html', {'mechanic': profile})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def edit_mechanic_profile_view(request):
    """Let a mechanic edit their account and profile.

    Bug fix: the GET-time profile form previously received ``request.FILES``
    as its *data* argument, binding an empty form instead of showing the
    current profile; it is now built unbound from the instance.
    """
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    user = models.User.objects.get(id=mechanic.user_id)
    userForm = forms.MechanicUserForm(instance=user)
    mechanicForm = forms.MechanicForm(instance=mechanic)  # unbound, pre-filled from the profile
    mydict = {'userForm': userForm, 'mechanicForm': mechanicForm, 'mechanic': mechanic}
    if request.method == 'POST':
        userForm = forms.MechanicUserForm(request.POST, instance=user)
        mechanicForm = forms.MechanicForm(request.POST, request.FILES, instance=mechanic)
        if userForm.is_valid() and mechanicForm.is_valid():
            user = userForm.save()
            # The form stored the raw password; re-save it hashed.
            user.set_password(user.password)
            user.save()
            mechanicForm.save()
            return redirect('mechanic-profile')
    return render(request, 'vehicles/mechanic/edit_profile.html', context=mydict)
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def work_assigned_view(request):
    """List every service request assigned to the logged-in mechanic."""
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    assigned = models.Request.objects.all().filter(mechanic_id=mechanic.id)
    return render(request, 'vehicles/mechanic/work_assigned.html', {'works': assigned, 'mechanic': mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def update_status_view(request, pk):
    """Let a mechanic update the status of request ``pk``.

    Fix: this view was defined twice, byte-for-byte identical; the second
    definition silently shadowed the first, so the duplicate is removed.
    """
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    updateStatus = forms.MechanicUpdateStatusForm()
    if request.method == 'POST':
        updateStatus = forms.MechanicUpdateStatusForm(request.POST)
        if updateStatus.is_valid():
            enquiry_x = models.Request.objects.get(id=pk)
            enquiry_x.status = updateStatus.cleaned_data['status']
            enquiry_x.save()
        else:
            print("form is invalid")
        return HttpResponseRedirect('/mechanic-work-assigned')
    return render(request, 'vehicles/mechanic/update_status.html', {'updateStatus': updateStatus, 'mechanic': mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def salary_view(request):
    """Show the finished work of the logged-in mechanic (basis for pay)."""
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    finished = Q(status="Repairing Done") | Q(status="Released")
    workdone = models.Request.objects.all().filter(mechanic_id=mechanic.id).filter(finished)
    return render(request, 'vehicles/mechanic/salary.html', {'workdone': workdone, 'mechanic': mechanic})
@login_required(login_url='mechaniclogin')
@user_passes_test(is_mechanic)
def mechanic_feedback_view(request):
    """Show/submit the feedback form; any POST redirects back to the dashboard."""
    mechanic = models.Mechanic.objects.get(user_id=request.user.id)
    feedback = forms.FeedbackForm()
    if request.method == 'POST':
        feedback = forms.FeedbackForm(request.POST)
        if feedback.is_valid():
            feedback.save()
        else:
            print("form is invalid")
        return redirect('mechanic-dashboard')
    return render(request, 'vehicles/mechanic/feedback.html', {'feedback': feedback, 'mechanic': mechanic})
#########################################################################################################################
# Mechanic View Ends:
#########################################################################################################################
#for showing signup/login button for ADMIN(by varun)
def adminclick_view(request):
    """Admin entry point: logged-in users go to role dispatch, others to the admin login."""
    if not request.user.is_authenticated:
        return HttpResponseRedirect('adminlogin')
    return HttpResponseRedirect('afterlogin')
999,168 | ea536a9a1353cb1f90c85851641dbae242a225d8 | # coding = utf-8
'''
@author = super_fazai
@File : simple_menu.py
@Time : 2017/8/14 19:30
@connect : superonesfazai@gmail.com
'''
"""
创建一个菜单栏, 该菜单栏有一个退出操作的菜单
"""
'''
(Mac OS以不同的方式处理菜单栏)
要获得类似的结果, 我们可以添加以下行: menubar.setNativeMenuBar(False)
'''
import sys
from PyQt5.QtWidgets import (QMainWindow, QAction, qApp, QApplication)
from PyQt5.QtGui import QIcon
class Example(QMainWindow):
    """Main window demonstrating a minimal menu bar with a File > Exit action."""

    def __init__(self):
        super().__init__()
        self.init_ui()

    def init_ui(self):
        # QAction is the abstraction for a menu/toolbar/shortcut command:
        # here an "Exit" entry with an icon and label.
        quit_action = QAction(QIcon('../images/imagespython.jpg'), '&Exit', self)
        quit_action.setShortcut('Ctrl+Q')                # keyboard shortcut
        quit_action.setStatusTip('Exit application')     # hint shown in the status bar
        # Triggering the action quits the QApplication event loop.
        quit_action.triggered.connect(qApp.quit)

        self.statusBar()  # create the status bar so the tip has somewhere to show

        menubar = self.menuBar()
        file_menu = menubar.addMenu('&File')
        file_menu.addAction(quit_action)
        # Keep the menu bar inside the window instead of macOS's native top bar.
        menubar.setNativeMenuBar(False)

        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('simple menu')
        self.show()
if __name__ == "__main__":
    app = QApplication(sys.argv)  # the Qt application object; owns the event loop
    ex = Example()  # build and show the demo window
    sys.exit(app.exec_())  # run the event loop; its exit status is returned to the shell
|
999,169 | c14c5db50cb5b6d4180c2db0684324bc5d288965 | import asyncio
import json
import typer
from hue import Light
from hue.cli.console import console
app = typer.Typer()
@app.command()
def info(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
):
    """List all the information about a Hue Light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).get_info())
    console.print(f"[{ip}] Light {id}:\n{json.dumps(payload, indent=2)}")
@app.command()
def get(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
):
    """Get the state of a Light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).get_state())
    console.print(f"[{ip}] Light {id} State:\n{json.dumps(payload, indent=2)}")
@app.command()
def on(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
):
    """Power on a light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).power_on())
    console.print(f"[{ip}] Light {id} On:\n{json.dumps(payload, indent=2)}")
@app.command()
def off(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
):
    """Power off a light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).power_off())
    console.print(f"[{ip}] Light {id} Off:\n{json.dumps(payload, indent=2)}")
@app.command()
def toggle(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
):
    """Toggle the power state of a light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).toggle())
    console.print(f"[{ip}] Light {id} Toggle:\n{json.dumps(payload, indent=2)}")
@app.command()
def brightness(
    id: int = typer.Argument(1),
    ip: str = typer.Option(..., "--ip", "-i", envvar="HUE_BRIDGE_IP"),
    user: str = typer.Option(..., "--user", "-u", envvar="HUE_BRIDGE_USER"),
    brightness: int = typer.Option(..., "--brightness", "-b", min=1, max=255),
):
    """Set the brightness of a light"""
    # Build the light wrapper and run the async request to completion.
    payload = asyncio.run(Light(id, ip=ip, user=user).set_brightness(brightness))
    console.print(f"[{ip}] Light {id} Brightness:\n{json.dumps(payload, indent=2)}")
|
999,170 | d4a8930ad54e6051b09c91174f2e8a00003b2ea5 | # face detection with mtcnn on a photograph. this code is to identify lips portion in face and fetch color of it.
# Then verify the selected lipstick color is matched with lips color after the lipstick has been applied.
import re
import time
import allure
import cv2
from PIL import Image
from allure_commons.types import AttachmentType
from matplotlib import pyplot
from matplotlib.patches import Circle
from matplotlib.patches import Rectangle
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from mtcnn.mtcnn import MTCNN
# draw an image with detected objects
def _lip_region_matches(image_rgb, lip_point_x, lip_point_y, chosen):
    """Scan the rectangle spanned by the two mouth-corner points and return
    True when each RGB channel, somewhere in the region, came within its
    tolerance (+/-20, +/-22, +/-30) of the chosen colour. Channel matches are
    sticky over the whole region, as in the original implementation."""
    matched = [False, False, False]
    tolerances = (20, 22, 30)
    x0, x1 = lip_point_x[0], lip_point_x[1]
    y0, y1 = lip_point_y[0], lip_point_y[1]
    step = 1 if y0 <= y1 else -1  # the second corner may be above or below the first
    for j in range(y0, y1 + step, step):
        # BUGFIX: the original never reset the x cursor per row, so only the
        # first row of the region was ever sampled.
        for i in range(x0, x1 + 1):
            pixel = image_rgb.getpixel((i, j))
            for c in range(3):
                if chosen[c] - tolerances[c] <= pixel[c] <= chosen[c] + tolerances[c]:
                    matched[c] = True
    return all(matched)


def draw_image_with_boxes(filename, result_list, inputColor):
    """Draw MTCNN face boxes and mouth-corner dots on *filename*, then check
    whether *inputColor* (any string containing the R,G,B integers, e.g. a
    CSS "rgba(...)" value) appears around the detected lips.

    Fixes vs. the original: the region scan samples every row (see
    ``_lip_region_matches``), the bare ``except:`` is narrowed to
    ``except Exception``, and the noisy per-pixel debug prints are removed.
    Keeps the original pytest-style ``assert False`` failure behaviour.
    """
    try:
        data = pyplot.imread(filename)
        pyplot.imshow(data)
        ax = pyplot.gca()
        mouth_points_seen = 0  # counted across all detected faces, as before
        pyplot.show(block=False)
        for result in result_list:
            x, y, width, height = result['box']
            ax.add_patch(Rectangle((x, y), width, height, fill=False, color='red'))
            lip_point_x = []
            lip_point_y = []
            for key, value in result['keypoints'].items():
                if key in ('mouth_left', 'mouth_right'):
                    mouth_points_seen += 1
                    lip_point_x.append(value[0])
                    lip_point_y.append(value[1])
                    if mouth_points_seen == 2:
                        # Both mouth corners found: mark the spot and compare colours.
                        ax.add_patch(Circle(value, radius=5, color='red'))
                        image_rgb = Image.open(filename).convert("RGB")
                        chosen = list(map(int, re.findall(r'\d+', inputColor)))
                        if _lip_region_matches(image_rgb, lip_point_x, lip_point_y, chosen):
                            print("Selected color has been applied to lips successfully")
                        else:
                            print("Selected color has not been applied to lips")
        pyplot.draw()
        time.sleep(5)
        pyplot.show(block=False)
        time.sleep(5)
        pyplot.close('all')
        assert True
    except Exception:
        print("Something went wrong")
        assert False
# _____________________________________________________________________________________________________________________
# selenium: navigate to page and take screenshot
def selenium_get_screenshot(filename):
    """Drive Chrome through the MAC lipstick try-on flow, save a screenshot
    of the live-camera frame to *filename*, and return the CSS
    background-color string of the selected shade (e.g. "rgba(176, 28, 46, 1)").

    Fixes vs. the original: ``selected_color`` is initialised up front (the
    old code raised ``UnboundLocalError`` from ``return selected_color``
    when the flow failed before a shade was read), and the bare ``except:``
    is narrowed to ``except Exception``.
    """
    selected_color = None  # returned as-is if the flow fails before a shade is picked
    options = Options()
    options.add_argument("start-maximized")
    # Pre-grant camera access and notifications so Chrome shows no permission prompt.
    options.add_experimental_option("prefs", { \
        # "profile.default_content_setting_values.media_stream_mic": 1,
        "profile.default_content_setting_values.media_stream_camera": 1,
        # "profile.default_content_setting_values.geolocation": 1,
        "profile.default_content_setting_values.notifications": 1
    })
    driver = webdriver.Chrome(options=options,
                              executable_path=r'C:\Program Files\Google\Chrome\Application\chromedriver.exe')
    wait = WebDriverWait(driver, 60)
    driver.implicitly_wait(20)
    print("Navigating to page")
    driver.get('https://www.maccosmetics.com/product/13854/60284/products/makeup/lips/lipstick/powder-kiss-lipstick#/')
    try:
        # Pick the sixth shade swatch and remember its CSS colour.
        inputElement = driver.find_element_by_xpath("(//div[@class='product-full__shade-swatch'])[6]")
        inputElement.click()
        allure.attach(driver.get_screenshot_as_png(), name="Clicked on a color shade",
                      attachment_type=AttachmentType.PNG)
        selected_color = inputElement.value_of_css_property('background-color')
        print("Selected color RGB", selected_color)
        time.sleep(5)
        # Hover the gift banner so the try-on button becomes clickable.
        inputElement = driver.find_element_by_xpath("//div[@class='product-smart-gift__content']")
        a = ActionChains(driver)
        a.move_to_element(inputElement).perform()
        allure.attach(driver.get_screenshot_as_png(), name="Clicked on TryItOn Button",
                      attachment_type=AttachmentType.PNG)
        driver.find_element_by_xpath(
            "//a[@class='js-youcambtn product-vto__btn button cta-vto jquery-once-1-processed']").click()
        time.sleep(3)
        # The try-on widget lives in an iframe.
        iframe = driver.find_element_by_id("YMK-module-iframe")
        driver.switch_to.frame(iframe)
        frameElement = driver.find_element_by_xpath("//div[@class='frame-content']")
        if frameElement.is_displayed():
            inputElement = driver.find_element_by_xpath("//div[contains(text(),'LIVE CAMERA')]")
            allure.attach(driver.get_screenshot_as_png(), name="Clicked on Camera Button",
                          attachment_type=AttachmentType.PNG)
            inputElement.click()
            # Verify the webcam actually opened before trusting the screenshot.
            cap = cv2.VideoCapture(0)
            if not cap.isOpened():
                raise IOError("Cannot open webcam")
            else:
                print("Webcam is opened")
            wait.until(EC.visibility_of_element_located(
                (By.XPATH, "//img[contains(@src,'bt-shot-cdfdef1fc81df7b156d4e5ff0b8c397c.png')]")))
            time.sleep(3)
            frameElement.screenshot(filename)
            allure.attach(driver.get_screenshot_as_png(), name="Camera Screenshot saved",
                          attachment_type=AttachmentType.PNG)
            with open(filename, 'rb') as image:
                file = image.read()
                byte_array = bytearray(file)
                allure.attach(byte_array, name="Camera", attachment_type=AttachmentType.PNG)
            assert True
    except Exception:
        print("Something went wrong")
        assert False
    finally:
        driver.quit()
    return selected_color
# _____________________________________________________________________________________________________________________
def test_AR():
    """End-to-end AR check: capture a try-on screenshot, detect the face with
    MTCNN, then verify the chosen lipstick colour around the lips."""
    screenshot = 'TryitOn.png'
    chosen_color = selenium_get_screenshot(screenshot)
    pixels = pyplot.imread(screenshot)
    faces = MTCNN().detect_faces(pixels)  # MTCNN with default weights
    draw_image_with_boxes(screenshot, faces, chosen_color)
|
999,171 | 4dc402b498528e4a0afe87d51497a954b24a3bcb | import logging
from typing import Tuple, Callable, Any, Generator, NamedTuple, Union, List
import numpy as np
import pandas as pd
from pandas_ml_common.sampling.cross_validation import PartitionedOnRowMultiIndexCV
from pandas_ml_common.utils import call_callable_dynamic_args, intersection_of_index, loc_if_not_none, exec_if_not_none
_log = logging.getLogger(__name__)
class XYWeight(NamedTuple):
    """Bundle of feature (x), label (y) and sample-weight frames."""
    x: pd.DataFrame
    y: pd.DataFrame = None
    weight: pd.DataFrame = None

    def to_dict(self, loc=None):
        """Return the three frames keyed "x"/"y"/"weight"; when *loc* is
        given, each non-None frame is located at *loc* first."""
        frames = {"x": self.x, "y": self.y, "weight": self.weight}
        if loc is None:
            return frames
        return {name: loc_if_not_none(frame, loc) for name, frame in frames.items()}
class FoldXYWeight(NamedTuple):
    """One batch of x/y/weight frames tagged with its epoch/fold position."""
    epoch: int  # epoch counter
    fold: int  # cross-validation fold index
    epoch_fold: int  # presumably the epoch counter within the fold — TODO confirm
    x: pd.DataFrame  # features
    y: pd.DataFrame = None  # labels (optional)
    weight: pd.DataFrame = None  # per-sample weights (optional)
class Sampler(object):
    """Generates FoldXYWeight training batches from aligned x/y/weight frames.

    Supports an optional train/test splitter, optional cross validation
    (scikit-learn style or a plain callable), multiple epochs, per-fold
    epochs and mini-batching.  All on_*/after_* lifecycle callbacks are
    invoked through `call_callable_dynamic_args`, i.e. each callback only
    receives the keyword arguments its signature actually declares.
    """

    def __init__(
            self,
            frames: XYWeight,
            splitter: Callable[[Any], Tuple[pd.Index, pd.Index]] = None,
            filter: Callable[[Any], bool] = None,
            cross_validation: Union['BaseCrossValidator', Callable[[Any], Generator[Tuple[np.ndarray, np.ndarray], None, None]]] = None,
            epochs: int = 1,
            batch_size: int = None,
            fold_epochs: int = 1,
            on_start: Callable = None,
            on_epoch: Callable = None,
            on_batch: Callable = None,
            on_fold: Callable = None,
            on_fold_epoch: Callable = None,
            after_epoch: Callable = None,
            after_batch: Callable = None,
            after_fold: Callable = None,
            after_fold_epoch: Callable = None,
            after_end: Callable = None,
            **kwargs
    ):
        # align all frames on their common (sorted) index so x/y/weight stay in sync
        self.common_index = intersection_of_index(*frames).sort_values()
        self.frames = XYWeight(*[loc_if_not_none(f, self.common_index) for f in frames])
        self.epochs = epochs
        self.batch_size = batch_size
        self.fold_epochs = fold_epochs
        self.splitter = splitter
        self.filter = filter

        # callbacks
        self.on_start = on_start
        self.on_epoch = on_epoch
        self.on_batch = on_batch
        self.on_fold = on_fold
        self.on_fold_epoch = on_fold_epoch
        self.after_epoch = after_epoch
        self.after_batch = after_batch
        self.after_fold = after_fold
        self.after_fold_epoch = after_fold_epoch
        self.after_end = after_end

        # split training and test data
        if self.splitter is not None:
            if isinstance(self.common_index, pd.MultiIndex):
                _log.warning("The Data provided uses a `MultiIndex`, eventually you want to set the "
                             "`partition_row_multi_index` parameter in your splitter")

            self.train_idx, self.test_idx = call_callable_dynamic_args(
                self.splitter, self.common_index, **self.frames.to_dict())
        else:
            # no splitter: everything is training data
            self.train_idx, self.test_idx = self.common_index, pd.Index([])

        if cross_validation is not None:
            if isinstance(self.common_index, pd.MultiIndex) and not isinstance(cross_validation, PartitionedOnRowMultiIndexCV):
                # cross validators need to fold within each group of a multi index row index, a wrapper can be provided
                _log.warning("The Data provided uses a `MultiIndex` but the cross validation is not wrapped in "
                             "`PartitionedOnRowMultiIndexCV`")

            if epochs is None or epochs > 1:
                _log.warning(f"using epochs > 1 together with cross folding may lead to different folds for each epoch!"
                             f"{cross_validation}")

            # accept either a sklearn-style object (has get_n_splits/split) or a bare callable
            self.nr_folds = cross_validation.get_n_splits() if hasattr(cross_validation, "get_n_splits") else -1
            self.cross_validation = cross_validation.split if hasattr(cross_validation, "split") else cross_validation
        else:
            self.nr_folds = None
            self.cross_validation = None

    def with_callbacks(
            self,
            on_start: Callable = None,
            on_epoch: Callable = None,
            on_batch: Callable = None,
            on_fold: Callable = None,
            on_fold_epoch: Callable = None,
            after_epoch: Callable = None,
            after_batch: Callable = None,
            after_fold: Callable = None,
            after_fold_epoch: Callable = None,
            after_end: Callable = None,
    ):
        """Return a new Sampler with the same data/configuration but the given callbacks.

        NOTE(review): this passes `self.cross_validation` (already the bound
        `.split` callable) back into the constructor, so the copy's `nr_folds`
        becomes -1 even if the original had a real fold count — confirm.
        """
        return Sampler(
            self.frames,
            self.splitter,
            self.filter,
            self.cross_validation,
            self.epochs,
            self.batch_size,
            self.fold_epochs,
            on_start,
            on_epoch,
            on_batch,
            on_fold,
            on_fold_epoch,
            after_epoch,
            after_batch,
            after_fold,
            after_fold_epoch,
            after_end
        )

    def sample_for_training(self) -> Generator[FoldXYWeight, None, None]:
        """Yield FoldXYWeight batches over epochs, cross-validation folds,
        fold-epochs and mini-batches, firing the lifecycle callbacks.

        An `after_fold_epoch` callback may raise StopIteration: if its message
        equals the current fold number only that fold is stopped, otherwise the
        whole generator shuts down (remaining callbacks are still fired).
        """
        # a dummy single "fold" over all data when no cross validation is configured
        cross_validation = self.cross_validation if self.cross_validation is not None else lambda x: [(None, None)]

        # filter samples
        if self.filter is not None:
            train_idx = [idx for idx in self.train_idx if call_callable_dynamic_args(self.filter, idx, **self.frames.to_dict(idx))]
        else:
            train_idx = self.train_idx

        # update frame views
        train_frames = XYWeight(*[loc_if_not_none(f, train_idx) for f in self.frames])
        test_frames = XYWeight(*[loc_if_not_none(f, self.test_idx) for f in self.frames])

        # call for start ...
        call_callable_dynamic_args(
            self.on_start,
            epochs=self.epochs, batch_size=self.batch_size, fold_epochs=self.fold_epochs,
            features=exec_if_not_none(lambda x: x.columns.tolist(), self.frames.x),
            labels=exec_if_not_none(lambda y: y.columns.tolist(), self.frames.y),
            cross_validation=self.nr_folds is not None)

        # generate samples; epochs=None means loop forever (iter(int, 1) never ends)
        for epoch in (range(self.epochs) if self.epochs is not None else iter(int, 1)):
            call_callable_dynamic_args(self.on_epoch, epoch=epoch)
            fold_iter = enumerate(call_callable_dynamic_args(cross_validation, train_idx, **train_frames.to_dict()))
            for fold, (cv_train_i, cv_test_i) in fold_iter:
                call_callable_dynamic_args(self.on_fold, epoch=epoch, fold=fold)
                # if we dont have any cross validation the training and test sets stay unchanged
                cv_train_idx = train_idx if cv_train_i is None else train_idx[cv_train_i]

                # build our test data sets
                if cv_test_i is not None:
                    if cv_test_i.ndim > 1:
                        # a 2D test-index array means multiple test sets per fold (one per column)
                        cv_test_frames = [
                            XYWeight(*[loc_if_not_none(f, train_idx[cv_test_i[:, i]]) for f in self.frames])
                            for i in range(cv_test_i.shape[1])
                        ]
                    else:
                        cv_test_frames = [
                            XYWeight(*[loc_if_not_none(f, train_idx[cv_test_i]) for f in self.frames])]
                else:
                    if len(self.test_idx) <= 0:
                        cv_test_frames = []
                    else:
                        cv_test_frames = [XYWeight(*[loc_if_not_none(f, self.test_idx) for f in self.frames])]

                for fold_epoch in range(self.fold_epochs):
                    # NOTE(review): fires self.on_fold here rather than self.on_fold_epoch —
                    # possibly a copy/paste slip; confirm before relying on on_fold_epoch.
                    call_callable_dynamic_args(self.on_fold, epoch=epoch, fold=fold, fold_epoch=fold_epoch)
                    # build our training data sets aka batches
                    cv_train_frames = XYWeight(*[loc_if_not_none(f, cv_train_idx) for f in self.frames])

                    # theoretically we could already yield cv_train_frames, cv_test_frames
                    # but lets create batches first and then yield all together
                    nr_instances = len(cv_train_idx)
                    # NOTE(review): `nice_i` shifts the slice start by one for the last
                    # batch window(s); the intent (avoid a degenerate final batch?) is
                    # unclear — confirm before relying on exact batch boundaries.
                    nice_i = max(nr_instances - 2, 0)
                    bs = min(nr_instances, self.batch_size) if self.batch_size is not None else nr_instances
                    batch_iter = range(0, nr_instances, bs)
                    for i in batch_iter:
                        call_callable_dynamic_args(self.on_batch, epoch=epoch, fold=fold, fold_epoch=fold_epoch, batch=i)
                        yield FoldXYWeight(epoch, fold, fold_epoch, *(f.iloc[i if i < nice_i else i - 1:i + bs] if f is not None else None for f in cv_train_frames))
                        call_callable_dynamic_args(self.after_batch, epoch=epoch, fold=fold, fold_epoch=fold_epoch, batch=i)

                    # end of fold epoch
                    try:
                        call_callable_dynamic_args(self.after_fold_epoch, epoch=epoch, fold=fold, fold_epoch=fold_epoch, train_data=cv_train_frames, test_data=cv_test_frames)
                    except StopIteration as sie:
                        call_callable_dynamic_args(self.after_fold, epoch=epoch, fold=fold, train_data=cv_train_frames, test_data=cv_test_frames)
                        if str(sie).isnumeric() and int(str(sie)) == fold:
                            # we just want to stop this fold
                            break
                        else:
                            # we need to stop any further generation of sample and call all left callbacks
                            call_callable_dynamic_args(self.after_epoch, epoch=epoch, train_data=train_frames, test_data=test_frames)
                            call_callable_dynamic_args(self.after_end)
                            return

                # end of fold
                call_callable_dynamic_args(self.after_fold, epoch=epoch, fold=fold, train_data=cv_train_frames, test_data=cv_test_frames)

            # end of epoch
            call_callable_dynamic_args(self.after_epoch, epoch=epoch, train_data=train_frames, test_data=test_frames)

        # end of generator
        call_callable_dynamic_args(self.after_end)

    def get_in_sample_features(self) -> pd.DataFrame:
        """Features restricted to the training index."""
        return self.frames.x.loc[self.train_idx]

    def get_out_of_sample_features(self) -> pd.DataFrame:
        """Features restricted to the held-out test index."""
        return self.frames.x.loc[self.test_idx]
|
999,172 | a6d5faa7b7cb27185b88e8e8c541c5bb3040923d |
# Importing necessary dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pyflux as pf
filename = "Arun_Valley_export.csv"  # CSV export holding stock values and dates
data = pd.read_csv(filename)
data['Date'] = pd.to_datetime(data['Date'])
# Convert each date into an integer day offset from the earliest date
data['date_delta'] = (data['Date'] - data['Date'].min()) / np.timedelta64(1,'D')
# Index the frame by that day offset
data.index = data['date_delta']
# Plot the raw series.
# FIX: the original called plt.ylabel twice, so the x axis was never labelled.
plt.figure(figsize=(15,5))
plt.plot(data.index,data['LTP'])
plt.ylabel('Arun Valley Stock Values')
plt.xlabel('Date')
plt.title("Arun Valley Stock prediction using ARIMA model")
plt.show()
# Fit an ARIMA(p=4, q=4) model on the LTP column by maximum likelihood
model = pf.ARIMA(data=data, ar=4, ma=4, target='LTP', family=pf.Normal())
x = model.fit("MLE")
# Print the summary of the fit
x.summary()
# Predict and plot 20 future values against 20 past values
model.plot_predict(h=20,past_values=20,figsize=(15,5))
# Print the predicted values in the terminal
print(model.predict(h=10,intervals=False))
|
999,173 | bad588d0b6fd4e8c1704d83f5cbd615eb8e201cc | import numpy as np
# Serial number of the fuel-cell grid (AoC 2018 day 11 style puzzle).
grid = 9424


def _cell_power(x, y):
    # power level of cell (x, y): hundreds digit of ((x+10)*y + serial)*(x+10), minus 5
    rack = x + 10
    return int((rack * y + grid) * rack / 100) % 10 - 5


# Fill the 300x300 power grid (1-based puzzle coordinates -> 0-based array).
ma = np.zeros([300, 300])
for i in range(300):
    for j in range(300):
        ma[i, j] = _cell_power(i + 1, j + 1)

# Scan every 3x3 window in row-major order and remember the first maximum.
max_n = -45
for i in range(298):
    for j in range(298):
        total = ma[i:i + 3, j:j + 3].sum()
        if total > max_n:
            max_n = total
            i_r = i + 1
            j_r = j + 1

print(i_r, j_r)
999,174 | 7771c610ac7b1f571f35e1ba919e376887bf8d68 | from libs.crawler import crawl
from bs4 import BeautifulSoup
import requests, re
'''
1.매출액-시총 >=0
2.(영업이익*10)배 - 시총 >=0
3.bps >=0 --- 0
4.bps-현재가 >=0 --- 0
5.(유보율:부채비율 = 5:1)<= 20%
6.이익잉여금 >=0
7.이익잉여금-시총 >=0
8.영업이익증가율 >=0
9.per <=10
10.roe >=0
11.roa >=0
12.pbr <=1
13.eps >=0
'''
# Naver Finance page for Samsung Electronics (stock code 005930).
url = 'https://finance.naver.com/item/main.nhn?code=005930'
# string = crawl(url)
# open('005930.html', 'w+').write(requests.get(url).text)
# Work from a locally saved copy of the page instead of hitting the network.
string = open('target_site_file/005930.html', encoding='utf-8').read()
# Strip newlines, tabs, non-breaking spaces and thousands separators from cell text.
replace_space = lambda x: re.sub("(\n|\t|\\xa0|,)", "", x)
def parse(string):
    """Scrape key financials for one stock from a saved Naver Finance page.

    Returns a dict with today's price, BPS, PBR, BPS minus today's price, and
    the latest annual sales / operating profit / net income scaled by 1e8
    (the page reports those figures in units of 100 million KRW).
    """
    bsobj = BeautifulSoup(string, 'html.parser')
    aside = bsobj.find('div', {'id':'aside'})
    tab_con1 = aside.find('div', {'id':'tab_con1'})
    pbr = 0
    bps = 0
    price_today = 0
    sales = 0
    net_income = 0
    market_cap = 0
    operating_profit = 0

    # most recent annual figures (header row gives the year label)
    cop_analysis = bsobj.find('div', {'class':'section cop_analysis'})
    tr_t_line = cop_analysis.find('thead').find_all('tr')[1].find_all('th')[2].text
    last_year = replace_space(tr_t_line)
    tbody_trs = cop_analysis.find('tbody').find_all('tr')
    tbody_first_tr_tds = tbody_trs[0].find_all('td')
    tbody_second_tr_tds = tbody_trs[1].find_all('td')
    tbody_third_tr_tds = tbody_trs[2].find_all('td')
    # annual sales (row 0 of the analysis table)
    sales = float(replace_space(tbody_first_tr_tds[2].text))
    # operating profit (row 1)
    operating_profit = float(replace_space(tbody_second_tr_tds[2].text))
    # net income (row 2)
    net_income = float(replace_space(tbody_third_tr_tds[2].text))

    # market capitalisation (first row of the sidebar table)
    div_first_tbody_trs = tab_con1.find('div', {'class':'first'}).find_all('tr')
    market_cap = re.sub('(\t|\n)','',div_first_tbody_trs[0].find('td').text)
    print(market_cap)

    # current price
    try:
        price_today = bsobj.find('p', {'class':'no_today'}).find('span', {'class':'blind'}).text.replace(',','')
        price_today = float(price_today)
    except Exception as e:
        print(e)

    # PER/PBR sidebar table: ems[0] is PBR, ems[1] is BPS
    try:
        per_table = tab_con1.find('table', {'class': 'per_table'})
        per_table_trs = per_table.find_all('tr')
        ems = per_table_trs[2].find_all('em')
        pbr = float(ems[0].text)
        bps = float(ems[1].text.replace(',', ''))
    except Exception as e:
        print(e)

    # previous year's figures keyed by the year label scraped above
    return {'price_today':price_today, 'bps':bps, 'pbr':pbr, 'bps_minus_today_price':bps - price_today,
            'sales{}'.format(last_year):sales*pow(10, 8), 'operating_profit{}'.format(last_year):
            operating_profit * pow(10, 8), 'net_income':net_income * pow(10, 8)}
print(parse(string)) |
999,175 | e803b309ae80132bd8e0f10db1bd4a0093affe56 | from visual import *
from math import *
# --- stadium-shaped billiard table: straight middle section plus two semicircular ends ---
alpha=0.1
shape=Polygon([(-0.5,-0.5),(-0.5,0.5),(0.5,0.5),(0.5,-0.5)])
path=[]
# upper semicircular rim, centred at z = +alpha*20, radius 20
for i in range(181):
    newx=20*cos(i*pi/180)
    newz=alpha*20+20*sin(i*pi/180)
    path.append((newx,0,newz))
# lower semicircular rim, centred at z = -alpha*20
for j in range(181):
    newx=20*cos(pi+(j*pi)/180)
    newz=-alpha*20-20*sin(j*pi/180)
    path.append((newx,0,newz))
path.append((20,0,alpha*20))
# table base, extruded rim, and the ball (leaves a visible trail)
bob=box(pos=(0,-1,0),size=(40,1,45),material=materials.wood)
tableup=extrusion(pos=path,shape=shape,material=materials.wood,color=color.cyan)
taiqiu=sphere(pos=(2,0.5,0),radius=0.5,color=color.white,make_trail=True,material=materials.emissive)
# polar bookkeeping (tho = radius, the = angle) and the velocity split into x/z parts
taiqiu.tho=1
taiqiu.the=0
taiqiu.vtho=sqrt(2)
taiqiu.vthe=pi/4
taiqiu.vx=taiqiu.vtho*cos(taiqiu.vthe)
taiqiu.vz=taiqiu.vtho*sin(taiqiu.vthe)
dt=0.01
# simulation loop: specular reflection off the straight walls and the two arcs
while True:
    rate(8000)
    if -alpha*20<=taiqiu.z<=alpha*20:
        # straight section: bounce off the vertical walls at x = +/-20
        if taiqiu.x>20 or taiqiu.x<-20:
            taiqiu.vx=-taiqiu.vx
            taiqiu.vthe=arctan(float(taiqiu.vz/taiqiu.vx))
    if taiqiu.z>alpha*20:
        # upper arc: when the ball crosses the circle, reflect about the local normal
        if taiqiu.x**2+(taiqiu.z-alpha*20)**2>400:
            the=arctan(float((taiqiu.z-alpha*20)/taiqiu.x))
            taiqiu.vthe=pi-taiqiu.vthe+2*the
            taiqiu.vx=taiqiu.vtho*cos(taiqiu.vthe)
            taiqiu.vz=taiqiu.vtho*sin(taiqiu.vthe)
    if taiqiu.z<-alpha*20:
        # lower arc: same reflection with the mirrored centre
        if taiqiu.x**2+(taiqiu.z+alpha*20)**2>400:
            the=arctan(float((taiqiu.z+alpha*20)/taiqiu.x))
            taiqiu.vthe=pi-taiqiu.vthe+2*the
            taiqiu.vx=taiqiu.vtho*cos(taiqiu.vthe)
            taiqiu.vz=taiqiu.vtho*sin(taiqiu.vthe)
    # forward Euler position update, then refresh the polar coordinates
    taiqiu.x=taiqiu.x+taiqiu.vx*dt
    taiqiu.z=taiqiu.z+taiqiu.vz*dt
    taiqiu.tho=sqrt(taiqiu.x**2+taiqiu.z**2)
    taiqiu.the=arctan(float(taiqiu.z/taiqiu.x))
|
999,176 | ef9f7ccce6c7249b905f40ea5d89841a2a3e44fe | from django.shortcuts import render,redirect , get_object_or_404
from .models import Core_committee
def teams(request):
    """Render the team page with the core-committee model manager in context."""
    committee = Core_committee.objects
    return render(request, 'teams/team.html', {'committee': committee})
|
999,177 | fb683bfcee3018dab15e95b8143df040cb6fe20c | import os
import pickle
import random
import numpy as np
import torch
from PIL import Image
from nltk.corpus import wordnet
from torchvision import transforms
from tqdm import tqdm
def get_img_paths(imagenet_dir='/hal9000/datasets/imagenet/image/'):
    """Return (image_paths, wnids) for the local ImageNet tree, cached via pickle.

    `image_paths` contains bare .JPEG file names (they are re-joined with their
    wnid directory by `compute_visual_representations`); `wnids` is the
    deduplicated list of per-synset directory names.
    """
    try:
        # fast path: reuse the pickled result of a previous scan
        image_paths = pickle.load(open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_image_paths.p', 'rb'))
        wnids = pickle.load(open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_wnids.p', 'rb'))
    except FileNotFoundError:
        # first run: walk every synset directory and collect the JPEG names
        image_paths = []
        wnids = []
        for s in tqdm(os.listdir(imagenet_dir)):
            if os.path.isdir(os.path.join(imagenet_dir, s)):
                wnid_dir = os.path.join(imagenet_dir, s)
                wnids.append(s)
                for f in os.listdir(wnid_dir):
                    if f.endswith('.JPEG'):
                        image_paths.append(f)
        wnids = list(set(wnids))
        pickle.dump(image_paths, open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_image_paths.p', 'wb'))
        pickle.dump(wnids, open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_wnids.p', 'wb'))
    return image_paths, wnids
def get_wnid2img_path_idx(image_paths):
    """Map each WordNet id to the list of its image indices in `image_paths`,
    cached via pickle (the wnid is the file-name prefix before '_')."""
    cache_path = '/h/19/jadeleiyu/frame_extension/data/img/imagenet_wnid2img_path_idx.p'
    try:
        wnid2img_path_idx = pickle.load(open(cache_path, 'rb'))
    except FileNotFoundError:
        wnid2img_path_idx = {}
        for idx, path in enumerate(image_paths):
            wnid = path.split('_')[0]
            wnid2img_path_idx.setdefault(wnid, []).append(idx)
        pickle.dump(wnid2img_path_idx, open(cache_path, 'wb'))
    return wnid2img_path_idx
def get_word2wnids(wnids):
    """Map every WordNet lemma name to the list of wnids it appears in, cached via pickle."""
    try:
        word2wnids = pickle.load(open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_word2wnids.p', 'rb'))
    except FileNotFoundError:
        word2wnids = {}
        for wnid in wnids:
            # a wnid looks like 'n01234567': pos tag 'n' plus the synset offset
            ss = wordnet.synset_from_pos_and_offset(pos='n', offset=int(wnid[1:]))
            for lemma in ss.lemmas():
                word = lemma.name()
                if word not in word2wnids:
                    word2wnids[word] = [wnid]
                else:
                    word2wnids[word].append(wnid)
        pickle.dump(word2wnids, open('/h/19/jadeleiyu/frame_extension/data/img/imagenet_word2wnids.p', 'wb'))
    return word2wnids
def get_word2img_idx(word2wnids, wnid2img_path_idx):
    """Map each word to the concatenated image indices of all its wnids, cached via pickle."""
    cache_path = '/h/19/jadeleiyu/frame_extension/data/img/imagenet_word2img_idx.p'
    try:
        word2img_idx = pickle.load(open(cache_path, 'rb'))
    except FileNotFoundError:
        word2img_idx = {
            word: [idx for wnid in wnids for idx in wnid2img_path_idx[wnid]]
            for word, wnids in word2wnids.items()
        }
        pickle.dump(word2img_idx, open(cache_path, 'wb'))
    return word2img_idx
def compute_visual_representations(noun2idx, image_paths, word2img_idx, img_sample_size=64):
    """Average `img_sample_size` preprocessed ImageNet images per noun and save the stack.

    Saves a tensor of shape (len(noun2idx), 3, 224, 224) to disk; images that
    fail to load are replaced by random tensors and counted in `num_exps`,
    non-3-channel images are silently replaced by random tensors.
    """
    img_dir = '/hal9000/datasets/imagenet/image/'
    # standard ImageNet normalisation pipeline
    preprocess = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    idx2noun = {noun2idx[noun]: noun for noun in noun2idx.keys()}
    x_means = []
    num_exps = 0  # images that raised during loading
    for i in tqdm(range(len(idx2noun)), position=0, leave=True):
        noun = idx2noun[i]
        paths = [image_paths[j] for j in word2img_idx[noun]]
        sampled_paths = random.sample(paths, img_sample_size)
        xs = []
        for image_path in sampled_paths:
            try:
                # file names are '<wnid>_<n>.JPEG'; rebuild the full path
                wnid = image_path.split('_')[0]
                img_fn = os.path.join(img_dir, wnid, image_path)
                raw_img = Image.open(img_fn)
                if len(np.array(raw_img).shape) == 3:
                    x = preprocess(raw_img)
                else:
                    # grayscale/odd images: substitute noise of the right shape
                    x = torch.rand(3, 224, 224)
            except Exception as e:
                num_exps += 1
                x = torch.rand(3, 224, 224)
            xs.append(x)
        x_mean = torch.mean(torch.stack(xs), dim=0)
        x_means.append(x_mean)
    x_means = torch.stack(x_means)
    torch.save(x_means, '/h/19/jadeleiyu/frame_extension/data/img/noun_image_means.pt')
    # NOTE(review): num_exps counts failed *images* but the message reports it
    # against the number of *words* — confirm the intended statistic.
    print("{} out of {} support words have invalid imagenet representations".format(num_exps, len(idx2noun)))
    # return x_means
|
999,178 | 1b90288c4851159841d9cdd9ffac55bcc00c7479 | """
Question 12
Question:
Write a program, which will find all such numbers between 1000 and 3000 (both included)
such that each digit of the number is an even number.The numbers obtained should be printed
in a comma-separated sequence on a single line.
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.
"""
lst = [str(i) for i in range(1000, 3001)]
# keep only the numbers whose digits are all even
lst = [s for s in lst if all(int(digit) % 2 == 0 for digit in s)]
print(",".join(lst))
|
999,179 | 90156693c03746423b486b14eda69bb8a3f9c4a9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-10-16 20:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration creating the BinarySensorData table.

    Auto-generated by Django 1.11.20 (see file header); avoid hand-editing
    beyond comments.
    """

    initial = True

    dependencies = [
        ('smarthome', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BinarySensorData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_created=True, auto_now_add=True, db_index=True, verbose_name='When was this entry created at')),
                ('source', models.CharField(max_length=128, verbose_name='Where is this entry from')),
                ('sensor_id', models.IntegerField(db_index=True)),
                ('binary_state', models.SmallIntegerField()),
                ('real_sensor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smarthome.Sensor', verbose_name='Which sensor is this entry associated to')),
                ('room', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='smarthome.Room', verbose_name='Which room is this entry associated to')),
            ],
            options={
                'ordering': ('sensor_id', 'created_at'),
            },
        ),
    ]
|
999,180 | 39b0e79699167704fc080c00adaf41869e14d385 | #-* coding:UTF-8 -*
#!/usr/bin/env python
import threading
class mythread(threading.Thread):
    """Minimal Thread subclass that remembers an id and prints it when run.

    NOTE: Python 2 syntax (print statement); will not run under Python 3.
    """
    def __init__(self, num):
        threading.Thread.__init__(self)
        self.num = num  # id echoed by run()
    def run(self):
        print 'I am', self.num
# Spawn three threads; their print order is not deterministic.
t1 = mythread(1)
t2 = mythread(2)
t3 = mythread(3)
t1.start()
t2.start()
t3.start()
|
999,181 | 0985717b7344df1ba53fce97491a2b95640de963 | from scipy import stats
import numpy as np
# Read n and a list of n integers, then print their mean, median and mode.
n = int(input())
arr = list(map(int, input().rstrip().split()))
a = sorted(arr)

# mean: sum of all elements over their count (avoid shadowing builtin `sum`)
total = 0
for value in a:
    total += value
print(total / n)

# median: the middle element for odd n, the average of the two middle
# elements for even n.
# FIX: the original always averaged a[n//2 - 1] and a[n//2], which is wrong
# whenever n is odd.
half = len(a) // 2
if len(a) % 2 == 1:
    print(float(a[half]))
else:
    print((a[half - 1] + a[half]) / 2)

# mode: scipy reports the smallest most frequent value
print(int(stats.mode(arr)[0]))
|
# Read an integer and print every value from 0 up to its absolute value.
numero = abs(int(input("Digite um número: ")))
for valor in range(numero + 1):
    print(valor)
|
999,183 | 46f6af88cb685469beb2d863f863a69a532d815d | n = int(input())
if n == 0:
    # no lectures: nothing to earn
    print(0)
    exit(0)
mylist = []
for _ in range(n):
    # each row: [pay, deadline] — TODO confirm column order against the problem statement
    mylist.append(list(map(int, input().split())))
# consider the best-paid lectures first
mylist = sorted(mylist, key = lambda x: x[0], reverse = True)
maxday = max(list(zip(*mylist))[1])
# lec[d] holds the pay scheduled on day d (index 0 unused)
lec = [0]*(maxday+1)
# greedy: give each lecture the latest still-free day at or before its deadline
for idx in range(n):
    for i in range(maxday, 0, -1):
        if mylist[idx][1] >= i and lec[i] == 0:
            lec[i] = mylist[idx][0]
            break
print(sum(lec))
|
999,184 | f83ba4a60bd5f7ba87ae5ef5031f49c02a8a184c | import numpy as np
class ex_1_1_33:
    """Hand-rolled linear-algebra exercises: dot product, matrix product,
    transpose and matrix-vector product.

    All methods keep their original prints of intermediate results, but now
    also return the computed value (a backward-compatible improvement: the
    originals returned None on success), and -1 on a dimension mismatch.
    """

    def productoPunto(self, x, y):
        """Dot product of two equal-length vectors; -1 on length mismatch."""
        res = 0
        if len(x) != len(y):
            return -1
        for i in range(len(x)):
            print(x[i] * y[i], i)
            res += x[i] * y[i]
        print(res)
        return res

    def productoMatricial(self, m1, m2):
        """Matrix product m1 @ m2; -1 if the inner dimensions disagree."""
        if m1.shape[1] != m2.shape[0]:
            return -1
        inner = m1.shape[1]
        # result matrix (rows of m1 x columns of m2)
        res = np.zeros([m1.shape[0], m2.shape[1]])
        print(res)
        for row in range(res.shape[0]):
            for col in range(res.shape[1]):
                for k in range(inner):
                    res[row, col] += m1[row, k] * m2[k, col]
        print(res)
        return res

    def traspuesta(self, m1):
        """Transpose of m1 (built element by element)."""
        mtraspuesta = np.zeros([m1.shape[1], m1.shape[0]])
        print(mtraspuesta, m1.shape)
        for i in range(m1.shape[0]):
            for j in range(m1.shape[1]):
                mtraspuesta[j, i] = m1[i, j]
        print(mtraspuesta)
        return mtraspuesta

    def productoMxV(self, m1, m2):
        """Matrix-vector product: M @ v when m2 is 1-D (column result), or
        v @ M when m1 is 1-D (row result); -1 on a dimension mismatch."""
        if len(m2.shape) == 1:
            if m1.shape[1] != m2.shape[0]:
                return -1
            res = np.zeros([m1.shape[0], 1])
            inner = m1.shape[1]
            print(res, res.shape)
            for row in range(res.shape[0]):
                for k in range(inner):
                    res[row, 0] += m1[row, k] * m2[k]
            print(res)
            return res
        elif len(m1.shape) == 1:
            if m1.shape[0] != m2.shape[0]:
                return -1
            res = np.zeros([1, m2.shape[1]])
            print(res)
            inner = m2.shape[0]
            for col in range(res.shape[1]):
                for k in range(inner):
                    res[0, col] += m1[k] * m2[k, col]
            print(res)
            return res

    def main(self):
        """Demo: run the matrix-vector product in both orientations."""
        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        y = np.array([2, 1, 3])
        self.productoMxV(x, y)
        self.productoMxV(y, x)
# Run the demo in main() when this module is executed.
programa=ex_1_1_33()
programa.main()
999,185 | ba246d2c1e012e5bc6cf161226aafd33df69828b | import requests
import base64
import json

from django.conf import settings
def get_content_informations(nodeId, token):
    """Fetch node metadata for `nodeId` from the Alfresco REST API.

    Returns the decoded JSON payload, or None when the request fails.
    """
    auth = bytes('Basic ', "utf-8")
    headers = {'Accept': 'application/pdf' , 'Authorization' : auth + base64.b64encode(bytes(token, "utf-8"))}
    try:
        response = requests.get(settings.URL_CORE + settings.URL_NODES + "/" + nodeId, headers=headers)
        result = response.json()
        return result
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        print("Error when querying the Alfresco API.")
def get_content_mimetype(nodeId, token):
    """Return the MIME type of node `nodeId` from the Alfresco REST API,
    or None when the request or JSON lookup fails."""
    auth = bytes('Basic ', "utf-8")
    headers = {'Accept': 'application/pdf' , 'Authorization' : auth + base64.b64encode(bytes(token, "utf-8"))}
    try:
        response = requests.get(settings.URL_CORE + settings.URL_NODES + "/" + nodeId, headers=headers)
        result = response.json()
        return result['entry']['content']['mimeType']
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        print("Error when querying the Alfresco API.")
def get_content(nodeId, token):
    """Download the content stream of node `nodeId` (inline, not attachment).

    Returns the raw `requests` response, or None when the request fails.
    """
    auth = bytes('Basic ', "utf-8")
    headers = {'Accept': 'application/pdf' , 'Authorization' : auth + base64.b64encode(bytes(token, "utf-8"))}
    default_params = "content?attachment=false"
    try:
        response = requests.get(settings.URL_CORE + settings.URL_NODES + "/" + nodeId + "/" + default_params, headers=headers)
        return response
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        print("Error when querying the Alfresco API.")
def post_node_children(nodeId, nameDocument, token):
    """Create a new cm:content child named `nameDocument` under node `nodeId`.

    Returns the raw `requests` response, or None when the request fails.
    """
    auth = bytes('Basic ', "utf-8")
    headers = {'Accept': 'application/json' , 'Content-Type': 'application/json', 'Authorization' : auth + base64.b64encode(bytes(token, "utf-8"))}
    default_params = "children"
    # json.dumps escapes quotes/backslashes in nameDocument; the original
    # string concatenation produced invalid JSON for such names.
    body = json.dumps({"name": nameDocument, "nodeType": "cm:content"})
    try:
        response = requests.post(settings.URL_CORE + settings.URL_NODES + "/" + nodeId + "/" + default_params, headers=headers, data=body)
        return response
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        print("Error when querying the Alfresco API.")
def put_content_node(nodeId, path, token):
    """Upload the file at `path` as a new minor version of node `nodeId`.

    Returns the raw `requests` response, or None when the request fails.
    Errors opening/reading `path` still propagate, as in the original.
    """
    auth = bytes('Basic ', "utf-8")
    headers = {'Accept': 'application/json' , 'Content-Type': 'application/octet-stream', 'Authorization' : auth + base64.b64encode(bytes(token, "utf-8"))}
    default_params = "content?majorVersion=false"
    # `with` closes the file handle; the original leaked it
    with open(path, 'rb') as content_file:
        data = content_file.read()
    try:
        response = requests.put(settings.URL_CORE + settings.URL_NODES + "/" + nodeId + "/" + default_params, headers=headers, data=data)
        return response
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        print("Error when querying the Alfresco API.")
|
999,186 | c4111a6d06d7833ab7d443de1e772fbc92bb2e7c | from rest_framework import serializers
from .. import models
from .tag import TagsSerializer
class EnvironmentDetailSerializer(serializers.ModelSerializer):
    """Detail representation of an Environment: read-only nested tags plus a
    write-only `tags_id` field for assigning tags by primary key."""
    # rendered via the related client's __str__
    client = serializers.StringRelatedField()
    tags = TagsSerializer(read_only=True, many=True)
    # write side of `tags`: accepts a list of Tag primary keys
    tags_id = serializers.PrimaryKeyRelatedField(
        queryset=models.Tag.objects.all(),
        source='tags',
        write_only=True,
        many=True
    )

    class Meta:
        model = models.Environment
        fields = (
            'id', 'name', 'client', 'tags', 'tags_id'
        )
class EnvironmentSerializer(serializers.ModelSerializer):
    """List representation of an Environment with a hyperlink to its detail view."""
    url = serializers.HyperlinkedIdentityField(view_name='version_control:environments-detail', format='html', lookup_field='pk')
    # write side of `tags`: optional list of Tag primary keys
    tags_id = serializers.PrimaryKeyRelatedField(
        write_only=True,
        queryset=models.Tag.objects.all(),
        source="tags",
        many=True,
        required=False
    )
    tags = TagsSerializer(read_only=True, many=True)

    class Meta:
        model = models.Environment
        fields = (
            'id', 'url', 'name', 'client', 'tags_id', 'tags'
        )
|
999,187 | ccc7bb4479e1faf65a334543d49fa7ff30097603 | from flaskel.ext import default
# Module-level database handle (flaskel's default Database extension);
# presumably initialised against the app factory elsewhere — TODO confirm.
database = default.Database()
|
999,188 | 4c4075b71b39896e0235a27fbd7ffb93554da953 | import turtle
import math
import random
def draw():
    """Draw three concentric rosettes (white, cyan, yellow) of shrinking circles.

    The original repeated the same two helper definitions and drawing code
    three times, varying only the pen color, starting size and shrink step;
    this version parameterizes that (and drops the unused `rotate` variable).
    """
    window = turtle.Screen()
    window.bgcolor("black")

    def draw_circle(t, size, step):
        # ten concentric circles, each `step` smaller than the previous one
        for _ in range(10):
            t.circle(size)
            size -= step

    def draw_rosette(t, size, step, repeat):
        # repeat the circle stack evenly around the full 360 degrees
        for _ in range(repeat):
            draw_circle(t, size, step)
            t.right(360 / repeat)

    # one (color, starting size, shrink step) entry per layer
    for pen_color, size, step in [("white", 80, 4), ("cyan", 60, 3), ("yellow", 40, 2)]:
        pen = turtle.Turtle()
        pen.color(pen_color)
        pen.speed(0)
        draw_rosette(pen, size, step, 10)


draw()
|
999,189 | 70bc5e2b840ec438209fa9ff1226e85547f26a9e | import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file
import numpy as np
import pylab as plt
import csv
import datetime

# Parse space-separated "repo timestamp loc" rows.
# NOTE: Python 2 script (print statement, dict.iteritems, list-returning map).
times = []
repos = {}
fname='loc.txt'
with open(fname, 'r') as csvfile:
    r = csv.reader(csvfile, delimiter=' ', quotechar='"')
    for row in r:
        print row
        if int(row[1]) not in times:
            times.append(int(row[1]))
        if row[0] not in repos.keys():
            repos[row[0]] = []
        repos[row[0]].append(int(row[2]))

# x axis: timestamps -> datetimes -> "Mon YYYY" tick labels
X = map(lambda x: datetime.datetime.fromtimestamp(x), times)
Xyear = map(lambda x: x.strftime('%b %Y'), X)
Y = []
labels = []
for key,value in repos.iteritems():
    Y.append(value)
    labels.append(key)
# stacked per-repo lines-of-code area chart with release annotations
plt.stackplot(X, *Y, baseline="zero", labels=labels)
plt.title("MirageOS 3 lines of code")
plt.legend(loc=(0.05,0.55))
plt.annotate('v1.0 released', fontsize='x-small', xy=(0.25, 0.4), xycoords='axes fraction', textcoords='axes fraction', xytext=(0.05, 0.50),
    arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=8))
plt.annotate('v2.0 released', fontsize='x-small', xy=(0.43, 0.71), xycoords='axes fraction', textcoords='axes fraction', xytext=(0.35, 0.85),
    arrowprops=dict(facecolor='black', shrink=0.05, width=0.5, headwidth=8))
plt.xticks(X, Xyear, rotation='vertical', fontsize='small')
plt.tight_layout()
plt.savefig("loc.pdf", format="pdf")
|
999,190 | be54afaea13b75afed7d2cc77d5cc2368311e46b | import random
import string
class Regex:
    """Abstract base for regex AST nodes."""
    def generate_text(self, cost):
        """Produce one random string matched by this regex; `cost` bounds repetition."""
        raise NotImplementedError
    def make_nfa(self):
        raise NotImplementedError
    def to_int(self):
        raise NotImplementedError
    def __repr__(self):
        # debugging convenience: repr mirrors the regex's string form
        return self.__str__()
class RegexASCII(Regex):
    """Leaf node: matches exactly one literal character."""
    def __init__(self, c):
        self._c = c
    @property
    def char(self):
        # the wrapped literal character
        return self._c
    def __str__(self):
        return str(self._c)
    def generate_text(self, cost):
        # a literal always generates itself, regardless of cost
        return str(self._c)
class RegexOr(Regex):
    """Alternation node: matches either r1 or r2."""
    def __init__(self, r1, r2):
        assert isinstance(r1, Regex)
        assert isinstance(r2, Regex), "%s is not a regex" % (r2, )
        self.r1 = r1
        self.r2 = r2
    def __str__(self):
        return "(%s|%s)" % (self.r1, self.r2)
    def generate_text(self, cost):
        # pick either branch with equal probability
        if random.getrandbits(1) == 0:
            return self.r1.generate_text(cost)
        else:
            return self.r2.generate_text(cost)
class RegexSequence(Regex):
    """Concatenation node: matches r1 followed by r2."""
    def __init__(self, r1, r2):
        assert isinstance(r1, Regex)
        assert isinstance(r2, Regex), "%s is not a regex" % (r2, )
        self.r1 = r1
        self.r2 = r2
    def __str__(self):
        return "%s%s" % (self.r1, self.r2)
    def generate_text(self, cost):
        # concatenate one sample from each part
        return self.r1.generate_text(cost) + self.r2.generate_text(cost)
class StarRegex(Regex):
    """Kleene star: zero or more repetitions of the wrapped regex."""
    def __init__(self, r):
        assert isinstance(r, Regex)
        self.r = r
    def __str__(self):
        return "<%s>* " % self.r
    def generate_text(self, cost):
        """Generate between 0 and `cost` repetitions of the inner regex.

        FIXES vs. the original: it referenced the free name `r` instead of
        `self.r`, called generate_text() without the required `cost` argument,
        and never returned the joined string (always returned None).
        """
        n = random.randint(0, cost)
        return "".join(self.r.generate_text(cost) for _ in range(n))
def regex_mk_random(num_ors, num_stars, num_seq, min_ascii_in_chunk, max_ascii_in_chunk):
    """Build a random regex tree within the given node budgets.

    num_ors/num_stars/num_seq bound how many of each operator may appear on a
    path; when all three are exhausted a leaf chunk of between
    min_ascii_in_chunk and max_ascii_in_chunk random printable characters is
    produced (as a left-nested RegexSequence of RegexASCII leaves).
    """
    assert min_ascii_in_chunk > 0
    assert max_ascii_in_chunk > 0
    assert max_ascii_in_chunk >= min_ascii_in_chunk
    if num_ors == 0 and num_stars == 0 and num_seq == 0:
        # base case: a random literal chunk
        chars = string.ascii_letters + string.digits + string.punctuation
        asciis = [RegexASCII(random.choice(chars)) for _ in range(random.randint(min_ascii_in_chunk, max_ascii_in_chunk))]
        assert len(asciis) > 0
        if len(asciis) == 1:
            return asciis[0]
        assert len(asciis) >= 2
        # fold the literals into a left-nested sequence
        seq = RegexSequence(asciis[0], asciis[1])
        asciis = asciis[2:]
        for x in asciis:
            seq = RegexSequence(seq, x)
        return seq
    # recursive case: build one candidate per operator that still has budget,
    # then pick one at random and prune it
    choices = []
    if num_ors > 0:
        choices.append(RegexOr(regex_mk_random(num_ors - 1, num_stars, num_seq, min_ascii_in_chunk, max_ascii_in_chunk),
                               regex_mk_random(num_ors - 1, num_stars, num_seq, min_ascii_in_chunk, max_ascii_in_chunk)))
    if num_stars > 0:
        choices.append(StarRegex(regex_mk_random(num_ors, num_stars - 1, num_seq, min_ascii_in_chunk, max_ascii_in_chunk)))
    if num_seq > 0:
        choices.append(RegexSequence(regex_mk_random(num_ors, num_stars, num_seq - 1, min_ascii_in_chunk, max_ascii_in_chunk),
                                     regex_mk_random(num_ors, num_stars, num_seq - 1, min_ascii_in_chunk, max_ascii_in_chunk)))
    return regex_prune(random.choice(choices))
def regex_prune(regex):
    """Simplify a regex tree: collapse directly nested stars, rebuild the rest.

    FIXES vs. the original: it rebuilt RegexOr nodes as RegexSequence
    (changing alternation into concatenation), and silently returned None
    for unknown node types.
    """
    if isinstance(regex, RegexASCII):
        return regex
    elif isinstance(regex, StarRegex):
        if isinstance(regex.r, StarRegex):
            # (r*)* == r*, so drop the redundant outer star
            return regex_prune(regex.r)
        else:
            return StarRegex(regex_prune(regex.r))
    elif isinstance(regex, RegexOr):
        return RegexOr(regex_prune(regex.r1), regex_prune(regex.r2))
    elif isinstance(regex, RegexSequence):
        return RegexSequence(regex_prune(regex.r1), regex_prune(regex.r2))
    raise TypeError("unknown regex node: %r" % (regex,))
|
999,191 | b6d00271e19fe0a69c1ab90b7402c273a43ef9ba | from django.test import TestCase
from corehq.apps.linked_domain.models import DomainLink
from corehq.apps.fixtures.models import (
LookupTable,
LookupTableRow,
TypeField,
Field,
)
from corehq.apps.linked_domain.exceptions import UnsupportedActionError
from corehq.apps.linked_domain.updates import update_fixture
class TestUpdateFixturesReal(TestCase):
    """Exercise ``update_fixture`` end-to-end against real LookupTable models.

    Scenarios covered: creating a new synced table downstream, syncing over
    an already-linked table, conflict errors for unlinked duplicates (both
    push and pull wording), forced overwrite, and rejection of non-global
    (domain-local) tables.
    """

    def test_update_creates_new_synced_fixture(self):
        # Only the upstream table exists; the update must create a synced
        # downstream copy with the same tag, columns and rows.
        self._create_table(self.upstream_domain, 'test-table', ['col_1'], [{'col_1': 'one'}, {'col_1': 'two'}])
        update_fixture(self.link, 'test-table')
        created_table = LookupTable.objects.by_domain_tag(self.downstream_domain, 'test-table')
        self.assertEqual(created_table.tag, 'test-table')
        self.assertTrue(created_table.is_synced)
        self.assertColumnsEqual(created_table, ['col_1'])
        self.assertTableFieldsEqual(created_table, [{'col_1': 'one'}, {'col_1': 'two'}])

    def test_syncs_existing_fixture(self):
        # A downstream table already marked is_synced is overwritten with the
        # upstream schema and rows without raising.
        upstream_cols = ['col_1']
        downstream_cols = ['col_2']
        upstream_rows = [{'col_1': 'one'}]
        downstream_rows = [{'col_2': 'two'}]
        self._create_table(self.upstream_domain, 'test-table', upstream_cols, upstream_rows)
        self._create_table(self.downstream_domain, 'test-table', downstream_cols, downstream_rows, is_synced=True)
        update_fixture(self.link, 'test-table')
        created_table = LookupTable.objects.by_domain_tag(self.downstream_domain, 'test-table')
        self.assertColumnsEqual(created_table, upstream_cols)
        self.assertTableFieldsEqual(created_table, upstream_rows)

    def test_update_raises_error_on_unsynced_duplicate_name(self):
        # An UNlinked downstream table with the same tag must block the push
        # with an actionable error message.
        self._create_table(self.upstream_domain, 'test-table', ['col_1'], [])
        self._create_table(self.downstream_domain, 'test-table', ['col_2'], [], is_synced=False)
        with self.assertRaisesMessage(UnsupportedActionError,
                'Failed to push Lookup Table "test-table" due to matching (same Table ID) unlinked Lookup Table'
                ' in the downstream project space. Please edit the Lookup Table to resolve the matching or click'
                ' "Push & Overwrite" to overwrite and link them.'):
            update_fixture(self.link, 'test-table')

    def test_produces_pull_message(self):
        # Same conflict as above, but is_pull=True switches the wording from
        # "push"/"Push & Overwrite" to "sync"/"Sync & Overwrite".
        self._create_table(self.upstream_domain, 'test-table', ['col_1'], [])
        self._create_table(self.downstream_domain, 'test-table', ['col_2'], [], is_synced=False)
        with self.assertRaisesMessage(UnsupportedActionError,
                'Failed to sync Lookup Table "test-table" due to matching (same Table ID) unlinked Lookup Table'
                ' in the downstream project space. Please edit the Lookup Table to resolve the matching or click'
                ' "Sync & Overwrite" to overwrite and link them.'):
            update_fixture(self.link, 'test-table', is_pull=True)

    def test_force_update_overwrites_conflicting_duplicate_name(self):
        # overwrite=True resolves the unlinked-duplicate conflict by
        # replacing the downstream table with the upstream one.
        upstream_cols = ['col_1']
        downstream_cols = ['col_2']
        upstream_rows = [{'col_1': 'one'}]
        downstream_rows = [{'col_2': 'two'}]
        self._create_table(self.upstream_domain, 'test-table', upstream_cols, upstream_rows)
        self._create_table(self.downstream_domain, 'test-table', downstream_cols, downstream_rows)
        update_fixture(self.link, 'test-table', overwrite=True)
        created_table = LookupTable.objects.by_domain_tag(self.downstream_domain, 'test-table')
        self.assertColumnsEqual(created_table, upstream_cols)
        self.assertTableFieldsEqual(created_table, upstream_rows)

    def test_syncing_local_table_raises_error(self):
        # Only global tables may be linked; a domain-local table is rejected.
        self._create_table(self.upstream_domain, 'test-table', ['col_1'], [], is_global=False)
        with self.assertRaisesMessage(UnsupportedActionError, "Found non-global lookup table 'test-table'"):
            update_fixture(self.link, 'test-table')

    def setUp(self):
        # A DomainLink from 'upstream' (master) to 'downstream' (linked);
        # domains are plain strings, no Domain objects are required here.
        self.downstream_domain = 'downstream'
        self.upstream_domain = 'upstream'
        self.link = DomainLink(linked_domain=self.downstream_domain, master_domain=self.upstream_domain)

    def _create_table(self, domain, tag, col_names, rows, is_global=True, is_synced=False):
        """Create a LookupTable with the given columns and one row per dict in *rows*."""
        columns = [TypeField(name=col_name) for col_name in col_names]
        table = LookupTable.objects.create(
            domain=domain, tag=tag, fields=columns, is_global=is_global, is_synced=is_synced)
        for i, row in enumerate(rows):
            # Each row dict maps column name -> single string value.
            fields = {key: [Field(value=val)] for (key, val) in row.items()}
            LookupTableRow.objects.create(domain=domain, table_id=table.id, fields=fields, sort_key=i)
        return table

    def assertColumnsEqual(self, table, expected_column_names):
        """Assert the table's column names match *expected_column_names* in order."""
        cols = [col.name for col in table.fields]
        self.assertEqual(cols, expected_column_names)

    def assertTableFieldsEqual(self, table, expected_field_values):
        """Assert the table's stored rows equal *expected_field_values* (list of dicts)."""
        rows = LookupTableRow.objects.filter(domain=table.domain, table_id=table.id)
        field_values = [row.fields_without_attributes for row in rows]
        self.assertListEqual(field_values, expected_field_values)
|
999,192 | 00c075e36eed82a4847a70750934edf5dd15ffbc | import numpy as np
class Calculator:
    """Static helpers for water-rocket thrust and trajectory calculations.

    All methods are pure functions of their arguments; constants used are
    water density (997 kg/m^3) and standard gravity (9.81 m/s^2).
    """

    @staticmethod
    def nozzle_area(nozzle_diam):
        """Cross-sectional area of a circular nozzle of diameter *nozzle_diam*."""
        radius = nozzle_diam / 2
        return np.pi * radius ** 2

    # check valid 10/06/18 - Thomas Slusser
    @staticmethod
    def potential_height(mass, height, velocity):
        """Apex height reached from *height* with *velocity* (kinetic -> potential).

        NOTE(review): *mass* is accepted but unused (an earlier mass-based
        formula is retained in history); kept for interface compatibility.
        """
        return height + 0.5 * (velocity ** 2) / abs(9.81)

    # assumed correct due to work with Chandler - 10/06/18
    @staticmethod
    def exit_velocity(pressure, pipe_height):
        """Water exit speed from Bernoulli: v = sqrt(2*(p/rho - g*h))."""
        specific_energy = (pressure / 997) + -9.81 * pipe_height
        return np.sqrt(2 * specific_energy)

    @staticmethod
    def m_dot(nozzle_area, exit_velocity):
        """Mass flow rate rho*A*v, floored at zero (no negative flow)."""
        flow = 997 * nozzle_area * exit_velocity
        return max(flow, 0)

    @staticmethod
    def duty_cycle(m_dot, m_dot_max):
        """Ratio of demanded to maximum flow, clamped to the [0, 1] range."""
        ratio = m_dot / m_dot_max
        if ratio > 1:
            return 1
        if ratio < 0:
            return 0
        return ratio

    @staticmethod
    def target_d_mass(mass, ue, target_dv, dt):
        """Rocket-equation mass after producing *target_dv* over *dt* seconds."""
        exponent = (-9.81 * dt / ue) - (target_dv / ue)
        return mass * np.exp(exponent)

    @staticmethod
    def delta_v_required(delta_height):
        """Delta-v needed to climb |*delta_height*| under gravity: sqrt(2*g*|dh|)."""
        return np.sqrt(abs(2 * 9.81 * delta_height))

    @staticmethod
    def modulus(a, b):
        """a mod b with two rounds of 5-decimal rounding to tame float drift."""
        first_pass = round(a % b, 5)
        return round(first_pass % b, 5)
|
999,193 | 817c82cefbe342e4c9fe2d476d8e1ab493f6c0b7 | from unittest.mock import MagicMock, patch
import pytest
from prereise.cli.data_sources.tests.conftest import (
CURRENT_DIRECTORY_FILEPATH,
STRING_DATE_2021_5_1,
STRING_DATE_2021_12_1,
TEXAS_REGION_LIST,
VALID_GRID_MODEL,
)
from prereise.cli.data_sources.wind_data import WindDataRapidRefresh
NO_IMPUTE = False
@pytest.fixture
def wind_data_object():
    """Provide a fresh WindDataRapidRefresh instance for each test."""
    return WindDataRapidRefresh()
def test_winddata_end_date_before_start_date(wind_data_object):
    """Extraction must refuse a date window whose end precedes its start."""
    reversed_window_args = (
        TEXAS_REGION_LIST,
        STRING_DATE_2021_12_1,   # the later date passed as the start...
        STRING_DATE_2021_5_1,    # ...and the earlier one as the end
        CURRENT_DIRECTORY_FILEPATH,
        VALID_GRID_MODEL,
        NO_IMPUTE,
    )
    with pytest.raises(AssertionError):
        wind_data_object.extract(*reversed_window_args)
@patch("prereise.cli.data_sources.wind_data.rap")
@patch("prereise.cli.data_sources.wind_data.Grid")
def test_winddata_happy_path(grid, rap, wind_data_object):
    """Extraction retrieves data for the grid's wind farms and pickles it."""
    # Stub the grid so plant.groupby(...).get_group(...) yields a
    # recognizable wind-farm object we can assert against later.
    fake_farms = MagicMock()
    fake_grid = MagicMock()
    fake_grid.plant.groupby.return_value.get_group.return_value = fake_farms
    grid.return_value = fake_grid
    # rap reports no missing files on the happy path.
    retrieved = MagicMock()
    rap.retrieve_data.return_value = (retrieved, [])
    wind_data_object.extract(
        TEXAS_REGION_LIST,
        STRING_DATE_2021_5_1,
        STRING_DATE_2021_12_1,
        CURRENT_DIRECTORY_FILEPATH,
        VALID_GRID_MODEL,
        NO_IMPUTE,
    )
    rap.retrieve_data.assert_called_with(
        fake_farms, start_date=STRING_DATE_2021_5_1, end_date=STRING_DATE_2021_12_1
    )
    retrieved.to_pickle.assert_called_with(CURRENT_DIRECTORY_FILEPATH)
@patch("prereise.cli.data_sources.wind_data.rap")
@patch("prereise.cli.data_sources.wind_data.Grid")
@patch("prereise.cli.data_sources.wind_data.logging")
@patch("prereise.cli.data_sources.wind_data.impute")
def test_winddata_missing_files(impute, logging, grid, rap, wind_data_object):
    """When rap reports missing files, the data is imputed before pickling."""
    fake_farms = MagicMock()
    fake_grid = MagicMock()
    fake_grid.plant.groupby.return_value.get_group.return_value = fake_farms
    grid.return_value = fake_grid
    # Two missing files trigger the gaussian-imputation branch.
    retrieved = MagicMock()
    rap.retrieve_data.return_value = (retrieved, [None, None])
    wind_data_object.extract(
        TEXAS_REGION_LIST,
        STRING_DATE_2021_5_1,
        STRING_DATE_2021_12_1,
        CURRENT_DIRECTORY_FILEPATH,
        VALID_GRID_MODEL,
        NO_IMPUTE,
    )
    rap.retrieve_data.assert_called_with(
        fake_farms, start_date=STRING_DATE_2021_5_1, end_date=STRING_DATE_2021_12_1
    )
    retrieved.to_pickle.assert_called_with(CURRENT_DIRECTORY_FILEPATH)
    impute.gaussian.assert_called_with(retrieved, fake_farms, inplace=True)
|
999,194 | 4993934cbe0f28ebfc60596187d5cb933a15c303 | import FWCore.ParameterSet.Config as cms
# Producer for Hybrid BasicClusters and SuperClusters
from RecoEgamma.PhotonIdentification.photonId_cfi import *
# photonID sequence
# Schedulable sequence wrapping the PhotonIDProd producer so reconstruction
# paths can include photon identification as a single step.
photonIDSequence = cms.Sequence(PhotonIDProd)
|
999,195 | c6bef7afda542f010d4ea2e87b8612d3db4d834a | # returns all numbers that can be written as the sum of "power" powers of their digits
def digit_power(power):
    """Return every integer >= 2 equal to the sum of the *power*-th powers
    of its decimal digits (e.g. 1634 = 1^4 + 6^4 + 3^4 + 4^4).
    """
    # Pre-compute d**power for each decimal digit, indexed by digit value.
    digit_powers = [d ** power for d in range(10)]
    # An n-digit number contributes at most n * 9**power, which bounds how
    # many digits a candidate can have; derive a safe search limit from it.
    single_digit_max = digit_powers[9]
    max_digit_count = len(str(len(str(single_digit_max)) * single_digit_max))
    limit = max_digit_count * single_digit_max
    matches = []
    for candidate in range(2, limit):
        if candidate == sum(digit_powers[int(c)] for c in str(candidate)):
            matches.append(candidate)
    return matches
# tests
print(digit_power(4))
print(digit_power(5)) |
999,196 | 76fa78728a58c6053035de4d640250fbc06e4f3c | import MySQLdb
from cassandra.cluster import Cluster
# Copy one half-hour window of minute measurements from MySQL (Enmetric)
# into the matching Cassandra table, column-for-column.
db = MySQLdb.connect("localhost", "root", "root", "Enmetric")
cursor = db.cursor()
cluster = Cluster(['localhost'])
session = cluster.connect('enmetric')
try:
    cursor.execute("select IF(guid IS NULL,'NULL',guid) AS guid,power_avg,power_min,power_max,power_sum,num_measurements,occurred_at,channel_id,channel_number,node_id,node_hid,account_id, bridge_mac_addr,voltage_avg,current_avg,power_factor_avg,frequency_avg,status_avg,bridge_guid,node_guid,channel_guid,channel_profile_guid,device_type_guid,account_guid,user_guid,node_mac_addr,updated,created from minute_measurement where occurred_at < timestamp '2016-01-01 01:30:00' and occurred_at >= timestamp '2016-01-01 01:00:00'")
    # FIX: the original rebound the result set variable inside the loop
    # (shadowing) and spelled out all 28 column indexes by hand; list(row)
    # expands the same 28 values in the same order.
    for row in cursor.fetchall():
        session.execute("Insert into minute_measurement (guid,power_avg,power_min,power_max,power_sum,num_measurements,occurred_at,channel_id,channel_number, node_id,node_hid,account_id,bridge_mac_addr,voltage_avg,current_avg,power_factor_avg,frequency_avg,status_avg,bridge_guid,node_guid, channel_guid,channel_profile_guid,device_type_guid,account_guid,user_guid,node_mac_addr,updated,created) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", list(row))
finally:
    # FIX (resource leak): the original never closed the MySQL connection
    # or shut down the Cassandra cluster; release both even on error.
    cursor.close()
    db.close()
    cluster.shutdown()
|
999,197 | f7c89af8d18e5432dfb3508b0016f9acd16427b9 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Marcelo Rodriguez, Brocade Communications Systems, Inc.
#
import os
import sys
import time
from oslo.config import cfg
from eventlet import greenthread
from neutron.db import api as db
from neutron.db import models_v2
from neutron.db.loadbalancer import loadbalancer_db as lb_models
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import exceptions
from neutron.common import utils as n_utils
from neutron.common import log
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_device_driver
from neutron.services.loadbalancer import constants as lb_const
from neutronclient.v2_0 import client as neutronclient
from novaclient.v1_1 import client as novaclient
from neutron.services.loadbalancer.drivers.brocade_vlb import (
brocade_adx_driver_impl as driver_impl,
brocade_adx_exceptions as adx_exception,
models as vlb_db,
driver as vlb_drv,
)
LOG = logging.getLogger(__name__)
# Unique name this driver reports to the LBaaS agent (see get_name()).
DRIVER_NAME = 'brocade_vlb_driver'
# [brocade] section: Keystone credentials used to build Nova/Neutron clients.
cfg.CONF.register_opts([
    cfg.StrOpt('deployment_model', help='The deployment mode'),
    cfg.StrOpt('tenant_id', help='tenant id'),
    cfg.StrOpt('tenant_admin_name', help='tenant admin username'),
    cfg.StrOpt('tenant_admin_password', help='tenant admin password'),
    cfg.StrOpt('auth_url', help='auth_url')
], "brocade")
# [brocade_vlb] section: vADX image/flavor/network selection plus polling
# intervals and timeouts for Nova instance spawn and appliance boot.
cfg.CONF.register_opts([
    cfg.StrOpt('flavor_id', help='Flavor id for the vADX'),
    cfg.StrOpt('image_id', help='Image id of the vADX'),
    cfg.StrOpt('management_network_id', help='Management network for the vADX'),
    cfg.StrOpt('data_network_id', help='Data network for the vADX'),
    cfg.StrOpt('username', help='Default username for the vADX'),
    cfg.StrOpt('password', help='Default password for the vADX'),
    cfg.IntOpt('nova_poll_interval', default=5,
               help=_('Number of seconds between consecutive Nova queries '
                      'when waiting for loadbalancer instance status change.')),
    cfg.IntOpt('nova_spawn_timeout', default=300,
               help=_('Number of seconds to wait for Nova to activate '
                      'instance before setting resource to error state.')),
    cfg.IntOpt('vlb_poll_interval', default=5,
               help=_('Number of seconds between consecutive vLB '
                      'queries when waiting for router instance boot.')),
    cfg.IntOpt('vlb_boot_timeout', default=300,
               help=_('Number of seconds to wait for vLB to boot '
                      'before setting resource to error state.')),
], "brocade_vlb")
class AgentDeviceDriver(agent_device_driver.AgentDeviceDriver):
    """Abstract device driver that defines the API required by LBaaS agent."""

    # NOTE(review): several names raised below (InstanceSpawnError,
    # InstanceBootTimeout, UnsupportedFeature) are not defined or imported
    # in this module's visible imports -- presumably they come from one of
    # the brocade modules; verify before relying on them.

    def __init__(self, conf, plugin_rpc):
        # conf: oslo config (exposes .brocade and .brocade_vlb sections);
        # plugin_rpc: RPC proxy back to the LBaaS plugin.
        LOG.debug("brocade_vlb_driver:: initialized")
        self.conf = conf
        self.plugin_rpc = plugin_rpc

    @classmethod
    def get_name(cls):
        """Returns unique name across all LBaaS device drivers."""
        return DRIVER_NAME

    @n_utils.synchronized('brocade-vlb-driver')
    def deploy_instance(self, pool):
        """Fully deploys a loadbalancer instance from a given config."""
        # Bail out if a vADX already exists for this pool.
        if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:
            LOG.debug('This is an error')
            return
        # Random instance name, e.g. 'vlb_ab12cd34ef56'.
        # NOTE(review): bytes.encode('hex') is Python 2 only -- would need
        # binascii.hexlify on Python 3; confirm target interpreter.
        name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))
        nova_client = self._get_nova_client()
        neutron_client = self._get_neutron_client()
        subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])
        LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)
        # Boot the vADX with a management NIC and a data NIC on the
        # pool's subnet network.
        vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,
                self.conf.brocade_vlb.flavor_id,
                nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },
                       {'net-id': subnet['subnet']['network_id'] }]
        )

        # Generator polled by _wait(): yields the sleep interval until the
        # Nova instance leaves the transitional states.
        def _vLb_active():
            while True:
                try:
                    instance = nova_client.servers.get(vLb.id)
                except Exception:
                    yield self.conf.brocade_vlb.nova_poll_interval
                    continue
                LOG.info(_("vLB Driver::Load Balancer instance status: %s")
                         %instance.status)
                if instance.status not in ('ACTIVE', 'ERROR'):
                    yield self.conf.brocade_vlb.nova_poll_interval
                elif instance.status == 'ERROR':
                    raise InstanceSpawnError()
                else:
                    break
        self._wait(_vLb_active,
                   timeout=self.conf.brocade_vlb.nova_spawn_timeout)
        LOG.info(_("vLB Driver::Waiting for the vLB app to initialize %s") %
                 vLb.id)
        mgmt_ip = self._get_address(vLb,
                                    self.conf.brocade_vlb.management_network_id)
        data_ip = self._get_address(vLb, subnet['subnet']['network_id'])
        # Record the pool -> appliance mapping before configuring it.
        vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,
                          data_ip, mgmt_ip)
        # Now wait for vlb to boot
        # Generator polled by _wait(): retries the initial SOAP
        # configuration (pool, data interface, default route, SNAT) until
        # the appliance answers.
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.create_pool(pool['pool'])
                    impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])
                    impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])
                    impl.enable_source_nat()
                except Exception as e:
                    LOG.debug('vLB Driver::Load Balancer instance %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB successfully deployed and configured"))

    @n_utils.synchronized('brocade-vlb-driver')
    def undeploy_instance(self, pool_id):
        """Fully undeploys the loadbalancer instance."""
        # NOTE(review): despite the name, pool_id is the same nested dict
        # shape as deploy_instance's argument ({'pool': {...}}).
        LOG.debug('vLB Driver::undeploy_instance')
        vlb_value = vlb_db.get_vlb_from_pool_id(pool_id['pool']['id'])
        nova_client = self._get_nova_client()
        instance = nova_client.servers.find(name=vlb_value['name'])
        instance.delete()
        vlb_db.delete_vlb(pool_id['pool']['id'])

    def get_stats(self, pool_id):
        # Statistics collection is not implemented; only logs the call.
        LOG.debug('vLB Driver::get_stats')

    def remove_orphans(self, known_pool_ids):
        # Not all drivers will support this
        raise NotImplementedError()

    def create_vip(self, vip):
        """Configure the VIP on the appliance that serves vip['pool_id']."""
        LOG.debug('vLB Driver::create_vip')
        vlb = vlb_db.get_vlb_from_pool_id(vip['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        # Retry over SOAP until the appliance accepts the VIP.
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.create_vip(vip)
                except Exception as e:
                    LOG.debug('vLB Driver::create_vip trying to connect to'
                              'vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish creating vip"))

    def update_vip(self, old_vip, vip):
        """Apply VIP changes (old -> new) on the owning appliance."""
        LOG.debug('vLB Driver::update_vip')
        vlb = vlb_db.get_vlb_from_pool_id(vip['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.update_vip(old_vip,vip)
                except Exception as e:
                    LOG.debug('vLB Driver::update_vip trying to connect to'
                              ' vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish updating vip"))

    def delete_vip(self, vip):
        """Remove the VIP from the owning appliance."""
        LOG.debug('vLB Driver::delete_vip')
        vlb = vlb_db.get_vlb_from_pool_id(vip['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.delete_vip(vip)
                except Exception as e:
                    LOG.debug('vLB Driver::delete_vip trying to connect to'
                              'vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish deleting vip"))

    def create_pool(self, pool):
        """Creating a pool deploys a dedicated appliance for it."""
        obj = {}
        obj['pool']=pool
        self.deploy_instance(obj)

    def update_pool(self, old_pool, pool):
        """Push pool changes (old -> new) to the owning appliance."""
        LOG.info('vLB Driver::update_pool')
        LOG.debug('>>>>>>>>>>>>>>>>>>>>> %s' % pool)
        vlb = vlb_db.get_vlb_from_pool_id(pool['id'])
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.update_pool(old_pool,pool)
                except UnsupportedFeature as e:
                    # Unsupported operations abort immediately (no retry).
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::update_pool trying to connect to'
                              'vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish updating pool"))

    def delete_pool(self, pool):
        """Deleting a pool tears down its dedicated appliance."""
        LOG.info('vLB Driver::delete_pool')
        obj = {}
        obj['pool'] = pool
        self.undeploy_instance(obj)

    @log.log
    def create_member(self, member):
        """Add a member to its pool's appliance, tagging it with the VIP id."""
        LOG.info('vLB Driver::create_member')
        vlb = vlb_db.get_vlb_from_pool_id(member['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        vip = self._get_vip(member['pool_id'])
        member['vip_id'] = vip['id']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.create_member(member)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::create_member trying to connect to'
                              ' vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,
                   timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish creating member"))

    def update_member(self, old_member, member):
        """Apply member changes (old -> new) on the pool's appliance."""
        LOG.info('vLB Driver::updating_member')
        vlb = vlb_db.get_vlb_from_pool_id(member['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.update_member(old_member,member)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::update_member trying to connect to'
                              'vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,
                   timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish updating member"))

    def delete_member(self, member):
        """Remove a member from the pool's appliance."""
        LOG.info('vLB Driver::delete_member')
        vlb = vlb_db.get_vlb_from_pool_id(member['pool_id'])
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.delete_member(member)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::delete_member trying to connect to'
                              ' vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,
                   timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish deleting member"))

    def create_pool_health_monitor(self, health_monitor, pool_id):
        """Attach a health monitor to the pool's appliance."""
        LOG.info('vLB Driver::create_pool_health_monitor')
        vlb = vlb_db.get_vlb_from_pool_id(pool_id)
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.create_health_monitor(health_monitor, pool_id)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::create_pool_health_monitor trying to'
                              ' connect to vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish creating healthmonitor"))

    def update_pool_health_monitor(self,
                                   old_health_monitor,
                                   health_monitor,
                                   pool_id):
        """Apply health-monitor changes on the pool's appliance."""
        LOG.info('vLB Driver::update_pool_health_monitor')
        vlb = vlb_db.get_vlb_from_pool_id(pool_id)
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    # NOTE(review): argument order here is (new, old, pool_id),
                    # opposite of this method's (old, new) parameters --
                    # verify against BrocadeAdxDeviceDriverImpl's signature.
                    impl.update_health_monitor(health_monitor,
                                               old_health_monitor, pool_id)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::update_health_monitor trying to'
                              ' connect to vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,
                   timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish updating healthmonitor"))

    def delete_pool_health_monitor(self, health_monitor, pool_id):
        """Detach a health monitor from the pool's appliance."""
        LOG.info('vLB Driver::delete_pool_health_monitor')
        vlb = vlb_db.get_vlb_from_pool_id(pool_id)
        mgmt_ip = vlb['mgmt_ip']
        def _vLb_soap():
            while True:
                try:
                    impl = driver_impl.BrocadeAdxDeviceDriverImpl(
                        self.conf.brocade_vlb.username,
                        self.conf.brocade_vlb.password,
                        mgmt_ip)
                    impl.delete_health_monitor(health_monitor, pool_id)
                except UnsupportedFeature as e:
                    raise e
                except Exception as e:
                    LOG.debug('vLB Driver::delete_pool_health_monitor trying '
                              ' to connect to vLB - %s' % e)
                    yield self.conf.brocade_vlb.vlb_poll_interval
                    continue
                break
        self._wait(_vLb_soap,
                   timeout=self.conf.brocade_vlb.vlb_boot_timeout)
        LOG.info(_("vLB Driver:vLB finish deleting health monitor"))

    def _get_nova_client(self):
        """Build a Nova client from the [brocade] tenant credentials."""
        LOG.debug(_("brocade_vlb_driver::Get Nova client"))
        return novaclient.Client(
            self.conf.brocade.tenant_admin_name,
            self.conf.brocade.tenant_admin_password,
            None,
            self.conf.brocade.auth_url,
            service_type='compute',
            tenant_id=self.conf.brocade.tenant_id)

    def _get_neutron_client(self):
        """Build a Neutron client from the [brocade] tenant credentials."""
        LOG.debug(_('brocade_vlb_driver::Get Neutron client'))
        return neutronclient.Client(
            username=self.conf.brocade.tenant_admin_name,
            password=self.conf.brocade.tenant_admin_password,
            tenant_id=self.conf.brocade.tenant_id,
            auth_url=self.conf.brocade.auth_url)

    def _wait(self, query_fn, timeout=0):
        """Drive a polling generator: sleep for each yielded interval.

        query_fn yields sleep intervals until it breaks (success) or
        raises. Raises InstanceBootTimeout when *timeout* seconds elapse.
        NOTE(review): the bare ``except Exception: pass`` swallows both the
        timeout and any error raised by query_fn -- callers never observe
        failure; confirm this best-effort behavior is intentional.
        """
        LOG.debug(_("brocade_vlb_driver:: Now we wait"))
        end = time.time() + timeout
        try:
            for interval in query_fn():
                greenthread.sleep(interval)
                if timeout > 0 and time.time() >= end:
                    raise InstanceBootTimeout()
        except Exception:
            pass

    def _get_address(self, instance, net_id):
        """Return the first fixed IP of *instance* on network *net_id*."""
        session = db.get_session()
        query = session.query(models_v2.Network)
        network = query.filter(models_v2.Network.id == net_id).one()
        # Nova keys addresses by network *name*, so resolve the id first.
        address_map = instance.addresses[network['name']]
        address = address_map[0]["addr"]
        return address

    def _get_vip(self, pool_id):
        """Return the single Vip DB row associated with *pool_id*."""
        session = db.get_session()
        query = session.query(lb_models.Vip)
        vip = query.filter(lb_models.Vip.pool_id == pool_id).one()
        return vip
class Wrap(object):
    """Expose a mapping's entries as both attributes and subscript keys.

    Kept for compatibility with the interface lib, which expects
    attribute-style access on objects built from plain dicts.
    """

    def __init__(self, d):
        # Merge the mapping straight into the instance namespace so that
        # wrap.key and wrap['key'] resolve to the same value.
        vars(self).update(d)

    def __getitem__(self, key):
        # Dict-style lookup delegates to the instance namespace and raises
        # KeyError for unknown keys, matching mapping semantics.
        return vars(self)[key]
|
999,198 | 0ca98922eda0c34020eacad44f195e123d2cd553 | """
Este script se encarga de la lectura del archivo de configuracion .ini
leyendo cada una de las propiedades correspondiente a la conexion a
la base de datos.
Siendo así más centralizado y modular el programa.
"""
from configparser import ConfigParser
# Obtener archivo de configuracion y leer sus propiedades (retorna un diccionario)
def ReadConfig(archivo = 'config.ini', seccion = 'postgresql'):
    """Read one section of an INI configuration file.

    Args:
        archivo: path to the .ini file (default 'config.ini').
        seccion: name of the section to load (default 'postgresql').

    Returns:
        dict mapping each option name in the section to its string value.

    Raises:
        Exception: if the requested section is missing (also triggered when
            the file does not exist, since ConfigParser.read silently
            ignores unreadable files).
    """
    # Parse the file; read() does not raise on a missing file.
    parser = ConfigParser()
    parser.read(archivo)
    if not parser.has_section(seccion):
        # BUG FIX: the original message read "Seccion ... encontrada"
        # ("section found") even though it fires when the section is NOT
        # found; the missing "no" made the error misleading.
        raise Exception('Seccion {0} no encontrada en el archivo {1}'.format(seccion, archivo))
    # items() yields (option, value) pairs for the section.
    return {clave: valor for clave, valor in parser.items(seccion)}
999,199 | 6a1029882f8d4014a4f872dff54ca85cc161e8db | import logging
l = logging.getLogger("claripy.ops")
#
# AST creation
#
def AbstractLocation(*args, **kwargs): # pylint:disable=no-self-use
    """Create a ``vsa.AbstractLocation``, forwarding all arguments.

    Thin factory kept for API symmetry with the other AST-creation helpers
    in this module; ``vsa`` is imported at the bottom of the file, so the
    name is resolved at call time.
    """
    aloc = vsa.AbstractLocation(*args, **kwargs)
    return aloc
#
# Some operations
#
#
# sigh
#
# pylint:disable=wildcard-import,unused-wildcard-import
from .ast.base import *
from .ast.bv import *
from .ast.fp import *
from .ast.bool import *
from .ast.strings import *
from . import vsa
# Short public alias: VS is the conventional abbreviation for ValueSet.
VS = ValueSet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.