index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
3,400 | 3df57059539e5e3579c6dbee6be288e04b5f93b5 | import boto3
import jinja2
import markdown
# Module-level S3 client: created once and reused across Lambda invocations.
s3_client = boto3.client('s3')
# HTML page template; {{content}} is filled with the rendered Markdown body.
TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<link href="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css" rel="stylesheet">
<style>
body {
font-family: sans-serif;
}
code, pre {
font-family: monospace;
}
h1 code,
h2 code,
h3 code,
h4 code,
h5 code,
h6 code {
font-size: inherit;
}
</style>
</head>
<body>
<div class="container">
{{content}}
</div>
</body>
</html>
"""
def lambda_handler(event, context):
    """Convert Markdown files dropped into an S3 bucket to styled HTML pages.

    Triggered by S3 PutObject events. For each record: download the Markdown
    object, render it with the page TEMPLATE, and upload the result next to
    the source as '<name>.html'.
    """
    for record in event['Records']:
        # Extract bucket and key information from the S3 PutObject event.
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        # Derive the output key. The original `key[:key.rfind('.md')]`
        # silently sliced off the last character when the key did not
        # contain '.md' (rfind returns -1).
        if key.endswith('.md'):
            output_key = key[:-len('.md')] + '.html'
        else:
            output_key = key + '.html'
        # Read Markdown file content from the S3 bucket.
        response = s3_client.get_object(Bucket=bucket, Key=key)
        md_content = response['Body'].read().decode('utf-8')
        # Convert Markdown content to HTML and wrap it in the page template.
        extensions = ['extra', 'smarty']
        html_content = markdown.markdown(md_content, extensions=extensions, output_format='html5')
        html_content_fmt = jinja2.Template(TEMPLATE).render(content=html_content)
        # Upload the rendered page. ContentType lets browsers display the
        # page inline instead of downloading it as a binary object.
        s3_client.put_object(Bucket=bucket, Key=output_key,
                             Body=html_content_fmt.encode('utf-8'),
                             ContentType='text/html; charset=utf-8')
|
3,401 | 2ed9eafb6e26971f642d1e33cbb3d1f3df34990a | from gurobipy import *
import math
# --- Commented-out parameter experiments kept for reference ---
# params.NonConvex = 2
# K = 5
# R = {0: 1000, 1: 5000, 2: 10000, 3: 20000, 4: 69354} # imbalanced
# R = {0: 50, 1: 100, 2: 150, 3: 84, 4: 400} # imbalanced
# R = {0: 100, 1: 200, 2: 484} # imbalanced
# R = {0: 10, 1: 20, 2: 30, 3: 50, 4: 100} # imbalanced
# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced
# R = {0: 42, 1: 42, 2: 42, 3: 42, 4: 42} # balanced
# R_sum = sum(R.values())
# print('sum of R:', R_sum)
N = 897
# B = 100
# R = {0: 10, 1: 23, 2: 90}
# R_sum = sum(R.values())
# K = len(R)
# p = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1, 5: 1.3, 6: 1, 7: 2, 8: 3, 9: 2}
# NOTE(review): p appears to be a per-client weight used in the t_max
# constraint of solve_model — confirm its exact semantics.
p = {0: 3, 1: 1.5, 2: 1.2, 3: 2, 4: 1}
t_2 = 0
epsilon = 0.01
D = 1000
c0 = 20
d0 = 0.2
eta = 0.5  # factor entering alpha/beta/lamda inside solve_model
# L_max = 0.4561413560946449
# G = 0.02
# G = 0.08
# L_max = 0.09
# L_max = 1.167833240579852
# G = 0.49404299383897043
def solve_model(K, R, N, L_max, G):
    """Build and solve the epoch-allocation MIQCP with Gurobi.

    Minimizes T * (t_max + t_2) subject to per-client time constraints and
    a quadratic convergence-style constraint.

    @param K: number of clients
    @param R: dict client-id -> sample count
    @param N: total budget (printed only; not used in the constraints)
    @param L_max: smoothness bound entering beta
    @param G: gradient bound entering alpha and beta
    @return: list of per-client epoch counts; [1]*K when no optimum is found
    """
    print('parameters==| k=%d \t |R=%s \t |N=%d \t |eta=%f \t |L_max=%f \t |G=%f'% (K, R, N, eta, L_max, G))
    R_sum = sum(R.values())
    gamma = c0 * d0 * 1 / R_sum
    alpha = eta * eta * G
    beta = eta * D * L_max * math.sqrt(G)
    lamda = eta * eta * epsilon * epsilon  # NOTE: computed but unused below
    print('parameters==| d0=%f \t |gamma=%s \t |alpha=%f \t |beta=%f \t |lamda=%f'% (d0, gamma, alpha, beta, lamda))
    # Create a new model
    m = Model("mip1")
    # Decision variables.
    T = m.addVar(vtype=GRB.INTEGER, name="T")
    E = {}
    for k in range(K):
        E[k] = m.addVar(vtype=GRB.INTEGER, name="E")
    t_max = m.addVar(vtype=GRB.CONTINUOUS, name="t_max")
    a = m.addVar(vtype=GRB.CONTINUOUS, name="R_1")
    b = m.addVar(vtype=GRB.CONTINUOUS, name="R_2")
    # 'pow_' renamed from 'pow' so the Python builtin is not shadowed.
    c, mul, pow_ = {}, {}, {}
    for k in range(K):
        c[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_3")
        mul[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_4")
        pow_[k] = m.addVar(vtype=GRB.CONTINUOUS, name="R_5")
    d = m.addVar(vtype=GRB.CONTINUOUS, name="R_6")
    E_min = m.addVar(vtype=GRB.INTEGER, name="E_min")
    E_min_2 = m.addVar(vtype=GRB.INTEGER, name="E_min_2")
    s = m.addVar(vtype=GRB.CONTINUOUS, name="R_sum")
    # Objective: rounds * (slowest client's round time + fixed overhead t_2).
    m.setObjective(T * (t_max + t_2), GRB.MINIMIZE)
    # t_max dominates each client's per-round time: t_max * p_k >= gamma*R_k*E_k.
    for k in range(K):
        m.addConstr(t_max * p[k] >= gamma * R[k] * E[k], "c0")
    # a == sum_k E_k^2 * R_k
    m.addConstr(a == quicksum(E[k] * E[k] * R[k] for k in range(K)))
    for k in range(K):
        m.addConstr(mul[k] == E[k] * (E[k] - 1))
        # c_k == sqrt(R_k), modeled as c_k^2 == R_k
        m.addConstr(c[k] * c[k] == R[k])
        m.addConstr(pow_[k] == mul[k] * c[k])
        # m.addConstr(E[k] >=1)
        m.addConstr(E_min <= E[k])
    m.addConstr(b == quicksum((pow_[k] * pow_[k]) for k in range(K)))
    # d == sqrt(b)
    m.addConstr(d * d == b)
    m.addConstr(s == alpha * a + D * D + beta * d)
    m.addConstr(E_min_2 == E_min * E_min)
    # Convergence-style constraint: s^2 <= E_min^2 * T.
    m.addConstr(s * s <= E_min_2 * T * 1, "c1")
    m.params.NonConvex = 2        # allow non-convex quadratic constraints
    m.setParam("OutputFlag", 0)   # silence solver chatter
    m.params.TimeLimit = 40       # cap solve time at 40 seconds
    m.optimize()
    local_epoch = []
    if m.status == GRB.OPTIMAL:
        print('solved!')
        Ex = m.getAttr('x', E)
        for k in range(K):
            # Keep strictly positive epoch counts only.
            if Ex[k] > 0.0001:
                local_epoch.append(int(Ex[k]))
    else:
        # No optimum within the time limit: fall back to one epoch each.
        local_epoch = [1 for i in range(K)]
    print('Optimization solution:', local_epoch)
    return local_epoch
# K = 5
# G = 0.23
# L_max = 1.48
# R = {0: 10, 1: 13, 2: 20, 3: 30, 4: 50} # imbalanced
# N = 30000
# eta = 0.5
# print('parameters:', K, R, N, eta, L_max, G)
# ll = solve_model(K, R, N, L_max, G)
|
3,402 | f125269d5b52da41734ce94683139c44f0c4a66a | """
Implements Single Instance Learning SVM
From https://github.com/garydoranjr/misvm/blob/master/misvm/sil.py
Modified by Nicolas
"""
from __future__ import print_function, division
import numpy as np
import inspect
from sklearn.svm import LinearSVC as SVM
from milsvm.util import slices
class SIL(SVM):
    """
    Single-Instance Learning applied to MI (multiple-instance) data.

    Every instance inherits its bag's label; a plain linear SVM is then
    trained on the flattened instances.
    """

    def __init__(self, C=1.0, scale_C=True,
                 verbose=True, sv_cutoff=1e-7, **kwargs):
        """
        @param C : the loss/regularization tradeoff constant [default: 1.0]
        @param scale_C : if False, scale C by the number of examples
        @param verbose : print optimization status messages [default: True]
        @param sv_cutoff : the numerical cutoff for an example to be
            considered a support vector [default: 1e-7]
        Remaining keyword arguments are forwarded to the underlying SVM.
        """
        self._bags = None
        self._bag_predictions = None
        self.scale_C = scale_C
        self.verbose = verbose
        self.sv_cutoff = sv_cutoff
        self.C = C
        # Internal state populated during fitting.
        self._X = None
        self._y = None
        self._objective = None
        self._alphas = None
        self._sv = None
        self._sv_alphas = None
        self._sv_X = None
        self._sv_y = None
        self._b = None
        self._predictions = None
        super(SIL, self).__init__(**kwargs)

    def fit(self, bags, y):
        """
        @param bags : a sequence of n bags; each bag is an m-by-k array-like
                      object containing m instances with k features
        @param y : an array-like object of length n containing -1/+1 labels
        @return : self (scikit-learn estimator convention)
        """
        self._bags = [np.asmatrix(bag) for bag in bags]
        y = np.asmatrix(y).reshape((-1, 1))
        # Flatten bags to instances; every instance gets its bag's label.
        svm_X = np.vstack(self._bags)
        svm_y = np.vstack([float(cls) * np.matrix(np.ones((len(bag), 1)))
                           for bag, cls in zip(self._bags, y)])
        super(SIL, self).fit(svm_X, svm_y)
        return self

    def _compute_separator(self, K):
        super(SIL, self)._compute_separator(K)
        self._bag_predictions = _inst_to_bag_preds(self._predictions, self._bags)

    def predict(self, bags, instancePrediction=None):
        """
        @param bags : a sequence of n bags; each bag is an m-by-k array-like
                      object containing m instances with k features
        @param instancePrediction : flag to indicate if instance-level
            predictions should also be returned
        @return : an array of length n containing real-valued bag predictions
            (threshold at zero to produce binary predictions); when
            instancePrediction is true, a (bag_preds, inst_preds) tuple
        """
        if instancePrediction is None:
            instancePrediction = False
        bags = [np.asmatrix(bag) for bag in bags]
        inst_preds = super(SIL, self).predict(np.vstack(bags))
        if instancePrediction:
            return _inst_to_bag_preds(inst_preds, bags), inst_preds
        else:
            return _inst_to_bag_preds(inst_preds, bags)

    def get_params(self, deep=True):
        """
        Return constructor parameter names and values (scikit-learn API).
        """
        # inspect.getargspec was removed in Python 3.11; inspect.signature
        # on the bound parent __init__ already excludes 'self', so the old
        # args.pop(0) is no longer needed.
        sig = inspect.signature(super(SIL, self).__init__)
        args = [name for name, param in sig.parameters.items()
                if param.kind in (param.POSITIONAL_OR_KEYWORD,
                                  param.KEYWORD_ONLY)]
        return {key: getattr(self, key, None) for key in args}
def _inst_to_bag_preds(inst_preds, bags):
    """Reduce instance-level predictions to bag level via per-bag max."""
    bag_preds = []
    for bidx in slices(map(len, bags)):
        bag_preds.append(np.max(inst_preds[slice(*bidx)]))
    return np.array(bag_preds)
|
3,403 | 545053bc2b7c8687622d747673f2ad37b978014c | # Converts text to speech in different accents. Requires pip3 install gTTS
from gtts import gTTS
import os

# Reference table shown to the user when picking a language/accent code.
language_code = """
Language Code
-------- ----
Afrikaans af
Albanian sq
Arabic ar
Belarusian be
Bulgarian bg
Catalan ca
Chinese Simplified zh-CN
Chinese Traditional zh-TW
Croatian hr
Czech cs
Danish da
Dutch nl
English en
Estonian et
Filipino tl
Finnish fi
French fr
Galician gl
German de
Greek el
Hebrew iw
Hindi hi
Hungarian hu
Icelandic is
Indonesian id
Irish ga
Italian it
Japanese ja
Korean ko
Latvian lv
Lithuanian lt
Macedonian mk
Malay ms
Maltese mt
Norwegian no
Persian fa
Polish pl
Portuguese pt
Romanian ro
Russian ru
Serbian sr
Slovak sk
Slovenian sl
Spanish es
Swahili sw
Swedish sv
Thai th
Turkish tr
Ukrainian uk
Vietnamese vi
Welsh cy
Yiddish yi
"""

print("We're going to speak anything you type in a different accent")
mytext = input("Please enter some text: ")
print(language_code)
# .strip() so a stray space or newline does not produce an invalid code
# that makes the gTTS request fail.
language = input("Please select the accent: ").strip()
# Pass the text and language code to the engine.
# NOTE: gTTS only *pronounces* the text with the chosen locale's voice;
# it does not translate the text into that language.
myobj = gTTS(text=mytext, lang=language, slow=True)
# Save the synthesized audio to an mp3 file named texty.
myobj.save("texty.mp3")
# Play it back (requires the mpg321 player to be installed and on PATH).
os.system("mpg321 texty.mp3")
|
3,404 | 1deb070dd91c01190b70fa678add31ecb82f34fa | #Creating function
# Creating a function
def name_of_function():
    """Docstring explains the function."""
    # Return (rather than print) so the value can be stored by the caller.
    return "Hello"
# Simple example
def dog_check(mystring):
    """Return True when the word 'dog' appears in mystring (any case)."""
    if 'dog' not in mystring.lower():
        return False
    return True

# Beginner move: the membership test is already a boolean (see next version).
dog_check('Dog ran away')
# Expert move: return the membership test directly.
def dog_check(mystring):
    """Return whether 'dog' occurs in mystring, case-insensitively."""
    lowered = mystring.lower()
    return 'dog' in lowered
# *args collects any number of positional arguments into a tuple.
def myfunc(*args):
    """Return 5% of the sum of all positional arguments."""
    total = sum(args)
    return total * 0.05

myfunc(14, 10, 100)
# **kwargs collects keyword arguments into a dictionary.
def myfunc(**kwargs):
    """Print the chosen fruit if one was passed, else a fallback message."""
    if 'fruit' not in kwargs:
        print('I did not find any fruit here')
    else:
        print('My fruit of choice is {}'.format(kwargs['fruit']))

myfunc(fruit='apple')
# Combination: positional values land in args, keywords in kwargs.
def myfunc(*args, **kwargs):
    """Print a sentence built from the first positional arg and kwargs['food']."""
    first = args[0]
    print('I would like {} {}'.format(first, kwargs['food']))

myfunc(10, 20, 30, fruit='orange', food='eggs', animal='dog')
## BONUS Project
# myfunc(word): return the string with letters at even indices lower-cased
# and letters at odd indices upper-cased (0-based indexing).
def myfunc(word):
    """Alternate the case of *word*: even positions lower, odd positions upper."""
    pieces = [ch.upper() if pos % 2 else ch.lower()
              for pos, ch in enumerate(word)]
    return ''.join(pieces)

myfunc('VictoriaSok')
|
3,405 | 4fbf5b4520aa4dca4c7cc80d56ba00f634d184bf | # -*- coding: utf-8 -*-
# Exercise completed in Python 3
# Exact value of the integral of x*(x-1) over [2, 6]: 160/3 = 53.333...
manual_calc = 53 + 1.0/3
def trapezoidal(f, a, b, n):
    """Approximate the integral of f over [a, b] with n trapezoids."""
    step = float(b - a) / n
    # Endpoints carry weight 1/2; interior nodes carry weight 1.
    acc = 0.5 * (f(a) + f(b))
    acc += sum(f(a + k * step) for k in range(1, n))
    return acc * step
def rectangular(f, a, b, n):
    """Approximate the integral of f over [a, b] with n midpoint rectangles."""
    step = float(b - a) / n
    # Sample f at the midpoint of each of the n sub-intervals.
    acc = f(a + 0.5 * step)
    acc += sum(f(a + 0.5 * step + k * step) for k in range(1, n))
    return acc * step
# Evaluate both schemes for f(x) = x*(x-1) on [2, 6] with 2 and 100 panels.
trap_2 = trapezoidal(lambda x: x * (x - 1), 2, 6, 2)
trap_100 = trapezoidal(lambda x: x * (x - 1), 2, 6, 100)
rect_2 = rectangular(lambda x: x * (x - 1), 2, 6, 2)
rect_100 = rectangular(lambda x: x * (x - 1), 2, 6, 100)
# Report strings are Russian: exact value / trapezoid and rectangle
# approximations / absolute errors of each.
print('Точное значение интеграла: {}\n'.format(manual_calc))
print('Аппроксимация трапециями:\n 2 трапеции: {}\n 100 трапеций: {}'
      .format(trap_2, trap_100))
print('Погрешность для аппроксимации трапециями:\n 2 трапеции: {}\n 100 трапеций: {}\n'
      .format(abs(trap_2 - manual_calc), abs(trap_100 - manual_calc)))
print('Аппроксимация прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}'
      .format(rect_2, rect_100))
print('Погрешность для аппроксимации прямоугольниками:\n 2 прямоугольника: {}\n 100 прямоугольников: {}'
      .format(abs(rect_2 - manual_calc), abs(rect_100 - manual_calc)))
3,406 | 995dc34ea32de4566e2804b6797d9b551b733ff3 |
# NOTE(review): the string names below match pynput.keyboard Key reprs —
# presumably these tables filter/normalize captured keystrokes; confirm
# against the consuming code (not visible in this chunk).
# Keys to ignore entirely.
forbidden = [
    'Key.esc', 'Key.cmd', 'Key.cmd_r', 'Key.menu',
    'Key.pause', 'Key.scroll_lock', 'Key.print_screen',
    'Key.enter', 'Key.space', 'Key.backspace', 'Key.ctrl_l',
    'Key.ctrl_r', 'Key.alt_l', 'Key.alt_gr', 'Key.caps_lock',
    'Key.num_lock', 'Key.tab', 'Key.shift', 'Key.shift_r',
    'Key.insert', 'Key.delete', 'Key.home', 'Key.end',
    'Key.page_up', 'Key.page_down', '/'
]
# Maps raw key reprs / numpad virtual-key codes to display labels.
dict_ = {
    ' ': ' ',
    'Key.f1': 'F1',
    'Key.f2': 'F2',
    'Key.f3': 'F3',
    'Key.f4': 'F4',
    'Key.f5': 'F5',
    'Key.f6': 'F6',
    'Key.f7': 'F7',
    'Key.f8': 'F8',
    'Key.f9': 'F9',
    'Key.f10': 'F10',
    'Key.f11': 'F11',
    'Key.f12': 'F12',
    '<96>': 'Num 0',
    '<97>': 'Num 1',
    '<98>': 'Num 2',
    '<99>': 'Num 3',
    '<100>': 'Num 4',
    '<101>': 'Num 5',
    '<102>': 'Num 6',
    '<103>': 'Num 7',
    '<104>': 'Num 8',
    '<105>': 'Num 9',
    '<110>': 'Num .',
    'Key.up': 'Up',
    'Key.down': 'Down',
    'Key.left': 'Left',
    'Key.right': 'Right',
    '\\\\': '\\'
}
|
3,407 | 153d37b58a10847aae1fa7dbec4c7576c3d97fb2 | import random
# library to create a window in the terminal
import curses

# Initialize curses and configure terminal modes for an interactive game.
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
curses.curs_set(0)
height, width = stdscr.getmaxyx()
# Create a new window of the full screen size.
window = curses.newwin(height, width, 0, 0)
window.keypad(1)
window.timeout(100)
# Starting position of the snake's head. Integer division ('//') is
# required: plain '/' yields floats in Python 3, which then propagate
# into every snake coordinate.
snk_x = width // 4
snk_y = height // 2
# Initialize the snake with length 3 (head first, extending left).
snake = [
    [snk_y, snk_x],
    [snk_y, snk_x - 1],
    [snk_y, snk_x - 2]
]
# First food item in the middle of the screen.
food = [height // 2, width // 2]
window.addch(int(food[0]), int(food[1]), curses.ACS_PI)
# The snake initially moves to the right.
key = curses.KEY_RIGHT
# main loop of the snake game
while True:
    # Non-blocking read (window.timeout above): -1 means no key pressed,
    # in which case the snake keeps its current direction.
    next_key = window.getch()
    key = key if next_key == -1 else next_key
    # Game over on wall hit or self-collision.
    # NOTE(review): the wall test uses [0, height]/[0, width]; the last
    # drawable row/col is height-1/width-1 — confirm the intended bounds.
    if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0] in snake[1:]:
        curses.endwin()
        quit()
    # Advance the head one cell in the current direction.
    new_head = [snake[0][0], snake[0][1]]
    if key == curses.KEY_DOWN:
        new_head[0] += 1
    if key == curses.KEY_UP:
        new_head[0] -= 1
    if key == curses.KEY_LEFT:
        new_head[1] -= 1
    if key == curses.KEY_RIGHT:
        new_head[1] += 1
    snake.insert(0, new_head)
    if snake[0] == food:
        # Ate the food: keep the tail (snake grows) and spawn new food
        # at a random cell not occupied by the snake.
        food = None
        while food is None:
            nf = [ random.randint(1, height-1), random.randint(1, width-1)]
            food = nf if nf not in snake else None
        window.addch(food[0], food[1], curses.ACS_PI)
    else:
        # Normal move: erase the old tail cell.
        tail = snake.pop()
        window.addch(int(tail[0]), int(tail[1]), ' ')
    # Draw the new head.
    window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)
|
3,408 | afb0359f4cdf5ed32bb785d969e9bf8919bb6add | import os
import json
import csv
import re
import requests
import spacy
import nltk
from nltk.parse import CoreNLPParser
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
# Shared stemmer instance (not referenced in this chunk; kept for importers).
stemmer = PorterStemmer()
from time import time
# Spanish spaCy model used for lemmatization throughout this module.
nlp = spacy.load('es_core_news_sm')
from modules_api import conts_log
# Stopword list files (project data); the 'inner' lists are not used here.
sw_spanish="./data/stop-esp.txt"
sw_english="./data/stop-eng.txt"
inner_spanish="./data/inner-stop-esp.txt"
inner_english="./data/inner-stop-eng.txt"
import stanza
### METODO PARA EL SERVICIO
'''
como el main de debajo. este método va a ser el controlador.
Mediante parámetros va a decidir qué procesos va a seguir
termList: array/lista de terminos
lang: string con el idoma : es, en
timeEx: booleano que activa si se aplica timex o no
patternBasedClean: booleano que activa si se aplican patrones o no
pluralClean: booleano que activa si se aplica limpieza de plurales o no
numbersClean: booleano que activa si se aplica limpieza de numeros o no
accentClean: booleano que activa si se aplica limpieza de acentos o no
'''
def preprocessing_terms(termlist, lang_in, timeEx, patternBasedClean, pluralClean, numbersClean, accentClean):
    """Controller: run the selected cleaning steps over a term list.

    @param termlist: list of candidate terms
    @param lang_in: 'es' or 'en'
    @param timeEx: enable temporal-expression removal (remote Añotador)
    @param patternBasedClean: enable POS-pattern cleaning (es/en only)
    @param pluralClean: enable plural normalization (Spanish only)
    @param numbersClean: enable number-word removal
    @param accentClean: enable accent deduplication
    @return: cleaned list of terms
    """
    date = '2020-06-03'  # TODO: should be derived automatically (e.g. today)
    print('terms:', termlist)
    print('lang:', lang_in)
    # Base cleaning (stopwords/punctuation) is always applied first.
    processedTerms = clean_terms(termlist, lang_in)
    print('This is processedTerms ')
    print(processedTerms)
    # Each optional stage rewrites processedTerms, whatever path is taken.
    if timeEx:
        processedTerms = '| '.join(processedTerms).replace('-', '').replace(',', '').replace(';', '')
        processedTerms = annotate_timex(processedTerms, date, lang_in)
        processedTerms.sort()
    # The original duplicated this branch per language; lang_in is the
    # stanza language code for both supported languages.
    if patternBasedClean and lang_in in ('es', 'en'):
        stanza.download(lang_in)
        pos_tagger = stanza.Pipeline(lang_in)
        processedTerms = delete_pattern(processedTerms, pos_tagger)
    if pluralClean and lang_in == 'es':
        processedTerms = quit_plural(processedTerms)
    if numbersClean:
        processedTerms = delete_numbers(processedTerms)
    if accentClean:
        processedTerms = acentos(processedTerms)
    # Final pass removes anything the other steps re-exposed.
    processedTerms = clean_terms(processedTerms, lang_in)
    return processedTerms
# 0 clean punctuation and stopwords
def clean_terms(termlist, lang_in):
    """Drop stopwords and strip light punctuation from a term list.

    @param termlist: list of terms
    @param lang_in: 'es' or 'en'; anything else raises ValueError (the
        original crashed later with UnboundLocalError)
    @return: cleaned list of terms
    """
    start_time = time()
    if lang_in == 'es':
        stop = stopwords.words('spanish')
        stop_path = sw_spanish
    elif lang_in == 'en':
        stop = stopwords.words('english')
        stop_path = sw_english
    else:
        raise ValueError('unsupported language: %r' % (lang_in,))
    # Merge the project-specific stopword file into NLTK's list.
    # 'with' fixes the file-handle leak of the original open().
    with open(stop_path, 'r', encoding='utf-8') as file:
        for line in file:
            stop.append(line.strip())
    clean_list = []
    deletes = []
    for term in termlist:
        k = term.strip(',.:')
        if k.lower() in stop or k in stop:
            deletes.append(k)
        else:
            # Keep the term, dropping stray commas and hyphens.
            clean_list.append(k.replace(',', '').replace('-', ''))
    print(deletes)
    cont = len(termlist) - len(clean_list)
    elapsed_time = time() - start_time
    txt = 'CLEAN_TERMS, DELETE ('+str(cont)+') NEW LIST SIZE: ('+str(len(clean_list))+') TIME: ('+str(elapsed_time)+')'
    conts_log.information(txt, 'TERMS REMOVED: '+', '.join(deletes))
    print('CLEAN_TERMS, DELETE', cont, len(clean_list), elapsed_time)
    return clean_list
# 1 annotator: temporal-expression removal via the remote Añotador service
def annotate_timex(text, date, lang):
    """Call the Añotador web service and drop temporal expressions.

    @param text: '| '-separated term string
    @param date: anchor date (YYYY-MM-DD); currently sent empty to the API
    @param lang: language code
    @return: list of terms with TimeEx entries removed; if the service is
        down the original terms are returned unchanged.
    """
    # Keep a copy of the exact text that was sent (debug aid).
    # 'with' fixes the file-handle leak of the original open().
    with open('texto.txt', 'w') as f:
        f.write(text)
    start_time = time()
    url = 'https://annotador.oeg.fi.upm.es/annotate'
    # Build the payload with json.dumps: the original manual string
    # concatenation produced invalid JSON whenever the text contained
    # quotes or backslashes.
    params = json.dumps({
        'inputText': text,
        'inputDate': '',
        'domain': 'legal',
        'lan': lang,
        'format': 'timex3',
    })
    headers = {
        'Content-Type': 'application/json;charset=utf-8'
    }
    response = requests.request("POST", url, headers=headers, data=params.encode('utf8'))
    textanotador = response.text
    print('ENTRA ANOTADOR')
    print(textanotador)
    code = response.status_code
    list_anotador = textanotador.split('|')
    print(list_anotador)
    # Entries containing '<...>' markup are the temporal expressions to
    # drop. (The original popped while iterating, skipping elements, and
    # needed a second pass to catch stragglers; a filter handles all.)
    deletes = [i for i in list_anotador if '<' in i and len(i) > 2]
    kept = [i for i in list_anotador if not ('<' in i and len(i) > 2)]
    cont = len(deletes)
    anotador = [i.strip().replace(',', '') for i in kept]
    if code != 200:
        print('WARNING: Annotador is down. Temporal expressions could not be removed.')
        anotador = text.split('| ')
        conts_log.error('Annotador is down. Temporal expressions could not be removed.', code)
    else:
        elapsed_time = time() - start_time
        txt = 'AÑOTADOR, DELETE ('+str(cont)+') NEW LIST SIZE: ('+str(len(anotador))+') TIME: ('+str(elapsed_time)+')'
        print('AÑOTADOR DELETE', cont, len(anotador), elapsed_time)
        conts_log.information(txt, 'TERMS REMOVED: '+', '.join(deletes))
    return anotador
def infinitive(verb):
    """Strip simple Spanish future-tense endings to recover the infinitive.

    Verbs already ending in -ar/-er/-ir are returned unchanged; otherwise
    the future endings 'rá', 'án' and 'ré' are stripped in sequence.
    """
    if verb[-2:] in ('ar', 'er', 'ir'):
        return verb
    if verb[-2:] == 'rá':
        verb = verb[:-1]
    if verb[-2:] == 'án':
        verb = verb[:-2]
    if verb[-2:] == 'ré':
        verb = verb[:-1]
    return verb
# 2.1 POS-pattern based cleaning (Spanish)
def delete_pattern(anotador, pos_tagger):
    """Remove terms whose POS-tag sequence matches a non-term pattern.

    Single-word verbs/auxiliaries are also replaced in place by their
    spaCy lemma. The ~300 lines of copy-pasted if-branches from the
    original are folded into data tables; rule order and duplicate rules
    are preserved on purpose so the removal counter and log match the
    original (a term matching several rules is counted several times).

    @param anotador: list of candidate terms (modified in place, returned)
    @param pos_tagger: a stanza Pipeline for the current language
    @return: the filtered list
    """
    # Prefixes are the first 4 chars of the tags built below:
    # 'aux-', 'noun', 'verb', 'adv-', 'adj-', 'scon'.
    pair_patterns = [
        ('aux-', 'verb'), ('verb', 'aux-'), ('verb', 'verb'), ('noun', 'verb'),
        ('noun', 'aux-'), ('adv-', 'adj-'), ('adj-', 'adv-'), ('adv-', 'aux-'),
        ('aux-', 'adv-'), ('adv-', 'verb'), ('verb', 'aux-'), ('noun', 'adv-'),
        ('adv-', 'noun'), ('verb', 'adv-'), ('verb', 'noun'), ('aux-', 'noun'),
        ('adj-', 'noun'),
    ]
    # (pattern, require-membership): the later group of original rules also
    # re-checked that the term was still present in the list.
    triple_patterns = [
        (('noun', 'verb', 'verb'), False), (('noun', 'aux-', 'verb'), False),
        (('noun', 'aux-', 'aux-'), False), (('noun', 'verb', 'aux-'), False),
        (('noun', 'verb', 'noun'), False), (('noun', 'aux-', 'noun'), False),
        (('verb', 'noun', 'noun'), False), (('noun', 'noun', 'verb'), False),
        (('aux-', 'noun', 'noun'), False), (('noun', 'noun', 'aux-'), False),
        (('aux-', 'verb', 'noun'), False), (('noun', 'verb', 'adj-'), False),
        (('noun', 'verb', 'noun'), True), (('verb', 'noun', 'adj-'), True),
        (('noun', 'aux-', 'adj-'), True), (('noun', 'adv-', 'adj-'), True),
        (('adj-', 'adv-', 'adj-'), True), (('noun', 'adv-', 'scon'), True),
        (('adj-', 'scon', 'adv-'), True), (('aux-', 'noun', 'adj-'), True),
        (('verb', 'verb', 'verb'), True), (('adj-', 'noun', 'adj-'), True),
    ]
    start_time = time()
    deletes = []
    lemmas_list = []
    cont = 0

    def lemma_of(token_text):
        # spaCy lemma of a single token string (joined over sub-tokens).
        return ''.join(tok.lemma_ for tok in nlp(token_text))

    for term in anotador:
        if len(term) <= 1:
            continue
        # POS-tag the term with stanza.
        sentence = pos_tagger(term).sentences[0]
        tags = [(w.text, w.upos) for w in sentence.words]
        n_words = len(term.split(' '))
        list_pos = []
        for text_tok, upos in tags:
            if upos == 'AUX' or upos == 'VERB':
                lem = lemma_of(text_tok)
                lemmas_list.append(lem)
                # If lemmatizing did nothing useful, keep the surface form.
                if lem == term:
                    lem = text_tok
                prefix = 'aux--' if upos == 'AUX' else 'verb-'
                list_pos.append(prefix + str(lem))
                # Single-word terms are replaced in place by their lemma.
                if n_words == 1:
                    anotador[anotador.index(str(term))] = str(lem)
            elif upos == 'NOUN':
                list_pos.append('noun-' + str(text_tok))
            elif upos == 'ADV':
                list_pos.append('adv--' + str(text_tok))
            elif upos == 'ADJ':
                list_pos.append('adj--' + str(text_tok))
            elif upos == 'SCONJ':
                list_pos.append('sconj' + str(text_tok))
        seq = tuple(p[0:4] for p in list_pos)
        if len(seq) == 1:
            # A lone adverb is never a term.
            if seq[0] == 'adv-':
                deletes.append(term)
                cont += 1
        elif len(seq) == 2 and n_words == 2:
            for pat in pair_patterns:
                if seq == pat:
                    deletes.append(term)
                    cont += 1
        elif len(seq) == 3 and n_words == 3:
            for pat, must_be_present in triple_patterns:
                if seq == pat and (not must_be_present or term in anotador):
                    deletes.append(term)
                    cont += 1
    # Remove every matched term (duplicates in 'deletes' are harmless:
    # the membership check guards the second pop).
    for term in deletes:
        if term in anotador:
            anotador.pop(anotador.index(term))
    elapsed_time = time() - start_time
    txt = 'PATRONES, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(len(anotador)) + ') TIME: (' + str(elapsed_time) + ')'
    print('PATRONES DELETE', cont, len(anotador), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: ' + ', '.join(deletes))
    return anotador
# 3 plural removal (Spanish heuristics)
def quit_plural(valuelist):
    """Singularize Spanish plural terms and drop resulting duplicates.

    Heuristic suffix stripping ('-es'/'-s') with repairs ('-on' -> '-ón',
    trailing 'v'/'bl'/'br' -> +'e'). Terms containing a number word from
    ./data/numberlist_es are kept unchanged.
    NOTE(review): valuelist.index() is used for write-back, so duplicated
    input terms may update the wrong slot — confirm inputs are unique.
    """
    start_time=time()
    file=open('./data/numberlist_es', 'r', encoding='utf-8')
    read=file.readlines()
    plural=[]
    cont=0
    for i in valuelist:
        ind=valuelist.index(i)
        # Normalize: drop commas, turn hyphens into spaces.
        term=i.replace(',', '').replace('-', ' ')
        valuelist[ind]=term
        plu=''
        if('es' in term[-2:] or 's' in term[-1:]):
            slp=term.split(' ')
            # Terms containing a number word are left as-is.
            for n in read:
                if(n[:-1] in slp):
                    plu=i
            if not len(plu):
                for j in slp:
                    # '-es' plural (but not after t/l), or explicit '-les'.
                    if( ('es' in j[-2:] ) and 't' not in j[-3:-2] and 'l' not in j[-3:-2] or ('les' in j[-3:] ) ):
                        plu+=' '+j[:-2]
                        # Repairs after stripping '-es'.
                        if('on' in plu[-2:]):
                            plu=' '+plu[:-2]+'ón'
                        if('v' in plu[-1:]):
                            plu=' '+plu+'e'
                        if('bl' in plu[-2:]):
                            plu=' '+plu+'e'
                        if('br' in plu[-2:]):
                            plu=' '+plu+'e'
                    elif(('s' in j[-1:]) ):
                        # Simple '-s' plural.
                        plu+=' '+j[:-1]
                        pos=slp.index(j)
                        if(pos>0):
                            bef=slp[0]
                            # Agreement fix when a preceding word ends in 'n'
                            # (but not in 'ón').
                            if('n' in bef[-1:] and 'ón' not in bef[-2:]):
                                splb=plu.split(' ')
                                firts=splb[1]
                                if('n' not in firts[-1:]):
                                    pass
                                else:
                                    plu0=firts[:-1]
                                    join1=' '.join(splb[2:])
                                    plu=plu0+' '+join1
                    else:
                        # Word is not plural: keep it verbatim.
                        plu+=' '+j
            ind=valuelist.index(term)
            valuelist[ind]=plu.strip()
            cont=cont+1
    # Deduplicate via a set (order is therefore not preserved).
    quit_plu=[]
    nuevalista=set(valuelist)
    for i in nuevalista:
        quit_plu.append(i)
    # Recompute which entries were duplicates, for the log only.
    deletes = []
    new=[]
    for i in valuelist:
        if i not in new:
            new.append(i)
        else:
            deletes.append(i)
    #print('plurañes eliminadas ->', deletes)
    elapsed_time=time()-start_time
    txt='PLURAL, DELETE'+' ('+str(len(valuelist)-len(quit_plu))+') NEW LIST SIZE: ('+str(len(quit_plu))+') TIME: ('+str(elapsed_time)+')'
    joind=', '.join(deletes)
    print('PLURALES DELETE', len(valuelist)-len(quit_plu), len(quit_plu), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: '+joind)
    return(quit_plu)
# 4 number-word removal
def delete_numbers(list_):
    """Remove terms containing a Spanish number word (./data/numberlist_es).

    The original popped from the list while iterating it, which skipped the
    element following every hit; iterating a snapshot fixes that.

    @param list_: list of terms; filtered in place and returned
    """
    start_time = time()
    with open('./data/numberlist_es', 'r', encoding='utf-8') as file:
        number_words = [line.rstrip('\n') for line in file]
    deletes = []
    cont = 0
    for number in number_words:
        # Snapshot so removal does not disturb the iteration.
        for term in list(list_):
            # Whole-word containment test (pad both sides with spaces).
            if ' ' + number + ' ' in ' ' + term + ' ':
                deletes.append(term)
                cont += 1
                list_.remove(term)
    elapsed_time = time() - start_time
    txt = 'NUMBERS, DELETE' + ' (' + str(cont) + ') NEW LIST SIZE: (' + str(len(list_)) + ') TIME: (' + str(elapsed_time) + ')'
    print('NUMEROS DELETE', cont, len(list_), elapsed_time)
    conts_log.information(txt, 'TERMS REMOVED: ' + ', '.join(deletes))
    return list_
# 5 read input file
def readFile(read):
    """Build a '| '-separated term string from tab-separated input lines.

    @param read: iterable of lines shaped 'id<TAB>term...'
    @return: string joining every second column, each prefixed with '| '
    """
    text = ''
    for line in read:
        # Strip the trailing newline before splitting on tabs.
        fields = line.rstrip('\n').split('\t')
        if len(fields) < 2:
            # Skip malformed lines instead of crashing with IndexError.
            continue
        text += '| ' + fields[1]
    return text
# strips accents (lowercase accented vowels only)
def quit_tilds(s):
    """Return s with the lowercase accented vowels replaced by plain ones."""
    return s.translate(str.maketrans('áéíóú', 'aeiou'))
def acentos(last):
    """Deduplicate terms that differ only by lowercase accents.

    When stripping tildes makes two entries collide (e.g. 'cancion' vs
    'canción'), the variant written without accents is removed. The list
    is sorted in place and returned.
    """
    start_time = time()
    table = str.maketrans('áéíóú', 'aeiou')
    stripped = [term.translate(table) for term in last]
    # A stripped form seen a second (or later) time marks a collision.
    seen = []
    collided = []
    for form in stripped:
        if form in seen:
            collided.append(form)
        else:
            seen.append(form)
    # Remove one occurrence of each distinct literal term that collides
    # (only accent-less terms can literally equal a stripped form).
    to_remove = []
    for term in last:
        if term in collided and term not in to_remove:
            to_remove.append(term)
    for term in to_remove:
        last.remove(term)
    last.sort()
    elapsed_time = time() - start_time
    return last
#-------MAIN-------#
def main(read, lang_in):
    """Run the term-cleaning pipeline over the lines in *read*.

    For Spanish input ('es') the terms go through pattern, plural,
    number-word and accent filters plus a final cleaning pass.  Note:
    there is no return statement, so the function yields None; the code
    that would write and return the result is commented out below.
    """
    start_time=time()
    text=readFile(read)
    date='2020-06-03'  # reference date handed to the TimeX annotator
    lang=lang_in
    termlist=text.split('| ')
    print('RECIBE', termlist)
    clean_text=clean_terms(termlist, lang_in)
    join_clean_text='| '.join(clean_text).replace('-', '').replace(',', '').replace(';', '')
    anotador=annotate_timex(join_clean_text, date, lang)
    anotador.sort()
    if(lang_in=='es'):
        # Spanish-specific filters, applied in a fixed order.
        pattern=delete_pattern(anotador)
        plural=quit_plural(pattern)
        numbers=delete_numbers(plural)
        tildes=acentos(numbers)
        stop2=clean_terms(tildes, lang_in)
        print('FINALES', stop2)
        '''new=open('../data/clean_terms_freq4.txt', 'w')#se imprime lo que se queda
        for i in stop2:
            new.write(i+'\n')
        new.close()
        elapsed_time=time()-start_time
        print('Main', elapsed_time)
        return(stop2)'''
#file=open('../data/estatuto_es.txt', 'r', encoding='utf-8')
#read=file.readlines()
#main(read)
|
3,409 | 77e985d94d3b47539f046a3a46cb1a197cef86f4 | ###############################################################################
# Programming Essentials B8IT102 Assessment #
# Student: Barry Sheppard ID: 10387786 #
# Problem 1 #
###############################################################################
###############################################################################
# Functions #
###############################################################################
def LimitedInput(message, limit, isNumber=False):
    """Prompt with *message* until the reply is valid, then return it.

    A reply is valid when it is at most *limit* characters long and,
    when *isNumber* is True, parses as a number.  Note that the numeric
    check runs on every attempt, so an over-long non-numeric reply
    triggers both warning messages (original behaviour, preserved).
    """
    accepted = False
    while not accepted:
        answer = input(message)
        accepted = len(answer) <= limit
        if not accepted:
            print("The input must be", limit, "characters or less.")
        if isNumber is True and CheckNumber(answer) is False:
            print("The input must be a number.")
            accepted = False
    return answer
def CheckNumber(userInput):
    """Return True when *userInput* can be parsed as a float, else False."""
    try:
        float(userInput)
    except ValueError:
        return False
    return True
def DateInput(message):
    """Prompt (with *message*) for a DD/MM/YYYY date until valid; return it.

    Validates: exactly two '/' separators, 2/2/4-digit all-numeric
    fields, day 1-31, month 1-12, year 2001-2999.  Day/month combinations
    such as 31/02 are not cross-checked (original behaviour).
    """
    askAgainMessage = "The date must be in the format DD/MM/YYYY"
    keepAsking = True
    while keepAsking:
        answer = input(message)
        # Split on '/' and expect exactly three parts: day, month, year.
        dateCheck = answer.split(sep="/")
        # Fixed: the original compared with "is not 3" (object identity),
        # which only worked thanks to CPython's small-int cache.
        if len(dateCheck) != 3:
            print(askAgainMessage)
        else:
            day = dateCheck[0]
            month = dateCheck[1]
            year = dateCheck[2]
            # isdigit() (rather than the float-based CheckNumber) so
            # inputs like "1./01/2018" re-prompt instead of crashing int().
            if (len(day) == 2 and len(month) == 2 and len(year) == 4 and
                    day.isdigit() and month.isdigit() and year.isdigit()):
                day = int(day)
                month = int(month)
                year = int(year)
                if (day > 0 and day < 32 and month > 0 and month < 13 and
                        year > 2000 and year < 3000):
                    keepAsking = False
                else:
                    print(askAgainMessage)
            else:
                print(askAgainMessage)
    return answer
###############################################################################
# Prompt the user for the required input #
###############################################################################
# Ask the user to input the required details
# Gather the employee and pay details from the console.
employeeName = LimitedInput("Employee Name: ", 20) # Example Mark Bate
employeeNumber = LimitedInput("Employee Number: ", 10) # Example 123456789A
weekEnding = DateInput("Week ending: ") # Example 26/01/2018
hoursWorked = LimitedInput("Number of hours worked: ", 6, True) # Example 42.5
# As there are only 168 hours in the week this is a check to prevent errors
# This could be modified to a lower number based on legal limit
while float(hoursWorked) > 168:
    print("The number of hours worked is too large.")
    hoursWorked = LimitedInput("Number of hours worked: ", 6, True)
standardRate = LimitedInput("Hourly Rate: ", 6, True) # Example 10.50
overtimeMultiplier = LimitedInput("Overtime Rate: ", 3, True) # Example 1.5
standardTaxRate = LimitedInput("Standard Tax Rate: ", 2, True) # Example 20
overtimeTaxRate = LimitedInput("Overtime Tax Rate: ", 2, True) # Example 50
# Convert input to numbers; during the input we validated these as numerals
hoursWorked = float(hoursWorked)
standardRate = float(standardRate)
overtimeMultiplier = float(overtimeMultiplier)
standardTaxRate = float(standardTaxRate)
overtimeTaxRate = float(overtimeTaxRate)
###############################################################################
#                    Calculate required details for ouput                     #
###############################################################################
# Check if more than standard hours have been worked
# (37.50 is the standard working week; anything above is overtime).
if hoursWorked > 37.50:
    standardHours = 37.50
    overtimeHours = hoursWorked - 37.50
else:
    standardHours = hoursWorked
    overtimeHours = 0
# Complete additional calculations for pay and deductions
standardPayTotal = standardHours * standardRate
overtimeRate = overtimeMultiplier * standardRate  # As overtime is multiplier
overtimePayTotal = overtimeHours * overtimeRate
# Tax rates are percentages, hence the /100.
standardTaxTotal = (standardPayTotal * standardTaxRate)/100
overtimeTaxTotal = (overtimePayTotal * overtimeTaxRate)/100
payTotal = standardPayTotal + overtimePayTotal
totalDeductions = standardTaxTotal + overtimeTaxTotal
netPay = payTotal - totalDeductions
###############################################################################
#                           Printing out the Payslip                          #
###############################################################################
# Output is one big chunk of text with the variables inserted using the format
# function, this lets us define the float variables as two digit decimals.
print("""
                P A Y S L I P
        WEEK ENDING {:}
Employee: {:}
Employee Number: {:}
                Earnings                        Deductions
        Hours   Rate    Total
Hours (normal)  {:6.2f}  {:6.2f}  {:6.2f}    Tax @ {:02.0f}%  {:6.2f}
Hours (overtime)  {:6.2f}  {:6.2f}  {:6.2f}    Tax @ {:02.0f}%  {:6.2f}
                        Total pay:        {:7.2f}
                        Total deductions: {:7.2f}
                        Net pay:          {:7.2f}
""".format(weekEnding, employeeName, employeeNumber, standardHours,
           standardRate, standardPayTotal, standardTaxRate, standardTaxTotal,
           overtimeHours, overtimeRate, overtimePayTotal, overtimeTaxRate,
           overtimeTaxTotal, payTotal, totalDeductions, netPay))
|
3,410 | 3fed8723d215bce3cf391752e07ca85b2d6701a3 | # Generated by Django 2.1.7 on 2019-04-01 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter mainsubmission.execution_time to a nullable
    DecimalField (max_digits=6, decimal_places=3, default 0)."""

    dependencies = [
        ('submissions', '0004_auto_20190401_1834'),
    ]
    operations = [
        migrations.AlterField(
            model_name='mainsubmission',
            name='execution_time',
            field=models.DecimalField(blank=True, decimal_places=3, default=0, max_digits=6, null=True),
        ),
    ]
|
3,411 | f8b473451a15e42319b60f44a527d715c0032614 | n = int(input())
# For each of the n test cases (n read above), take three side lengths and
# report whether they form a right triangle: after sorting the squares,
# Pythagoras must hold for the two smallest against the largest.
results = []
for _ in range(n):
    squares = sorted(int(v) ** 2 for v in input().split())
    results.append("YES\n" if squares[0] + squares[1] == squares[2] else "NO\n")
print("".join(results), end="")
3,412 | 915d6547057f43c1cc5d96d9cb4529c56bc85559 | #"countinu" example : repeat printing "Too small" or "Input is..." according to input's lenth
# Python 2 demo of 'continue': keep prompting until the user types 'quit';
# short replies (< 3 chars) restart the loop without reaching the final print.
while True:
    s=raw_input('Enter something: ')
    if s == 'quit' :
        break
    if len(s) <3:
        print 'Too small'
        continue
    # continue: skip the rest of the loop body and start the next iteration
    print 'Input is of sufficient lenth'
|
3,413 | 78e72bf3ac73113e2c71caf5aed70b53cafa9c46 | #题目014:将一个正整数分解质因数
#【编程思路】类似手算分解质因数的过程,找出因数后,原数字缩小
'''
找出质因数并不难,把他们打印出来有点小烦
'''
num = int(input('请输入一个整数:'))
original = num
a = []
# Peel off the smallest divisor repeatedly; each divisor found this way is
# necessarily prime, so `a` ends up as the prime factorisation of `original`.
while num > 1:
    for i in range(2, num + 1):
        if num % i == 0:
            a.append(i)
            num = num // i
            break
# '*'.join handles any number of factors, including none (input <= 1),
# where the original crashed indexing a[-1] on an empty list.  The output
# format ("12 =2*2*3") is unchanged for factorable inputs.
factors = '*'.join(map(str, a)) if a else str(original)
print("%d =%s" % (original, factors))
|
3,414 | 1aa2bff245322a34438cc836e23f430926dfac6c | import pymysql
# Connect to the local order_db as root (empty password) and drop the
# custdetail table.  NOTE(review): no try/finally — if execute() raises,
# the connection is never closed.
db = pymysql.connect( "localhost", "root", "", "order_db",
        use_unicode=True, charset="utf8")
cursor = db.cursor()
sql = "DROP TABLE custdetail"
cursor.execute(sql)
db.close()
|
3,415 | 2e794e281c6f34858cd32725cdc454eb18c28892 | import sys
# Kruskal's MST input: first line "V E" (vertex and edge counts), then E
# lines of "A B weight".
V, E = map(int, sys.stdin.readline().split())
node = []  # union-find parent table: initially every vertex is its own root
graphs = []
for i in range(V+1):
    node.append(i)
for _ in range(E):
    graphs.append((list(map(int, sys.stdin.readline().split()))))
# Sort edges by weight (index 2) so the cheapest edge is considered first.
graph = sorted(graphs, key=lambda x: x[2])
def get_parent(parent, x):
    """Return the root of x's set, compressing the path as recursion unwinds."""
    if parent[x] != x:
        # Point x straight at its root so later lookups are O(1).
        parent[x] = get_parent(parent, parent[x])
    return parent[x]
def union_parent(parent, a, b):
    """Merge the sets containing a and b; b's root is attached under a's."""
    root_a = get_parent(parent, a)
    root_b = get_parent(parent, b)
    if root_a == root_b:
        return
    parent[root_b] = root_a
# Kruskal's greedy loop: take the cheapest remaining edge that joins two
# different components; stop after V-1 edges (spanning tree complete).
N = 0         # edges accepted so far
distance = 0  # running total MST weight
idx = 0
while N < V-1:
    A, B, dist = graph[idx]
    if get_parent(node, A) == get_parent(node, B):
        # Same component already: this edge would close a cycle.
        idx += 1
        continue
    union_parent(node, A, B)
    distance += dist
    N += 1
    idx += 1
print(distance)
3,416 | 3ac30240577eda08343796abbd051d5d3b45beaf | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
from moviepy.editor import VideoFileClip
output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'
mtx = None
dist = None
def load_image(filename):
    """Load *filename* via matplotlib; callers treat the result as RGB."""
    return mpimg.imread(filename)
def calibrate_camera(rows=6, cols=9):
    """Return the camera matrix and distortion coefficients (mtx, dist).

    A cached result is loaded from calibration.npz when present.
    Otherwise the chessboard images in camera_cal/ (rows x cols inner
    corners) are used to calibrate; undistorted copies are saved for
    inspection and the result is cached for next time.
    """
    mtx = None
    dist = None
    save_file = 'calibration.npz'
    try:
        # Fast path: reuse a previously saved calibration.
        data = np.load(save_file)
        mtx = data['mtx']
        dist = data['dist']
        print('using saved calibration')
    except FileNotFoundError:
        print('begin calibration')
        filenames = glob('camera_cal/*.jpg')
        objpoints = [] # 3D points in real world space
        imgpoints = [] # 2D points in image plane
        #Prepare object points, like (0,0,0), (1,0,0)...
        objp = np.zeros((rows*cols,3), np.float32)
        objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates
        for f in filenames:
            img = load_image(f)
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)
            if ret:
                imgpoints.append(corners)
                objpoints.append(objp)
        # NOTE(review): uses `gray` from the last loop iteration for the
        # image size, so camera_cal/ must not be empty — confirm.
        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if ret:
            # Save undistorted versions of the calibration images.
            for f in filenames:
                img = load_image(f)
                undist = cv2.undistort(img, mtx, dist, None, mtx)
                save_output_image(undist, 'undistorted-' + f.split('/')[-1])
        print('end calibration')
        np.savez(save_file, mtx=mtx, dist=dist)
    return mtx, dist
def save_output_image(img, filename, cmap=None):
    """Write *img* into the output_images directory under *filename*."""
    mpimg.imsave(output_images_dir + filename, img, cmap=cmap)
def undistort(img):
    """Undo lens distortion using the module-level calibration (mtx, dist)."""
    return cv2.undistort(img, mtx, dist, None, mtx)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose scaled absolute Sobel gradient is in *thresh*.

    img          -- RGB image array
    orient       -- 'x' or 'y': derivative direction
    sobel_kernel -- aperture size passed to cv2.Sobel
    thresh       -- inclusive (low, high) bounds on the 0-255 scaled gradient
    Raises ValueError for any other *orient*; the original silently fell
    through to a NameError on `abs_sobel`.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    else:
        raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
    # Normalise to 0-255 so the threshold is independent of kernel size.
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    grad_binary = np.zeros_like(scaled_sobel)
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary
def color_threshold(img):
    """Binary lane-pixel mask from the HLS colour space.

    A pixel is selected when it is yellow-ish (hue in 16..23 with enough
    saturation and lightness) or simply very bright (lightness > 220).
    """
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    hue = hls[:,:,0]
    light = hls[:,:,1]
    sat = hls[:,:,2]
    yellow = (hue > 15) & (hue < 24) & (sat > 90) & (light > 50)
    bright = light > 220
    mask = np.zeros_like(hue)
    mask[yellow | bright] = 1
    return mask
def window_mask(width, height, img_ref, center, level):
    """Mask (same shape/dtype as img_ref) that is 1 inside one search window.

    *level* counts window rows up from the bottom of the image; the window
    is *height* tall and *width* wide, horizontally centred on *center*
    and clipped to the image edges.
    """
    rows = img_ref.shape[0]
    top = int(rows - (level + 1) * height)
    bottom = int(rows - level * height)
    left = max(0, int(center - width / 2))
    right = min(int(center + width / 2), img_ref.shape[1])
    mask = np.zeros_like(img_ref)
    mask[top:bottom, left:right] = 1
    return mask
def find_lr_window_centroids(image, window_width, window_height, margin):
    """Locate left/right lane-line window centroids in a warped binary image.

    The image is scanned bottom-up in horizontal bands of *window_height*;
    each band is column-summed and convolved with a flat window of
    *window_width*, keeping the strongest response within *margin* pixels
    of the previous centre.  Returns two lists of (x, y) tuples (left
    lane, right lane).  Bands whose peak response is <= 50 are skipped.
    """
    #window_centroids = [] # Store the (left,right) window centroid positions per level
    window = np.ones(window_width) # Create our window template that we will use for convolutions
    left_centroids = []
    right_centroids = []
    # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
    # and then np.convolve the vertical image slice with the window template
    # Sum quarter bottom of image to get slice, could use a different ratio
    l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)
    l_center = np.argmax(np.convolve(window,l_sum))-window_width/2
    r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)
    r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)
    y_base = int(image.shape[0] - window_height/2)
    # Add what we found for the first layer
    y_center = y_base
    left_centroids.append((l_center, y_center))
    right_centroids.append((r_center, y_center))
    # Go through each layer looking for max pixel locations
    for level in range(1,(int)(image.shape[0]/window_height)):
        y_center = int(y_base - (level * window_height))
        # convolve the window into the vertical slice of the image
        image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)
        conv_signal = np.convolve(window, image_layer)
        # Find the best left centroid by using past left center as a reference
        # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
        offset = window_width/2
        l_min_index = int(max(l_center+offset-margin,0))
        l_max_index = int(min(l_center+offset+margin,image.shape[1]))
        l_max = np.argmax(conv_signal[l_min_index:l_max_index])
        if l_max > 50:
            # NOTE(review): records the *previous* centre at the current y,
            # then updates — a one-band lag; confirm this is intentional.
            left_centroids.append((l_center, y_center))
            l_center = l_max+l_min_index-offset
        # Find the best right centroid by using past right center as a reference
        r_min_index = int(max(r_center+offset-margin,0))
        r_max_index = int(min(r_center+offset+margin,image.shape[1]))
        r_max = np.argmax(conv_signal[r_min_index:r_max_index])
        if r_max > 50:
            right_centroids.append((r_center, y_center))
            r_center = r_max+r_min_index-offset
    return left_centroids, right_centroids
def draw_window_boxes(img, l_points, r_points, window_width, window_height):
    """Draw filled search-window rectangles onto *img* in place and return it.

    Left-lane windows are red (255,0,0), right-lane windows green (0,255,0);
    each point gives the rectangle's top-left corner.
    """
    for points, colour in ((l_points, (255, 0, 0)), (r_points, (0, 255, 0))):
        for pt in points:
            top_left = (pt[0], pt[1])
            bottom_right = (pt[0] + window_width, pt[1] + window_height)
            cv2.rectangle(img, top_left, bottom_right, colour, -1)
    return img
def draw_window_centroids(warped, window_centroids, window_width = 50, window_height = 80):
    """Overlay green window masks for per-level (left, right) centroid pairs.

    NOTE(review): not called anywhere in this file, and it expects
    window_centroids[level] == (left_x, right_x), which differs from the
    two-list return of find_lr_window_centroids — confirm before reuse.
    """
    if len(window_centroids) > 0:
        # Points used to draw all the left and right windows
        l_points = np.zeros_like(warped)
        r_points = np.zeros_like(warped)
        # Go through each level and draw the windows
        for level in range(0,len(window_centroids)):
            # Window_mask is a function to draw window areas
            l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)
            r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)
            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255
            r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255
        # Draw the results
        #template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together
        zero_channel = np.zeros_like(l_points) # create a zero color channle
        template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green
        warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels
        output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results
    # If no window centers found, just display orginal road image
    else:
        output = np.array(cv2.merge((warped,warped,warped)),np.uint8)
    return output
def draw_text(img, text, origin):
    """Draw white *text* onto *img* at *origin* (mutates img in place)."""
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)
def pipeline_image(img, save_images=None, save_suffix='.jpg'):
    """Annotate one RGB road frame with the detected lane area and stats.

    Stages: undistort -> colour threshold -> perspective warp to a
    top-down view -> window-convolution lane search -> quadratic fit ->
    curvature/offset estimate -> lane polygon warped back and blended in.
    When *save_images* is truthy every intermediate stage is written out
    with *save_suffix* appended.  Uses module globals mtx/dist
    (calibration) and last_l_points/last_r_points (previous frame's
    centroids, reused whenever fewer than 5 are found in this frame).
    """
    if save_images:
        print('begin pipeline_image', save_suffix)
    undistorted = undistort(img)
    if save_images:
        save_output_image(undistorted, 'undistorted' + save_suffix)
    #binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))
    binary = color_threshold(undistorted)
    if save_images:
        save_output_image(binary, 'binary' + save_suffix, cmap='gray')
    img_size = binary.shape[::-1]
    # Source trapezoid (road ahead) and destination rectangle for the
    # bird's-eye perspective transform, expressed as image-size fractions.
    src = np.float32(
        [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
        [((img_size[0] / 6) - 10), img_size[1]],
        [(img_size[0] * 5 / 6) + 60, img_size[1]],
        [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
    dst = np.float32(
        [[(img_size[0] / 4), 0],
        [(img_size[0] / 4), img_size[1]],
        [(img_size[0] * 3 / 4), img_size[1]],
        [(img_size[0] * 3 / 4), 0]])
    if save_images:
        cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)
        save_output_image(img, 'polygon' + save_suffix)
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
    if save_images:
        save_output_image(warped, 'warped' + save_suffix, cmap='gray')
    window_width = 40
    window_height = 60
    #identified lane-line pixels and fit their positions with a polynomial
    l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)
    global last_l_points, last_r_points
    # Too few detections this frame: fall back to the previous frame's points.
    if len(l_points) < 5 and len(last_l_points) > 0:
        #print("less than 4 l_points:", len(r_points))
        # use the previous points
        l_points = last_l_points
    else:
        last_l_points = l_points
    l_points = np.array(l_points, dtype=np.int32)
    l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)
    if len(r_points) < 5 and len(last_r_points) > 0:
        #print("less than 4 r_points:", len(r_points))
        r_points = last_r_points
    else:
        last_r_points = r_points
    r_points = np.array(r_points, dtype=np.int32)
    r_poly = np.polyfit(r_points[:,1], r_points[:,0], 2)
    # Evaluate both fits (x as a function of y) over every image row.
    yval = np.arange(0, warped.shape[0])
    l_xval = np.polyval(l_poly, yval)
    r_xval = np.polyval(r_poly, yval)
    if save_images:
        lanes = warped*255
        lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green
        lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
        for p in l_points:
            cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
        for p in r_points:
            cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)
        for x,y in zip(l_xval, yval):
            cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)
        for x,y in zip(r_xval, yval):
            cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)
        save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    #calculated the position of the vehicle with respect to center
    lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix
    direction = 'Left'
    if lane_center_offset_m > 0:
        direction = 'Right'
    #calculated the radius of curvature of the lane
    y_eval = np.max(yval)
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)
    right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)
    # Calculate the new radii of curvature
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Now our radius of curvature is in meters
    #Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)
    draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
    draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))
    output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
    if save_images:
        save_output_image(output, 'output' + save_suffix)
    return output
def process_test_images():
    """Run the lane pipeline over every test image, saving each stage."""
    for filename in glob('test_images/*.jpg'):
        image = load_image(filename)
        pipeline_image(image, True, '-' + filename.split('/')[-1])
def process_video(in_file, out_file):
    """Run pipeline_image over every frame of *in_file*; write *out_file*."""
    clip = VideoFileClip(in_file)
    video_clip = clip.fl_image(pipeline_image)
    video_clip.write_videofile(out_file, audio=False)
def show_before_after(before, after, cmap=None):
    """Display two images side by side, labelled 'Before' and 'After'.

    *cmap* applies only to the 'After' panel (original behaviour).
    """
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    panels = zip(axes, (before, after), ('Before', 'After'), (None, cmap))
    for axis, image, label, colour_map in panels:
        axis.imshow(image, cmap=colour_map)
        axis.set_title(label)
    plt.show()
def show_images(imgs, titles):
    """Show up to 18 images on a 3x6 grid, one title per image."""
    fig, axes = plt.subplots(3, 6, figsize=(12, 6))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    for axis, image, label in zip(axes.flat, imgs, titles):
        axis.imshow(image)
        axis.set_title(label)
    plt.show()
# Most-recently accepted centroid lists; pipeline_image falls back to these
# when a frame yields too few detections (mutable module-level state).
last_l_points = []
last_r_points = []
# Calibrate once, then run the still-image tests and the three videos.
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
|
3,417 | 644b4a2f0e8ce95e669c9c01df111c943e0c4af2 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Module test_measured_model - Contains the unit tests for the classes
in the datamodels.miri_measured_model module.
:History:
15 Jan 2013: Created.
21 Jan 2013: Warning messages controlled with Python warnings module.
05 Feb 2013: File closing problem solved by using "with" context manager.
08 Feb 2013: Replaced 'to_fits' with more generic 'save' method.
23 Apr 2013: Modified to keep up with behaviour of jwst_lib model.
Uninitialised arrays now have the same size and shape as the
data array but are full of default values.
26 Apr 2013: File closing problem has returned!
13 May 2013: Added MiriSlopeModel to describe MIRI slope data
(which is different from "ImageModel" data because it
preserves integrations). N.B. FINAL MODEL IS TBD.
04 Jun 2013: Shortened the names of the ramp, slope and image models.
10 Jun 2013: Added more metadata tests.
02 Jul 2013: MiriCubeModel added.
29 Jul 2013: stats() method added.
14 Aug 2013: Updated ramp model test to include groupdq and pixeldq
02 Sep 2013: Compare numpy record arrays in a way that it independent
of the byte ordering.
12 Sep 2013: Swapped the MRS CHANNEL and BAND keywords.
12 Sep 2013: Test that the data product can be copied successfully.
04 Oct 2013: Changed default field_def table to use MIRI reserved flags.
07 Oct 2013: GROUP_DEF table added to MIRI ramp data. Test MiriRampModel
for masking and arithmetic operations.
24 Feb 2014: Instrument name (INSTRUME) changed from meta.instrument.type to
meta.instrument.name.
27 Feb 2014: Added extra data arrays to MiriSlopeModel test.
04 Mar 2014: Added set_housekeeping_metadata.
25 Jun 2014: field_def and group_def changed to dq_def and groupdq_def.
field_def for ramp data changed to pixeldq_def.
21 Jul 2014: IM, and LW detectors changed to MIRIMAGE and MIRIFULONG.
25 Sep 2014: Updated the reference flags. insert_value_column function
used to convert between 3 column and 4 column flag tables.
TYPE and REFTYPE are no longer identical.
07 Nov 2014: The data model now raises an IOError when an invalid file
path is provided.
11 Mar 2015: group_integration_time changed to group_time.
11 Jun 2015: Added a history record test.
09 Jul 2015: Reference output array (refout) added to MiriRampModel schema.
19 Aug 2015: Removed MiriImageModel and MiriCubeModel.
07 Oct 2015: Made exception catching Python 3 compatible.
08 Apr 2016: Removed obsolete FIXME statements.
04 May 2016: ERR array removed from ramp data model.
31 Aug 2016: Change exception detected when creating a data model with an
invalid initialiser.
15 Jun 2017: Observation and target metadata is appropriate for ramp and
slope data only.
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
13 Sep 2017: Updated "not a file name" test to match the new behaviour of
JWST pipeline version 0.7.8rc2
27 Apr 2018: Corrected bug in get_history() length test.
27 Jun 2018: Removed unused arrays.
15 Feb 2018: Check that the DQ_DEF table has the correct fieldnames.
@author: Steven Beard (UKATC)
"""
import os
import unittest
import warnings
import numpy as np
# Import the JWST master data quality flag definitions
from miri.datamodels.dqflags import master_flags, pixeldq_flags, \
groupdq_flags
from miri.datamodels.miri_measured_model import MiriMeasuredModel, \
MiriRampModel, MiriSlopeModel
from miri.datamodels.tests.util import assert_recarray_equal, \
assert_products_equal
class TestMiriMeasuredModel(unittest.TestCase):
# Test the MiriMeasuredModel class
    def setUp(self):
        """Build the two fixture products and the temp-file name list.

        - simpleproduct: 64x64 data-only model with housekeeping,
          instrument and exposure metadata.
        - dataproduct: small 3x4 model with data, err, dq and dq_def.
        """
        # Create a 64x64 simple MiriMeasuredModel object, with no error
        # or quality arrays.
        self.data = np.linspace(0.0, 100000.0, 64*64)
        self.data.shape = [64,64]
        self.simpleproduct = MiriMeasuredModel(data=self.data)
        # Add some example metadata.
        self.simpleproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                     'V1.0')
        self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',
                                                   filt='F560W',
                                                   ccc_pos='OPEN',
                                                   deck_temperature=10.0,
                                                   detector_temperature=7.0)
        self.simpleproduct.set_exposure_metadata(readpatt='SLOW',
                                                 nints=1, ngroups=10,
                                                 frame_time=30.0,
                                                 integration_time=30.0,
                                                 group_time=300.0,
                                                 reset_time=0, frame_resets=3)
        # Create a more complex MiriMeasuredModel object from primary,
        # error and quality arrays.
        self.primary = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        self.error = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        self.quality = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.dataproduct = MiriMeasuredModel(data=self.primary,
                                             err=self.error,
                                             dq=self.quality,
                                             dq_def=master_flags)
        # Add some example metadata.
        self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',
                                                 channel='1',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='FAST',
                                               nints=1, ngroups=1,
                                               frame_time=1.0,
                                               integration_time=10.0,
                                               group_time=10.0,
                                               reset_time=0, frame_resets=3)
        # Temporary FITS files removed again in tearDown.
        self.testfile1 = "MiriMeasuredModel_test1.fits"
        self.testfile2 = "MiriMeasuredModel_test2.fits"
        self.tempfiles = [self.testfile1, self.testfile2]
def tearDown(self):
# Tidy up
del self.dataproduct
del self.primary, self.error, self.quality
del self.simpleproduct
del self.data
# Remove temporary files, if they exist and if able to.
for tempfile in self.tempfiles:
if os.path.isfile(tempfile):
try:
os.remove(tempfile)
except Exception as e:
strg = "Could not remove temporary file, " + tempfile + \
"\n " + str(e)
warnings.warn(strg)
del self.tempfiles
    def test_creation(self):
        """Exercise every supported construction path of MiriMeasuredModel.

        Covers: dq_def schema/class agreement, optional err/dq arrays and
        their default fill, empty/null/scalar products, and rejection of
        invalid initialisers.
        """
        # Check that the DQ_DEF field names in the class variable are the same
        # as the ones declared in the schema.
        dq_def_names = list(MiriMeasuredModel.dq_def_names)
        schema_names = list(self.dataproduct.get_field_names('dq_def'))
        self.assertEqual(dq_def_names, schema_names,
                         "'dq_def_names' class variable does not match schema")
        # Test that the error and quality arrays are optional.
        a2 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        b2 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        c2 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        # 1) Data array only. Data array must exist and be non-empty.
        # Other arrays should exist and be the same size and shape as the
        # data array. They should be full of default values.
        newdp1 = MiriMeasuredModel(data=a2)
        self.assertIsNotNone(newdp1.data)
        self.assertGreater(len(newdp1.data), 0)
        self.assertIsNotNone(newdp1.err)
        self.assertEqual(newdp1.err.shape, newdp1.data.shape)
        # Assumes default is 0.0 - see schema
        self.assertAlmostEqual(np.mean(newdp1.err), 0.0)
        self.assertIsNotNone(newdp1.dq)
        self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp1.dq), 0)
        descr1 = str(newdp1)
        self.assertIsNotNone(descr1)
        del newdp1, descr1
        # 2) Data and error arrays only. Data and error arrays must exist
        # and be non-empty. Quality array should exist but be the same
        # size and shape as the data array. It should be full of default
        # values.
        newdp2 = MiriMeasuredModel(data=a2, err=b2)
        self.assertIsNotNone(newdp2.data)
        self.assertGreater(len(newdp2.data), 0)
        self.assertIsNotNone(newdp2.err)
        self.assertEqual(newdp2.err.shape, newdp2.data.shape)
        # The error array must not be full of default values.
        self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)
        self.assertIsNotNone(newdp2.dq)
        self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp2.dq), 0)
        descr2 = str(newdp2)
        self.assertIsNotNone(descr2)
        del newdp2, descr2
        # 3) Data, error and quality arrays. All arrays must exist,
        # be non-empty and be the same size and shape.
        newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        self.assertIsNotNone(newdp3.data)
        self.assertGreater(len(newdp3.data), 0)
        self.assertIsNotNone(newdp3.err)
        self.assertEqual(newdp3.err.shape, newdp3.data.shape)
        # The error array must not be full of default values.
        self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)
        self.assertIsNotNone(newdp3.dq)
        self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)
        # The quality array must not be full of default values.
        self.assertNotEqual(np.mean(newdp3.dq), 0)
        descr3 = str(newdp3)
        self.assertIsNotNone(descr3)
        del newdp3, descr3
        # It should be possible to set up an empty data product with
        # a specified shape. All three arrays should be initialised to
        # the same shape.
        emptydp = MiriMeasuredModel( (4,4) )
        self.assertIsNotNone(emptydp.data)
        self.assertEqual(emptydp.data.shape, (4,4))
        self.assertIsNotNone(emptydp.err)
        self.assertEqual(emptydp.err.shape, (4,4))
        self.assertIsNotNone(emptydp.dq)
        self.assertEqual(emptydp.dq.shape, (4,4))
        descr = str(emptydp)
        self.assertIsNotNone(descr)
        del emptydp, descr
        # A null data product can also be created and populated
        # with data later.
        nulldp = MiriMeasuredModel( )
        descr1 = str(nulldp)
        self.assertIsNotNone(descr1)
        nulldp.data = np.asarray(a2)
        self.assertIsNotNone(nulldp.err)
        self.assertIsNotNone(nulldp.dq)
        descr2 = str(nulldp)
        self.assertIsNotNone(descr2)
        del nulldp, descr1, descr2
        # A scalar data product is possible, even if of little use.
        scalardp = MiriMeasuredModel( data=42 )
        self.assertEqual(scalardp.data, 42)
        self.assertIsNotNone(scalardp.err)
        self.assertIsNotNone(scalardp.dq)
        descr = str(scalardp)
        self.assertIsNotNone(descr)
        del scalardp, descr
        # Attempts to create a data product from invalid data types
        # and stupid values must be detected.
        # NOTE: A bug in the JWST data model might cause an AttributeError
        # to be raised instead of a ValueError. If this happens, try a newer
        # version of the JWST data model library.
        self.assertRaises(ValueError, MiriMeasuredModel, init=[])
        self.assertRaises(ValueError, MiriMeasuredModel, init=42)
        self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name')
        self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')
        #self.assertRaises(ValueError, MiriMeasuredModel, init='')
        self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')
    def test_metadata(self):
        """Verify FITS keyword lookup, metadata tree access and HISTORY records."""
        # Check the dataproducts contain metadata
        # First test the basic STScI FITS keyword lookup method.
        kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',
                                                      return_result=True)
        self.assertIsNotNone(kwstrg)
        # kwstrg is a list - assume the first entry is what we want.
        telname = self.simpleproduct[kwstrg[0]]
        self.assertEqual(telname, 'JWST')
        # Accessing the tree structure directly should also work.
        telname = self.simpleproduct.meta.telescope
        self.assertEqual(telname, 'JWST')
        # An alternative lookup provided by the MIRI data model.
        telname = self.simpleproduct.get_fits_keyword('TELESCOP')
        self.assertEqual(telname, 'JWST')
        # Repeat the three lookup styles for the instrument name.
        kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',
                                                      return_result=True)
        self.assertIsNotNone(kwstrg)
        insname = self.simpleproduct[kwstrg[0]]
        self.assertEqual(insname, 'MIRI')
        insname = self.simpleproduct.meta.instrument.name
        self.assertEqual(insname, 'MIRI')
        insname = self.simpleproduct.get_fits_keyword('INSTRUME')
        self.assertEqual(insname, 'MIRI')
        # Add some history records and check they exist.
        self.simpleproduct.add_history('History 1')
        self.simpleproduct.add_history('History 2')
        self.simpleproduct.add_history('History 3')
        self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)
        strg = self.simpleproduct.get_history_str()
        self.assertIsNotNone(strg)
        self.assertGreater(len(strg), 0)
def test_content(self):
# The data, err and dq attributes are aliases for the primary,
# error and quality arrays
self.assertTrue( np.allclose(self.primary, self.dataproduct.data) )
self.assertTrue( np.allclose(self.error, self.dataproduct.err) )
self.assertTrue( np.allclose(self.quality, self.dataproduct.dq) )
def test_copy(self):
# Test that a copy can be made of the data product.
datacopy = self.dataproduct.copy()
self.assertIsNotNone(datacopy)
assert_products_equal( self, self.dataproduct, datacopy,
arrays=['data', 'err', 'dq'],
tables='dq_def' )
del datacopy
    def test_fitsio(self):
        """Products must round-trip through a FITS file unchanged."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data products can be written to a FITS
            # file and read back again without changing the data.
            self.simpleproduct.save(self.testfile1, overwrite=True)
            with MiriMeasuredModel(self.testfile1) as readback:
                self.assertTrue( np.allclose(self.simpleproduct.data,
                                             readback.data) )
                del readback
            self.dataproduct.save(self.testfile2, overwrite=True)
            with MiriMeasuredModel(self.testfile2) as readback:
                assert_products_equal( self, self.dataproduct, readback,
                                       arrays=['data', 'err', 'dq'],
                                       tables='dq_def' )
                del readback
    def test_asciiio(self):
        """Placeholder: ASCII round-trip test (not supported by jwst_lib yet)."""
        # Check that the data products can be written to an ASCII
        # file and read back again without changing the data.
        # TODO: At the moment jwst_lib only supports FITS I/O
        pass
        # The intended test body is kept below for when ASCII I/O arrives:
#         # Suppress metadata warnings
#         with warnings.catch_warnings():
#             warnings.simplefilter("ignore")
#             self.simpleproduct.save(self.testfile_ascii, overwrite=True)
#             with MiriMeasuredModel(self.testfile_ascii) as readback:
#                 self.assertTrue( np.allclose(self.simpleproduct.data,
#                                              readback.data) )
#                 del readback
    def test_masking(self):
        """The DQ array must mask bad values out of the SCI and ERR arrays."""
        # The DQ array must mask off bad values in the SCI and ERR arrays.
        a2 = [[10,999,10,999], [999,10,10,999], [10,10,999,10]]
        b2 = [[1,99,1,99], [99,1,1,99], [1,1,99,1]]
        c2 = [[0,1,0,1], [1,0,0,1], [0,0,1,0]]
        # Without a DQ array (assuming the default quality value is 0)
        # the SCI and ERR arrays are not masked, so their averages
        # include the 999s and are greater than they ought to be.
        newdp = MiriMeasuredModel(data=a2, err=b2)
        meandata = np.mean(newdp.data_masked)
        self.assertGreater(meandata, 10)
        meanerr = np.mean(newdp.err_masked)
        self.assertGreater(meanerr, 1)
        # The addition of the quality data should cause the SCI and ERR
        # arrays to be masked off and give the correct average.
        newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        meandata2 = np.mean(newdp2.data_masked)
        self.assertAlmostEqual(meandata2, 10)
        meanerr2 = np.mean(newdp2.err_masked)
        self.assertAlmostEqual(meanerr2, 1)
        del newdp, newdp2
    def test_arithmetic(self):
        """Check +, -, * and / between products/scalars, including error
        propagation in quadrature and the expected division-by-zero failure."""
        a2 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        c2 = [[0,1,1,0],[0,2,0,2],[1,0,1,0]]
        newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        # Self-subtraction of the simple product. The result
        # should be zero.
        newsimple = self.simpleproduct - self.simpleproduct
        self.assertAlmostEqual(newsimple.data.all(), 0.0)
        del newsimple
        # Scalar addition
        result = self.dataproduct + 42
        test1 = self.dataproduct.data + 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product addition
        result = self.dataproduct + newdp
        test1 = self.dataproduct.data + newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points. Errors add in quadrature.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Scalar subtraction
        result = self.dataproduct - 42
        test1 = self.dataproduct.data - 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product subtraction
        result = self.dataproduct - newdp
        test1 = self.dataproduct.data - newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points. Subtraction also adds errors
        # in quadrature.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Addition and subtraction should cancel each other out
        result = self.dataproduct + newdp - newdp
        test1 = self.dataproduct.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Scalar multiplication
        result = self.dataproduct * 3
        test1 = self.dataproduct.data * 3
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product multiplication
        result = self.dataproduct * newdp
        test1 = self.dataproduct.data * newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        err1 = self.dataproduct.err
        da1 = self.dataproduct.data
        err2 = newdp.err
        da2 = newdp.data
        # Product rule: err = sqrt((e1*d2)^2 + (e2*d1)^2)
        expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)
        self.assertTrue(np.array_equal(expectedErr, result.err))
        del result, da1, da2, err1, err2, expectedErr
        # Scalar division
        result = self.dataproduct / 3.0
        test1 = self.dataproduct.data / 3.0
        test2 = result.data
        self.assertAlmostEqual(test1.all(), test2.all())
        del test1, test2, result
        # Division by zero must be rejected up front.
        self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)
        # Data product division
        #print("NOTE: The following test is expected to generate run time warnings.")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = self.dataproduct / newdp
            test1 = self.dataproduct.data / newdp.data
            test2 = result.data
            self.assertEqual(test1.all(), test2.all())
            # Test Juergen Schreiber error propagation
            dat = self.dataproduct.data[1][1]
            newdat = newdp.data[1][1]
            resultErr = result.err[1][1]
            dpErr = self.dataproduct.err[1][1]
            newdpErr = newdp.err[1][1]
            # Quotient rule: err^2 = e1^2/d2^2 + e2^2*d1^2/d2^4
            expectErr = np.sqrt( dpErr * dpErr/(newdat * newdat) + \
                                 newdpErr * newdpErr * dat * dat / \
                                 (newdat * newdat * newdat * newdat))
            self.assertEqual(expectErr, resultErr)
            del test1, test2, result
        # More complex arithmetic should be possible.
        newdp2 = newdp * 2
        newdp3 = newdp * 3
        newdp4 = newdp2 + newdp3
        result = ((self.dataproduct - newdp) * newdp2 / newdp3) + newdp4
        del newdp, newdp2, newdp3, newdp4
        del result
    def test_broadcasting(self):
        """Operations mixing broadcastable array shapes must work, and
        non-broadcastable shapes must be rejected."""
        # Test that operations where the broadcasting of one array
        # onto a similar shaped array work.
        a4x3 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b4x3 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        #c4x3 = [[0,1,0,0],[0,0,1,0],[1,0,0,1]]
        a4x1 = [4,3,2,1]
        b4x1 = [1,2,1,2]
        c4x1 = [0,1,0,0]
        #a5x1 = [5,4,3,2,1]
        #b5x1 = [1,2,3,2,1]
        c5x1 = [0,1,0,0,1]
        # Create an object with 4x3 primary and error arrays but a 4x1
        # quality array. This should succeed because the quality array
        # is broadcastable.
        newdp1 = MiriMeasuredModel(data=a4x3, err=b4x3, dq=c4x1)
        self.assertTrue( np.allclose(a4x3, newdp1.data) )
        self.assertTrue( np.allclose(b4x3, newdp1.err) )
        self.assertTrue( np.allclose(c4x1, newdp1.dq) )
        # 5x1 is not broadcastable onto 4x3 and this statement should fail.
        # NOTE: Unfortunately this test also issues a warning message,
        # "'MiriMeasuredModel' object has no attribute '_real_cls'".
        # Turning off warnings does not stop this message from appearing.
        self.assertRaises(TypeError, MiriMeasuredModel, data=a4x3,
                          error=b4x3, quality=c5x1)
        # Combine two broadcastable object mathematically.
        # The + and - operations should be commutative and the result
        # should be saveable to a FITS file.
        newdp2 = MiriMeasuredModel(data=a4x1, err=b4x1, dq=c4x1)
        result1 = newdp1 + newdp2
        result2 = newdp2 + newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 * newdp2
        result2 = newdp2 * newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        # The - and / operations are not commutative, but the data shape
        # should be consistent and the quality arrays should be combined
        # in the same way.
        result1 = newdp1 - newdp2
        result2 = newdp2 - newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 / newdp2
        result2 = newdp2 / newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        # The errors resulting from division depend on the order
        # of the operation, so only the quality arrays are compared here.
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
def test_description(self):
# Test that the querying and description functions work.
# For the test to pass these need to run without error
# and generate non-null strings.
descr = str(self.simpleproduct)
self.assertIsNotNone(descr)
del descr
descr = repr(self.simpleproduct)
self.assertIsNotNone(descr)
del descr
descr = self.simpleproduct.stats()
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct)
self.assertIsNotNone(descr)
del descr
descr = self.dataproduct.stats()
self.assertIsNotNone(descr)
del descr
# Attempt to access the SCI, ERROR and DQ arrays through attributes.
descr = str(self.dataproduct.data)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct.err)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct.dq)
self.assertIsNotNone(descr)
del descr
class TestMiriRampModel(unittest.TestCase):
    """Unit tests for the 4-D ramp data model (MiriRampModel), which
    carries separate 2-D pixeldq and 4-D groupdq quality arrays and,
    unlike MiriMeasuredModel, no ERR array."""
    # Most of the necessary tests are already carried out by
    # the TestMiriMeasuredModel class.

    def setUp(self):
        """Build a fully-populated ramp product plus example metadata."""
        # Create a ramp data product.
        # NOTE: A ramp product does not contain an ERR array.
        self.a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        self.c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.c2 = [[0,1,1,0], [1,0,0,1], [1,0,1,0]]
        self.acube = [self.a1,self.a1,self.a1]
        self.ccube = [self.c1,self.c2,self.c1]
        self.ahyper = [self.acube,self.acube]
        self.chyper = [self.ccube,self.ccube]
        self.refout = np.ones_like(self.chyper)
        self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.refout,
                                         pixeldq=self.c1,
                                         dq_def=pixeldq_flags,
                                         groupdq=self.chyper,
                                         groupdq_def=groupdq_flags)
        # Add some example metadata.
        self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                   'V1.0')
        self.dataproduct.set_observation_metadata()
        self.dataproduct.set_target_metadata(0.0, 0.0)
        self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',
                                                 channel='1',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='FAST',
                                               nints=1, ngroups=1,
                                               frame_time=1.0,
                                               integration_time=10.0,
                                               group_time=10.0,
                                               reset_time=0, frame_resets=3)
        self.testfile = "MiriRampModel_test.fits"

    def tearDown(self):
        """Release fixtures and delete the temporary FITS file."""
        # Tidy up
        del self.a1, self.c1, self.c2
        del self.acube, self.ccube
        del self.ahyper, self.chyper
        del self.dataproduct
        # Remove temporary file, if able to.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n " + str(e)
                warnings.warn(strg)

    def test_creation(self):
        """Every combination of optional quality arrays, empty/null/invalid
        construction, and dimensionality checking."""
        # Test that any of the quality arrays are optional.
        b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        bcube = [b1,b1,b1]
        bhyper = [bcube,bcube]
        # 1) Data array only. Data array must exist and be non-empty.
        #    The quality arrays must be 2-D and 4-D.
        #    Unspecified arrays must be filled with default values.
        newdp1 = MiriRampModel(data=self.ahyper)
        self.assertIsNotNone(newdp1.data)
        self.assertGreater(len(newdp1.data), 0)
        # Assumes default is 0.0 - see schema
        self.assertIsNotNone(newdp1.pixeldq)
        self.assertTrue(newdp1.pixeldq.ndim == 2)
        # Assumes default is 0 - see schema
        # FIXME: The pixeldq array ends up containing null values.
        #self.assertEqual(np.mean(newdp1.pixeldq), 0)
        self.assertIsNotNone(newdp1.groupdq)
        self.assertTrue(newdp1.groupdq.ndim == 4)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp1.groupdq), 0)
        descr1 = str(newdp1)
        del newdp1, descr1
        # 2) Data and both quality arrays. All arrays must exist,
        #    be non-empty and be the shape specified.
        newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                               groupdq=self.chyper)
        self.assertIsNotNone(newdp3.data)
        self.assertGreater(len(newdp3.data), 0)
        # The pixeldq array must not be full of default values.
        self.assertIsNotNone(newdp3.pixeldq)
        self.assertTrue(newdp3.pixeldq.ndim == 2)
        self.assertNotEqual(np.mean(newdp3.pixeldq), 0)
        self.assertIsNotNone(newdp3.groupdq)
        self.assertTrue(newdp3.groupdq.ndim == 4)
        # The groupdq array must not be full of default values.
        self.assertNotEqual(np.mean(newdp3.groupdq), 0)
        descr3 = str(newdp3)
        del newdp3, descr3
        # 3) Data and pixeldq array only. All arrays must exist,
        #    be non-empty and be the shape specified.
        newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)
        self.assertIsNotNone(newdp4.data)
        self.assertGreater(len(newdp4.data), 0)
        # The pixeldq array must not be full of default values.
        self.assertIsNotNone(newdp4.pixeldq)
        self.assertTrue(newdp4.pixeldq.ndim == 2)
        self.assertNotEqual(np.mean(newdp4.pixeldq), 0)
        self.assertIsNotNone(newdp4.groupdq)
        self.assertTrue(newdp4.groupdq.ndim == 4)
        descr4 = str(newdp4)
        del newdp4, descr4
        # 4) Data and groupdq array only. All arrays must exist,
        #    be non-empty and be the shape specified.
        newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)
        self.assertIsNotNone(newdp5.data)
        self.assertGreater(len(newdp5.data), 0)
        self.assertIsNotNone(newdp5.pixeldq)
        self.assertTrue(newdp5.pixeldq.ndim == 2)
        # The groupdq array must not be full of default values.
        self.assertIsNotNone(newdp5.groupdq)
        self.assertTrue(newdp5.groupdq.ndim == 4)
        # The groupdq array must not be full of default values.
        self.assertNotEqual(np.mean(newdp5.groupdq), 0)
        descr5 = str(newdp5)
        del newdp5, descr5
        # It should be possible to set up an empty data product with
        # a specified 4-D shape. Data array should be
        # initialised to the same shape.
        emptydp = MiriRampModel( (2,2,2,2) )
        self.assertIsNotNone(emptydp.data)
        self.assertEqual(emptydp.data.shape, (2,2,2,2))
        self.assertIsNotNone(emptydp.pixeldq)
        #self.assertEqual(emptydp.pixeldq.shape, (2,2))
        self.assertIsNotNone(emptydp.groupdq)
        self.assertEqual(emptydp.groupdq.shape, (2,2,2,2))
        descr = str(emptydp)
        self.assertIsNotNone(descr)
        del emptydp, descr
        # A null data product can also be created and populated
        # with data later.
        nulldp = MiriRampModel( )
        descr1 = str(nulldp)
        self.assertIsNotNone(descr1)
        nulldp.data = np.asarray(self.ahyper)
        self.assertIsNotNone(nulldp.pixeldq)
        self.assertIsNotNone(nulldp.groupdq)
        descr2 = str(nulldp)
        self.assertIsNotNone(descr2)
        del nulldp, descr1, descr2
        # Creating an object with other than 4 dimensions must fail.
        a1d = [10,20,30,40]
        c1d = [1,0,0,0]
        self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)
        a2d = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        c2d = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)
        a3d = [a2d, a2d, a2d]
        c3d = [c2d, c2d, c2d]
        self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)
        self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)
        # The pixeldq array must be 2-D.
        self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
                          pixeldq=self.ccube)
        # The groupdq array must be 4-D.
        self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
                          groupdq=self.c1)

    def test_masking(self):
        """The dq attribute must reflect pixeldq, groupdq or their union,
        depending on the 'maskwith' option."""
        # Ramp data must have a dq array which gives a view of one
        # or both of the pixeldq and groupdq masks
        self.assertIsNotNone(self.dataproduct.dq)
        # Create a data product masked by the pixeldq array.
        # The dq and pixeldq arrays must be the same
        mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='pixeldq')
        self.assertIsNotNone(mask1.pixeldq)
        self.assertGreater(len(mask1.pixeldq), 0)
        self.assertIsNotNone(mask1.dq)
        self.assertGreater(len(mask1.dq), 0)
        self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)
        self.assertTrue(np.all( mask1.dq == mask1.pixeldq ))
        del mask1
        # Create a data product masked by the groupdq array.
        # The dq and groupdq arrays must be the same
        mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='groupdq')
        self.assertIsNotNone(mask2.groupdq)
        self.assertGreater(len(mask2.groupdq), 0)
        self.assertIsNotNone(mask2.dq)
        self.assertGreater(len(mask2.dq), 0)
        self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)
        self.assertTrue(np.all( mask2.dq == mask2.groupdq ))
        del mask2
        # Create a data product masked by both pixeldq and groupdq arrays.
        # The result must have the same shape as the groupdq array but be
        # a combination of both masks.
        mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='both')
        self.assertIsNotNone(mask3.pixeldq)
        self.assertGreater(len(mask3.pixeldq), 0)
        self.assertIsNotNone(mask3.groupdq)
        self.assertGreater(len(mask3.groupdq), 0)
        self.assertIsNotNone(mask3.dq)
        self.assertGreater(len(mask3.dq), 0)
        self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)
        # "both" means the bitwise OR of the two masks.
        expected = mask3.groupdq | mask3.pixeldq
        self.assertTrue(np.all( mask3.dq == expected ))
        del mask3

    def test_arithmetic(self):
        """Ramp-specific arithmetic checks (general ones are covered by
        TestMiriMeasuredModel)."""
        # The ramp data model supports all the arithmetic operations
        # supported by the MiriMeasuredModel. The following are exceptions
        # specific to the ramp model.
        # Create a data model in which the DATA and DQ arrays have different
        # shapes.
        testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                               groupdq=self.chyper, maskwith='both')
        descr = str(testdp)
        self.assertIsNotNone(descr)
        del descr
        # Suppress warning about the DQ array being propagated only from GROUPDQ
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check the product can be combined with itself
            double = testdp * 2.0
            self.assertIsNotNone(double.data)
            self.assertGreater(len(double.data), 0)
            expected = double.data * 2.0
            self.assertTrue(np.all( (double.data - expected) < 0.001 ))
            descr = str(double)
            self.assertIsNotNone(descr)
            del descr
            # When this is combined with another data product, the DATA
            # array is masked with both the pixeldq and groupdq arrays.
            warnings.simplefilter("ignore")
            result = self.dataproduct + testdp
            self.assertIsNotNone(result.data)
            self.assertGreater(len(result.data), 0)
            self.assertIsNotNone(result.dq)
            self.assertGreater(len(result.dq), 0)
            descr = str(result)
            self.assertIsNotNone(descr)
            del descr

    def test_fitsio(self):
        """The ramp product must round-trip through a FITS file unchanged."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriRampModel(self.testfile) as readback:
                assert_products_equal( self, self.dataproduct, readback,
                                       arrays=['data', 'refout', 'pixeldq','groupdq'],
                                       tables=['pixeldq_def', 'groupdq_def'] )
                del readback

    def test_description(self):
        """str/repr/stats must run and produce non-null strings."""
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI, REFOUT and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.refout)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
class TestMiriSlopeModel(unittest.TestCase):
    """Unit tests for the 3-D slope data model (MiriSlopeModel)."""
    # Most of the necessary tests are already carried out by
    # the TestMiriMeasuredModel class.

    def setUp(self):
        """Build a populated slope product plus example metadata."""
        # Create a slope data product.
        a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        acube = [a1,a1,a1]
        bcube = [b1,b1,b1]
        ccube = [c1,c1,c1]
        dcube = [a1,b1,a1]
        self.dataproduct = MiriSlopeModel(data=acube, err=bcube,
                                          dq=ccube, dq_def=master_flags,
                                          zeropt=dcube, fiterr=dcube)
        # Add some example metadata.
        self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                   'V1.0')
        self.dataproduct.set_observation_metadata()
        self.dataproduct.set_target_metadata(0.0, 0.0)
        self.dataproduct.set_instrument_metadata(detector='MIRIMAGE',
                                                 filt='F2550W',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='SLOW',
                                               nints=3, ngroups=10,
                                               frame_time=1.0,
                                               integration_time=100.0,
                                               group_time=1000.0,
                                               reset_time=0, frame_resets=3)
        self.testfile = "MiriSlopeModel_test.fits"

    def tearDown(self):
        """Release the fixture and delete the temporary FITS file."""
        # Tidy up
        del self.dataproduct
        # Remove temporary file, if able to.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n " + str(e)
                warnings.warn(strg)

    def test_creation(self):
        """Slope data must be exactly 3-D; other ranks must be rejected."""
        # Creating an object with other than 3 dimensions must fail.
        a1d = [10,20,30,40]
        b1d = [1,2,3,4]
        c1d = [1,0,0,0]
        self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d,
                          dq=c1d)
        a2d = [a1d, a1d, a1d]
        b2d = [b1d, b1d, b1d]
        c2d = [c1d, c1d, c1d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d,
                          dq=c2d)
        a3d = [a2d, a2d]
        b3d = [b2d, b2d]
        c3d = [c2d, c2d]
        a4d = [a3d, a3d]
        b4d = [b3d, b3d]
        c4d = [c3d, c3d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d,
                          dq=c4d)

    def test_copy(self):
        """A copy must duplicate all slope arrays and tables."""
        # Test that a copy can be made of the data product.
        datacopy = self.dataproduct.copy()
        self.assertIsNotNone(datacopy)
        assert_products_equal( self, self.dataproduct, datacopy,
                               arrays=['data', 'err', 'dq',
                                       'nreads', 'readsat', 'ngoodseg',
                                       'zeropt', 'fiterr'],
                               tables='dq_def' )
        del datacopy

    def test_fitsio(self):
        """The slope product must round-trip through a FITS file unchanged."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriSlopeModel(self.testfile) as readback:
                assert_products_equal( self, self.dataproduct, readback,
                                       arrays=['data', 'err', 'dq',
                                               'nreads', 'readsat', 'ngoodseg',
                                               'zeropt', 'fiterr'],
                                       tables='dq_def' )
                del readback

    def test_description(self):
        """str/repr/stats must run without error."""
        # Test that the querying and description functions work.
        # For this test to pass these only need to run without error.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
# If being run as a main program, run the tests.
if __name__ == '__main__':
    unittest.main()
|
class Solution:
    """LeetCode 29 "Divide Two Integers": division without *, / or %."""

    def divide(self, dividend, divisor):
        """
        Divide two integers using only subtraction and bit shifts,
        truncating toward zero and clamping to the signed 32-bit range.

        :type dividend: int
        :type divisor: int
        :rtype: int
        :raises ZeroDivisionError: if divisor is 0 (the original code
            looped forever on this input).
        """
        if divisor == 0:
            raise ZeroDivisionError('integer division by zero')
        negative = (dividend < 0) ^ (divisor < 0)
        dividend, divisor = abs(dividend), abs(divisor)
        result = 0
        while dividend >= divisor:
            # Find the largest power-of-two multiple of divisor that
            # still fits into the remaining dividend.
            shift_time = 1
            while dividend >= divisor << shift_time:
                shift_time += 1
            dividend -= divisor << (shift_time - 1)
            result += 1 << (shift_time - 1)
        if negative:
            result = -result
        # Clamp to [-2**31, 2**31 - 1] as required by the problem
        # (only -2**31 / -1 can overflow).
        if (-1 << 31) <= result <= (1 << 31) - 1:
            return result
        return (1 << 31) - 1
if __name__ == '__main__':
    # Worst-case overflow input: -2**31 / -1 must clamp to 2**31 - 1.
    print(Solution().divide(-2147483648, -1))
|
3,419 | 14b98186fbc9c275cea3c042cdb4899f6d0c54c6 | #!/usr/bin/python
#_*_coding:utf-8_*_
import random
def main():
    """Print the sentence with each long word's interior letters shuffled.

    Words longer than 4 characters keep their first and last letter and
    have the characters in between randomly permuted (the classic
    "typoglycemia" demonstration); shorter words pass through unchanged.
    """
    source = "I couldn't believe that I could actually understand what I was reading : the phenomenal power of the human mind ."
    words = source.strip().split(" ")
    new_str = list()
    for word in words:
        if len(word) > 4:
            # Shuffle only the interior; endpoints stay fixed.
            middle = list(word[1:-1])
            shuffled_char = random.sample(middle, len(middle))
            new_str.append(word[:1] + "".join(shuffled_char) + word[-1:])
        else:
            new_str.append(word)
    # FIXED: the original used the Python 2 "print expr" statement,
    # which is a SyntaxError under Python 3. print(...) with a single
    # argument behaves identically in both Python 2 and Python 3.
    print(" ".join(new_str))
# Script entry point.
if __name__ == "__main__":
    main()
|
3,420 | 89b03bb5ca86e426459e23866f86f8770e4a1613 | from collections import defaultdict
def solve(n, seq):
    """Return 1 if *n* computers are enough for the sessions in *seq*, else 0.

    Each character in *seq* toggles a user's session: its first
    occurrence is a login (occupying one computer), its second a
    logout (freeing it).  The demand is satisfied when the number of
    simultaneously active users never exceeds n.

    Bug fixes relative to the original:
    * The return value was inverted (0 when satisfied, 1 when not),
      while the module-level caller prints "Satisfied" for a truthy
      result - so the program's output was backwards.
    * Capacity was checked before processing the next event, so a
      logout arriving while exactly n users were active was wrongly
      treated as an overflow (e.g. n=1, seq="aa" was rejected).
    * Removed a dead membership re-check and the misuse of
      defaultdict() with no default factory.
    """
    active = set()   # users currently logged in
    for user in seq:
        if user in active:
            active.remove(user)          # second occurrence: logout
        else:
            active.add(user)             # first occurrence: login
            if len(active) > n:
                return 0                 # more users than computers
    return 1
# Read the number of computers and the login/logout sequence, then
# report whether the computers satisfied the demand.
n = int(input())
seq = input()
verdict = "Satisfied" if solve(n, seq) else "Not Satisfied"
print(verdict)
3,421 | 1355c3abfd2683f6dc869703fdb79a04e264099c | """For logging training information to files."""
import os
def delete_log(file_path):
    """Delete a log file.

    Args:
        file_path: String, the full path to the log file.

    Raises:
        ValueError: if the file does not exist.
    """
    if os.path.exists(file_path):
        print('Deleting log %s...' % file_path)
        os.remove(file_path)
    else:
        # FIXED: grammar in the error message ("doesn't exists" -> "doesn't exist").
        raise ValueError("File %r doesn't exist - cannot delete." % file_path)
class Logger:
    """For logging information to file."""

    def __init__(self, file_path, print_too=True, override=False):
        """Create a new Logger.

        Args:
            file_path: String, the full path to the target file.
            print_too: Bool, whether or not to also print logger info to terminal.
            override: Bool, whether or not to delete any old files.
        """
        self.file_path = file_path
        self.print_too = print_too
        if override and os.path.exists(file_path):
            print('Overriding - deleting previous log...')
            os.remove(file_path)
        # FIXED: os.makedirs('') raises FileNotFoundError, so only create
        # parent directories when the path has a directory component
        # (the original crashed for bare filenames like "run.log").
        parent_dir = os.path.dirname(file_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

    def log(self, info):
        """Append *info* on a new line of the log file; optionally echo it."""
        with open(self.file_path, 'a') as file:
            file.write('\n' + info)
        if self.print_too:
            print(info)
|
3,422 | d98db745be2ab9c506a98539b25e9b46e4997136 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 16:36:56 2018
@author: Alex
"""
#%% Import packages
import pickle
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import os
os.chdir('C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\src\\topic modeling\\')
from plotly_network import plot
#%% Load data
# Load metatopic allocations
processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\'
filename = processed_data_folder + 'topic_assignments'
with open(filename, 'rb') as fp:
topic_assignments, meta_topic_assignments = pickle.load(fp)
# Load distance matrix
filename = processed_data_folder + 'graph_and_labels'
with open(filename, 'rb') as fp:
graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.load(fp)
#%% Loop through meta-topics
plt.close()
#for meta_topic in np.unique(meta_topic_assignments):
meta_topic = 0
# Find the sub topics
sub_topics, = np.where(meta_topic_assignments == meta_topic)
# Get the distance matrix just for those topics
sub_dist_mat = dist_mat[sub_topics][:,sub_topics]
# Generate the graph matrix by selecting an appropriate threshold
graph_mat = sub_dist_mat < 0.95
if not np.any(graph_mat):
min_val = np.min(sub_dist_mat)
graph_mat = sub_dist_mat <= min_val
# Find the docs belonging to that subtopic
#docs = np.in1d(topic_assignments,sub_topics)
# Get subtopic labels
sub_topic_labels = {sub_topic:topic_labels[sub_topic] for sub_topic in sub_topics if sub_topic in topic_labels}
new_sub_topic_labels = {}
#
# Rename the keys
for counter, value in enumerate(sub_topic_labels.keys()):
new_sub_topic_labels[counter] = sub_topic_labels[value]
# Plot the graph
plt.figure()
G = nx.from_numpy_matrix(graph_mat)
#pos = nx.graphviz_layout(G)
#pos = nx.nx_agraph.graphviz_layout(G)
#pos=nx.spring_layout(G)
pos = nx.layout.circular_layout(G)
nx.relabel_nodes(G,sub_topic_labels)
nx.draw(G,pos)
nx.draw_networkx_labels(G,pos,new_sub_topic_labels,font_size=16)
node_labels = list(sub_topic_labels.values())
#%% Calculate text positions
text_pos = []
for key, value in pos.items():
if value[0] < 0:
pos_part2 = ' left'
else:
pos_part2 = ' right'
if value[1] < 0:
pos_part1 = 'bottom'
else:
pos_part1 = 'top'
text_pos.append(pos_part1 + pos_part2)
#%% Plot in plot
url = plot(G,pos,node_labels,text_pos)
|
3,423 | 18d1722529a63f9a1696b09c40dabb1c68ed55f4 | import subprocess
from flask import Flask, render_template, request
from subprocess import Popen, PIPE, check_output
def toggle_relay(value):
    """Run the local ./relay helper with *value* ("small" or "big").

    Returns the process stdout. Raises Exception if the helper wrote
    anything to stderr.

    FIXED: uses an argument list without shell=True so *value* can never
    be interpreted by a shell (command-injection hardening).
    NOTE(review): './relay' is resolved relative to the current working
    directory - confirm the service is launched from the project folder.
    """
    session = subprocess.Popen(["./relay", value], stdout=PIPE, stderr=PIPE)
    stdout, stderr = session.communicate()
    if stderr:
        raise Exception("Error " + str(stderr))
    return stdout
# Module-level WSGI application object; the route below registers against it.
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def home():
    """Render the control page and handle brew requests.

    POST fields:
        espresso_button / lungo_button: trigger an immediate brew.
        radios + time: schedule a brew 'time' minutes from now via at(1).
    """
    if request.method == 'POST':
        if request.form.get("espresso_button") == 'Espresso':
            toggle_relay("small")
        if request.form.get("lungo_button") == 'Lungo':
            toggle_relay("big")
        # FIXED: the delay is user-supplied and was previously concatenated
        # straight into a shell command (command injection; a missing field
        # also raised TypeError). Only accept a plain number of minutes.
        delay = request.form.get("time") or ""
        choice = request.form.get("radios")
        if delay.isdigit() and choice in ('Espresso', 'Lungo'):
            size = "small" if choice == 'Espresso' else "big"
            Popen("echo /home/pi/coffee_maker/relay " + size +
                  " | at now + " + delay + " minutes", shell=True)
    return render_template("index.html")
# NOTE(review): debug=True enables the Werkzeug debugger and host='0.0.0.0'
# listens on all interfaces - do not expose this combination beyond a
# trusted local network.
app.run(host='0.0.0.0', debug=True)
|
3,424 | 3a3400426b054b2fc3d060141a1f84e5db553e59 | import deform
import deform.widget
from deform import (widget) # decorator, default_renderer, field, form,
import colander
# import htmllaundry
# from htmllaundry import sanitize
from validators import (cyber_validator,
phone_validator,
stor_validator,
cou_validator,
valid_country,
valid_countries)
from .lists import (title_prefixes,
citizen_types,
employer_types,
country_codes,
has_account,
)
@colander.deferred
def deferred_country_widget(node, kw):
    """Build the country Select2 widget from the bound 'country_codes_data'."""
    choices = kw.get('country_codes_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_state_widget(node, kw):
    """Build the US-state Select2 widget from the bound 'us_states_data'."""
    choices = kw.get('us_states_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_title_prefix_widget(node, kw):
    """Build the title-prefix Select2 widget from the bound 'title_prefix_data'."""
    choices = kw.get('title_prefix_data', [])
    return widget.Select2Widget(values=choices)
# Paired input + confirmation widget for the primary email field.
email_confirm_widget = deform.widget.CheckedInputWidget(
    subject='Email address',
    confirm_subject='Confirm your Email address',
)
# Same pairing for the optional preferred-email field.
pref_email_confirm_widget = deform.widget.CheckedInputWidget(
    subject='Optional Preferred Email',
    confirm_subject='Confirm your optional Email address',
)
# Plain text input styled for Bootstrap forms.
# NOTE(review): not referenced by the visible schema (sn builds its own
# TextInputWidget) -- possibly dead, or used by another module.
sn_widget = widget.TextInputWidget(
    css_class='form-control')
class AddAccountSchema(colander.Schema):
    """
    Colander schema describing the HPC account-request form.

    Each SchemaNode maps one form field (widget + validator) to an
    LDAP-style attribute; ``oid`` is the deform field identifier.
    Fields appear in form order: policy acceptances, identity, address,
    contact, employer, citizenship, and account details.
    """
    # couTimestamp
    cou = colander.SchemaNode(
        colander.Boolean(),
        title='Security and Acceptable Use Policy Acceptance',
        description='Terms and Conditions Agreement - Check this if '
        'you have read and agree to abide by the Center\'s '
        'Security and Acceptable Use Policies.',
        widget=widget.CheckboxWidget(),
        validator=cou_validator,
        oid='cou'
    )
    # storTimestamp
    stor = colander.SchemaNode(
        colander.Boolean(),
        title='Data Security Policy Acceptance',
        description='Check this if you have read and agree '
        'to the Center\'s storage policies.',
        widget=deform.widget.CheckboxWidget(),
        validator=stor_validator,
        oid='stor'
    )
    # cybeTimestamp
    # cyber = colander.SchemaNode(
    #     colander.Boolean(),
    #     title='Cyber Security Policy Acceptance',
    #     description='Check this if you have read and agree to abide by '
    #                 'the Center\'s Cyber Security policies.',
    #     widget=deform.widget.CheckboxWidget(),
    #     validator=cyber_validator,
    #     oid='cyber'
    # )
    # titlePrefix = colander.SchemaNode(
    #     colander.String(),
    #     title='Honorary',
    #     description='If you prefer to use n honorary, enter it here.',
    #     # validator=colander.ContainsOnly([x[0] for x in title_prefixes]),
    #     #validator=colander.Length(min=1, max=64),
    #     widget=widget.TextInputWidget(placeholder="Dr., Mr., Ms., etc."),
    #     missing=unicode(''),
    #     oid='titlePrefix'
    # )
    givenName = colander.SchemaNode(
        colander.String(),
        title='Given/First name',
        description='Your given or first name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='givenName'
    )
    middleName = colander.SchemaNode(
        colander.String(),
        title='Middle name/initial',
        description='Middle name or initial',
        validator=colander.Length(min=0, max=64),
        widget=widget.TextInputWidget(
            placeholder=''),
        missing=unicode(''),  # optional field
        oid='middleName'
    )
    sn = colander.SchemaNode(
        colander.String(),
        title='Family/Last Name',
        description='family Name / Last Name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(
            placeholder=''),
        oid='sn'
    )
    suffix = colander.SchemaNode(
        colander.String(),
        title='Suffix',
        description='(Sr. Jr. IV, etc.)',
        validator=colander.Length(min=0, max=32),
        widget=widget.TextInputWidget(placeholder='example: III, PhD, etc.'),
        missing=unicode(''),
        oid='suffix'
    )
    cn = colander.SchemaNode(
        colander.String(),
        title='Common or Nick Name',
        description='Your full name. How you want to be addressed.',
        validator=colander.Length(min=3, max=64),
        widget=widget.TextInputWidget(
            placeholder='(Optional) How you want to be addressed '
            'if different from: FirstName LastName'),
        missing=unicode(''),
        oid='cn'
    )
    street = colander.SchemaNode(
        colander.String(),
        title='Street Address',
        description='',
        validator=colander.Length(min=0, max=200),
        widget=widget.TextInputWidget(
            placeholder='business/institution address'),
        oid='street'
    )
    lcity = colander.SchemaNode(
        colander.String(),
        title='City',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='lcity'
    )
    st = colander.SchemaNode(
        colander.String(),
        title='State/Province',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        # NOTE(review): oid 'l' looks like a copy/paste slip (field is 'st',
        # and 'lcity' already covers locality) -- confirm before changing.
        oid='l'
    )
    postalCode = colander.SchemaNode(
        colander.String(),
        title='Post/ZIP Code',
        description='',
        validator=colander.Length(min=2, max=64),
        widget=widget.TextInputWidget(),
        oid='postalCode'
    )
    country = colander.SchemaNode(
        colander.String(),
        title='Country',
        description='',
        widget=widget.SelectWidget(values=country_codes),
        #validator=colander.OneOf([x[0] for x in country_codes]),
        validator=valid_country,
        oid='country'
    )
    mail = colander.SchemaNode(
        colander.String(),
        title='EMail',
        description='Your primary email account',
        # validator=colander.Email(msg="Please provide your work Email address. This will be the primary account we use to contact you."),
        widget=email_confirm_widget,
        oid='mail'
    )
    # mailPreferred = colander.SchemaNode(
    #     colander.String(),
    #     title='Preferred EMail',
    #     description='optional preferred email account',
    #     missing=unicode(''),
    #     widget=pref_email_confirm_widget,
    #     oid='mail'
    # )
    phone = colander.SchemaNode(
        colander.String(),
        title='Phone number',
        description='Please provide your primary telephone number',
        validator=phone_validator,
        widget=widget.TextInputWidget(),
        oid='phone'
    )
    cell = colander.SchemaNode(
        colander.String(),
        title='Cell phone number',
        description='For contact and verification',
        validator=phone_validator,
        missing=unicode(''),
        widget=widget.TextInputWidget(
            placeholder='(Optional) example: +1-000-000-0000'),
        oid='cell'
    )
    employerType = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf([x[0] for x in employer_types]),
        widget=deform.widget.RadioChoiceWidget(values=employer_types),
        title='Employer Type',
        description='Select the employer type from the list below that '
        'is most appropriate to your request',
        oid="employerType"
    )
    employerName = colander.SchemaNode(
        colander.String(),
        title='Employer, Institution, or Sponsor Name',
        description='Please provide the name of your employer or '
        'the institution you represent',
        validator=colander.Length(min=3, max=128),
        widget=widget.TextInputWidget(placeholder='employer name here'),
        oid='employerName'
    )
    citizenStatus = colander.SchemaNode(
        colander.String(),
        title='Citizenship Status',
        description='Select one of the following options '
        'that best describes your U.S. citizenship status',
        validator=colander.OneOf([x[0] for x in citizen_types]),
        widget=widget.RadioChoiceWidget(values=citizen_types),
        oid='citizenStatus'
    )
    citizenships = colander.SchemaNode(
        colander.Set(),  # multiple selection allowed
        title='Citizenships',
        description='Please select your country or countries of citizenship',
        validator=valid_countries,
        widget=widget.Select2Widget(values=country_codes, multiple=True),
        oid='citizenships',
    )
    # birthCountry
    birthCountry = colander.SchemaNode(
        colander.String(),
        title='Country of birth',
        description='Please enter/select your country of birth',
        validator=valid_country,
        widget=widget.Select2Widget(values=country_codes),
        oid='birthCountry',
    )
    isnreluser = colander.SchemaNode(
        colander.String(),
        title='Existing NREL Account?',
        description="Select the option that is most true for you.",
        widget=deform.widget.RadioChoiceWidget(values=has_account),
        missing=unicode(''),
        label='Existing or Previous ESIF HPC UserID',
        oid='isnreluser'
    )
    nrelUserID = colander.SchemaNode(
        colander.String(),
        title='Your Existing NREL HPC UserID',
        description='If you have --or previously had-- an NREL UserID, '
        'enter it here.',
        validator=colander.Length(min=1, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='nrelUserID'
    )
    justification = colander.SchemaNode(
        colander.String(),
        title='NREL HPC User Credential Information',
        widget=widget.TextAreaWidget(rows=6, columns=60),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description="If you don't have an account on NREL HPC systems, "
        "we need some additional information. Please provide "
        "the project handles or titles of the project allocations "
        "you are associated with. "
        "If you don't have an allocation, please tell us "
        "why you are requesting NREL HPC login credentials.",
        # NOTE(review): oid 'comments' duplicates the comments field's oid
        # below -- probably should be 'justification'; confirm.
        oid='comments'
    )
    preferredUID = colander.SchemaNode(
        colander.String(),
        title='*New* ESIF HPC UserID',
        description="Please provide your desired User ID here.<sup>1</sup>"
        "(3 to 16 characters, all lower case.)",
        validator=colander.Length(min=3, max=16),
        widget=widget.TextInputWidget(placeholder="example: jsmythe"),
        missing=unicode(''),
        oid='preferredUID'
    )
    comments = colander.SchemaNode(
        colander.String(),
        title='Additional Notes or Comments',
        widget=deform.widget.TextAreaWidget(rows=6, columns=60,
            placeholder='If you think we need any additional '
            'information to process or approve your request, '
            'please let us know (project name, PI, NREL contact, etc.).'),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description='If you think we need any additional '
        'information to process or approve your request, '
        'please let us know.',
        oid='comments'
    )
    # approvalStatus = colander.SchemaNode(
    #     colander.Integer(),
    #     title='Approval Status',
    #     description='The current status if the request review process',
    #     validator=deferred_review_status_validator,
    #     default=0,
    #     widget=widget.HiddenWidget(),
    #     missing=unicode(''),
    #     oid='approvalStatus'
    # )
|
3,425 | 234112ec16af39b79849dd08769597771fa2c38f | #! /usr/bin/env python
from taskHandler import Location, Task, TaskFactory
import roslib; roslib.load_manifest('smart_stool')
import rospy
from geometry_msgs.msg import PoseStamped, Twist, Vector3
from nav_msgs.msg import Odometry
from kobuki_msgs.msg import BumperEvent
from move_base_msgs.msg import MoveBaseActionResult
from tf.transformations import quaternion_about_axis, euler_from_quaternion
z_axis = (0,0,1)
from math import pi
class SmartStool:
def __init__(self):
# state of the smart stool
self.odomPose = Location(0,0,0)
self.bumperTriggered = False
self.atTaskLocation = False
# defining the tasks
stool = Task('stool', 1, Location(0,0,0), 'sit')
getMail = Task('get_mail', 2, Location(4,-3,0), 'bump')
chasePets = Task('chase_pets', 3, Location(0,0,0), 'wiggle')
charge = Task('charge_battery', 4, Location(1,0,0), 'sit')
charge.activate() # charging should always be an active task
# populate the task list and set up the task factory
taskList = [stool, getMail, chasePets, charge]
self.factory = TaskFactory(taskList)
# set up the current task
self.task = self.factory.getNextTask()
# set up the subscribers
self.odom_sub = rospy.Subscriber('/odom', Odometry, self.readOdometry, queue_size=1)
self.bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1)
self.goalReached_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goalReached, queue_size=1)
# set up the publishers
self.moveBase_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped)
self.action_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist)
def goToTask(self):
# send the smart stool to the location of its current task
current_task_location = self.task.location.copy()
goal = PoseStamped()
goal.header.frame_id = 'map'
goal.header.seq = 1
now = rospy.Time.now()
goal.header.stamp.secs = now.secs
goal.header.stamp.nsecs = now.nsecs
goal.pose.position.x = current_task_location.x
goal.pose.position.y = current_task_location.y
goal.pose.position.z = 0
quat = quaternion_about_axis(current_task_location.theta,z_axis)
goal.pose.orientation.w = quat[0]
goal.pose.orientation.x = quat[1]
goal.pose.orientation.y = quat[2]
goal.pose.orientation.z = quat[3]
self.moveBase_pub.publish(goal)
def publishTwist(self, cmd_linvel, cmd_angvel):
# publishes a Twist message to /cmd_vel_mux/input/teleop to perform custom motion actions
self.action_pub.publish(Twist(Vector3(cmd_linvel,0,0),Vector3(0,0,cmd_angvel)))
def actionHandler(self,actionName):
####
#### TODO: a change of task priority doesn't necessarily mean that the task was deactivated. Need to check
#### if original task is still in list of active tasks. if it is, do not deactivate it. if it's not, deactivate it.
#### Also need to check for other general silly mistakes
####
current_task = self.task.copy()
startLocation = self.odomPose.copy()
driveSpeed = 0.1
spinSpeed = 0.5
close_enough = 0.1
wiggle_rotate = pi/2
timeout = 10
startTime = rospy.get_time()
# execute the sit action
print actionName
if actionName == 'sit':
while (not rospy.is_shutdown()) and (self.task == current_task):
self.publishTwist(0,0)
rate.sleep()
self.task = self.factory.getNextTask()
##### TEMP #####
self.factory.activateTask('get_mail')
# execute the bump action
elif actionName == 'bump':
self.bumperTriggered = False
while not rospy.is_shutdown() and not self.bumperTriggered:
self.publishTwist(driveSpeed,0)
rate.sleep()
startTime = rospy.get_time()
while not rospy.is_shutdown() and (rospy.get_time() - startTime < 1):
self.publishTwist(-driveSpeed,0)
rate.sleep()
self.factory.deactivateTask(current_task.name)
# execute the wiggle action
elif actionName == 'wiggle':
while self.task == current_task or (rospy.get_time() - startTime > timeout):
while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,-wiggle_rotate):
self.publishTwist(0,-spinSpeed)
rate.sleep()
while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,wiggle_rotate):
self.publishTwist(0,spinSpeed)
rate.sleep()
self.task = self.factory.getNextTask()
self.factory.deactivateTask(current_task.name)
# warn that the specified action is not implemented
else:
print 'Action not implemented!'
print actionName
# stop the robot:
self.publishTwist(0,0)
def execute(self):
if self.task is None: break
current_task = self.task.copy()
self.goToTask()
# wait for the robot to be at its goal position
print 'going to task:' + current_task.name
while not self.atTaskLocation:
rate.sleep()
self.task = self.factory.getNextTask()
# if that task has changed, exit this function
if not(current_task == self.task):
return
# reset for the next task
self.atTaskLocation = False
print 'doing action'
self.actionHandler(self.task.getAction())
def readOdometry(self,msg):
# callback function to read the robot's current odometry position
odom_position = msg.pose.pose.position
odom_rotation = msg.pose.pose.orientation
self.odomPose = Location(odom_position.x,odom_position.y,euler_from_quaternion((odom_rotation.w, odom_rotation.x, odom_rotation.y, odom_rotation.z))[2])
def readBumper(self,msg):
# callback function to set the bumperTriggered flag if the bumper was hit
self.bumperTriggered = True
def goalReached(self,msg):
# callback function to determine if the current task location was reached
if msg.status.status == 3:
self.atTaskLocation = True
if __name__ == '__main__':
    # initialize the node:
    rospy.init_node('smart_stool')
    freq = 30  # hz
    # NOTE: 'rate' is read as a module-level global by SmartStool's methods.
    rate = rospy.Rate(freq)
    # set up the smart stool object
    mySmartStool = SmartStool()
    # wait for one second (freq sleeps at freq Hz)
    for i in range(freq):
        rate.sleep()
    # main loop: repeatedly pursue and perform the current task
    while not rospy.is_shutdown():
        mySmartStool.execute()
        rate.sleep()
    #top = factory.getNextTask()
    #all = factory.getAllTasks()
3,426 | f781377a52400abd617e7f0c5529726120b78476 | import random
import time
from typing import Dict, List, Optional
from bemani.client.base import BaseClient
from bemani.protocol import Node
class ReflecBeatColette(BaseClient):
NAME = 'TEST'
def verify_pcb_boot(self, loc: str) -> None:
    """Issue a pcb.boot request for the given location id and validate it."""
    call = self.call_node()
    req = Node.void('pcb')
    req.set_attribute('method', 'boot')
    req.add_child(Node.string('lid', loc))
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    # Boot response must carry shop info plus closing-hour settings.
    for path in (
        "response/pcb/sinfo/nm",
        "response/pcb/sinfo/cl_enbl",
        "response/pcb/sinfo/cl_h",
        "response/pcb/sinfo/cl_m",
    ):
        self.assert_path(resp, path)
def verify_info_common(self) -> None:
    """Fetch common game info and confirm the event/item control blobs."""
    call = self.call_node()
    req = Node.void('info')
    req.set_attribute('method', 'common')
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    for path in ("response/info/event_ctrl", "response/info/item_lock_ctrl"):
        self.assert_path(resp, path)
def verify_info_ranking(self) -> None:
    """Fetch the song ranking blob and check every ranking period."""
    call = self.call_node()
    req = Node.void('info')
    req.set_attribute('method', 'ranking')
    req.add_child(Node.s32('ver', 0))
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    # Each period carries begin/end times plus new-entry song records.
    paths = ["response/info/ver"]
    for period in ('weekly', 'monthly', 'total'):
        paths += [
            "response/info/ranking/{}/bt".format(period),
            "response/info/ranking/{}/et".format(period),
            "response/info/ranking/{}/new/d/mid".format(period),
            "response/info/ranking/{}/new/d/cnt".format(period),
        ]
    for path in paths:
        self.assert_path(resp, path)
def verify_player_start(self, refid: str) -> None:
    """Start a play session for the refid and check the session payload."""
    call = self.call_node()
    req = Node.void('player')
    req.set_attribute('method', 'start')
    req.add_child(Node.string('rid', refid))
    req.add_child(Node.u8_array('ga', [127, 0, 0, 1]))
    req.add_child(Node.u16('gp', 10573))
    req.add_child(Node.u8_array('la', [16, 0, 0, 0]))
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    # Session start must include ids, timing and all event/collabo blobs.
    for path in (
        "response/player/plyid",
        "response/player/start_time",
        "response/player/event_ctrl",
        "response/player/item_lock_ctrl",
        "response/player/lincle_link_4",
        "response/player/jbrbcollabo",
        "response/player/tricolettepark",
    ):
        self.assert_path(resp, path)
def verify_player_delete(self, refid: str) -> None:
    """Issue player.delete for the refid; any player response node passes."""
    call = self.call_node()
    req = Node.void('player')
    req.set_attribute('method', 'delete')
    req.add_child(Node.string('rid', refid))
    call.add_child(req)
    # Round-trip with the server and confirm a well-formed ack.
    resp = self.exchange('', call)
    self.assert_path(resp, "response/player")
def verify_player_end(self, refid: str) -> None:
    """End the play session for the refid; any player response node passes."""
    call = self.call_node()
    req = Node.void('player')
    req.set_attribute('method', 'end')
    req.add_child(Node.string('rid', refid))
    call.add_child(req)
    # Round-trip with the server and confirm a well-formed ack.
    resp = self.exchange('', call)
    self.assert_path(resp, "response/player")
def verify_player_succeed(self, refid: str) -> None:
    """Check the succeed lookup returns carried-over profile fields."""
    call = self.call_node()
    req = Node.void('player')
    req.set_attribute('method', 'succeed')
    req.add_child(Node.string('rid', refid))
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    for field in ('name', 'lv', 'exp', 'grd', 'ap', 'released', 'mrecord'):
        self.assert_path(resp, "response/player/" + field)
def verify_player_read(self, refid: str, location: str) -> List[Dict[str, int]]:
    """Read the player profile back from the server, sanity-check its
    structure and name, and return the score records as plain dicts."""
    call = self.call_node()
    player = Node.void('player')
    player.set_attribute('method', 'read')
    player.add_child(Node.string('rid', refid))
    player.add_child(Node.string('lid', location))
    player.add_child(Node.s16('ver', 5))
    call.add_child(player)
    # Swap with server
    resp = self.exchange('', call)
    # Verify that response is correct: account, base, and profile sections
    # must all be present in the returned pdata blob.
    self.assert_path(resp, "response/player/pdata/account/usrid")
    self.assert_path(resp, "response/player/pdata/account/tpc")
    self.assert_path(resp, "response/player/pdata/account/dpc")
    self.assert_path(resp, "response/player/pdata/account/crd")
    self.assert_path(resp, "response/player/pdata/account/brd")
    self.assert_path(resp, "response/player/pdata/account/tdc")
    self.assert_path(resp, "response/player/pdata/account/intrvld")
    self.assert_path(resp, "response/player/pdata/account/ver")
    self.assert_path(resp, "response/player/pdata/account/pst")
    self.assert_path(resp, "response/player/pdata/account/st")
    self.assert_path(resp, "response/player/pdata/base/name")
    self.assert_path(resp, "response/player/pdata/base/exp")
    self.assert_path(resp, "response/player/pdata/base/lv")
    self.assert_path(resp, "response/player/pdata/base/mg")
    self.assert_path(resp, "response/player/pdata/base/ap")
    self.assert_path(resp, "response/player/pdata/base/tid")
    self.assert_path(resp, "response/player/pdata/base/tname")
    self.assert_path(resp, "response/player/pdata/base/cmnt")
    self.assert_path(resp, "response/player/pdata/base/uattr")
    self.assert_path(resp, "response/player/pdata/base/hidden_param")
    self.assert_path(resp, "response/player/pdata/base/tbs")
    self.assert_path(resp, "response/player/pdata/base/tbs_r")
    self.assert_path(resp, "response/player/pdata/rival")
    self.assert_path(resp, "response/player/pdata/fav_music_slot")
    self.assert_path(resp, "response/player/pdata/custom")
    self.assert_path(resp, "response/player/pdata/config")
    self.assert_path(resp, "response/player/pdata/stamp")
    self.assert_path(resp, "response/player/pdata/released")
    self.assert_path(resp, "response/player/pdata/record")
    # The server must echo back the name we registered with.
    if resp.child_value('player/pdata/base/name') != self.NAME:
        raise Exception('Invalid name {} returned on profile read!'.format(resp.child_value('player/pdata/base/name')))
    # Collect one dict per 'rec' child; keys mirror the protocol node names.
    scores = []
    for child in resp.child('player/pdata/record').children:
        if child.name != 'rec':
            continue
        score = {
            'id': child.child_value('mid'),
            'chart': child.child_value('ntgrd'),
            'clear_type': child.child_value('ct'),
            'achievement_rate': child.child_value('ar'),
            'score': child.child_value('scr'),
            'combo': child.child_value('cmb'),
            'miss_count': child.child_value('ms'),
        }
        scores.append(score)
    return scores
def verify_player_write(self, refid: str, loc: str, scores: List[Dict[str, int]]) -> int:
    """Write a minimal profile plus one stage log per score dict, and
    return the extid ('uid') the server assigns to the profile."""
    call = self.call_node()
    player = Node.void('player')
    call.add_child(player)
    player.set_attribute('method', 'write')
    pdata = Node.void('pdata')
    player.add_child(pdata)
    # Account section: play counters and session bookkeeping.
    account = Node.void('account')
    pdata.add_child(account)
    account.add_child(Node.s32('usrid', 0))
    account.add_child(Node.s32('plyid', 0))
    account.add_child(Node.s32('tpc', 1))
    account.add_child(Node.s32('dpc', 1))
    account.add_child(Node.s32('crd', 1))
    account.add_child(Node.s32('brd', 1))
    account.add_child(Node.s32('tdc', 1))
    account.add_child(Node.string('rid', refid))
    account.add_child(Node.string('lid', loc))
    account.add_child(Node.u8('mode', 0))
    account.add_child(Node.s16('ver', 5))
    account.add_child(Node.bool('pp', True))
    account.add_child(Node.bool('ps', True))
    account.add_child(Node.s16('pay', 0))
    account.add_child(Node.s16('pay_pc', 0))
    account.add_child(Node.u64('st', int(time.time() * 1000)))  # ms timestamp
    # Base section: name/level plus the fixed hidden_param blob.
    base = Node.void('base')
    pdata.add_child(base)
    base.add_child(Node.string('name', self.NAME))
    base.add_child(Node.s32('exp', 0))
    base.add_child(Node.s32('lv', 1))
    base.add_child(Node.s32('mg', -1))
    base.add_child(Node.s32('ap', -1))
    base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    base.add_child(Node.bool('is_tut', True))
    # Stage log: one 'log' entry per score, in stage order.
    stglog = Node.void('stglog')
    pdata.add_child(stglog)
    index = 0
    for score in scores:
        log = Node.void('log')
        stglog.add_child(log)
        log.add_child(Node.s8('stg', index))
        log.add_child(Node.s16('mid', score['id']))
        log.add_child(Node.s8('ng', score['chart']))
        log.add_child(Node.s8('col', 0))
        log.add_child(Node.s8('mt', 7))
        log.add_child(Node.s8('rt', 0))
        log.add_child(Node.s8('ct', score['clear_type']))
        log.add_child(Node.s16('grd', 0))
        log.add_child(Node.s16('ar', score['achievement_rate']))
        log.add_child(Node.s16('sc', score['score']))
        log.add_child(Node.s16('jt_jst', 0))
        log.add_child(Node.s16('jt_grt', 0))
        log.add_child(Node.s16('jt_gd', 0))
        log.add_child(Node.s16('jt_ms', score['miss_count']))
        log.add_child(Node.s16('jt_jr', 0))
        log.add_child(Node.s16('cmb', score['combo']))
        log.add_child(Node.s16('exp', 0))
        # Rival fields are zeroed/sentinel -- no rival play in this test.
        log.add_child(Node.s32('r_uid', 0))
        log.add_child(Node.s32('r_plyid', 0))
        log.add_child(Node.s8('r_stg', 0))
        log.add_child(Node.s8('r_ct', -1))
        log.add_child(Node.s16('r_sc', 0))
        log.add_child(Node.s16('r_grd', 0))
        log.add_child(Node.s16('r_ar', 0))
        log.add_child(Node.s8('r_cpuid', -1))
        log.add_child(Node.s32('time', int(time.time())))
        log.add_child(Node.s8('decide', 0))
        index = index + 1
    # Swap with server
    resp = self.exchange('', call)
    # Verify that response is correct and return the assigned extid.
    self.assert_path(resp, "response/player/uid")
    return resp.child_value('player/uid')
def verify_lobby_read(self, location: str, extid: int) -> None:
    """Read the lobby for our extid and confirm polling intervals return."""
    call = self.call_node()
    req = Node.void('lobby')
    req.set_attribute('method', 'read')
    req.add_child(Node.s32('uid', extid))
    req.add_child(Node.u8('m_grade', 255))
    req.add_child(Node.string('lid', location))
    req.add_child(Node.s32('max', 128))
    req.add_child(Node.s32_array('friend', []))
    req.add_child(Node.u8('var', 5))
    call.add_child(req)
    # Round-trip with the server.
    resp = self.exchange('', call)
    for path in ("response/lobby/interval", "response/lobby/interval_p"):
        self.assert_path(resp, path)
def verify_lobby_entry(self, location: str, extid: int) -> int:
    """Register a lobby entry for our player and return the entry id the
    server assigns, after validating the echoed entry structure."""
    call = self.call_node()
    lobby = Node.void('lobby')
    lobby.set_attribute('method', 'entry')
    # The 'e' node describes our lobby entry: song, player, shop, network.
    e = Node.void('e')
    lobby.add_child(e)
    e.add_child(Node.s32('eid', 0))
    e.add_child(Node.u16('mid', 79))
    e.add_child(Node.u8('ng', 0))
    e.add_child(Node.s32('uid', extid))
    e.add_child(Node.s32('uattr', 0))
    e.add_child(Node.string('pn', self.NAME))
    e.add_child(Node.s16('mg', 255))
    e.add_child(Node.s32('mopt', 0))
    e.add_child(Node.s32('tid', 0))
    e.add_child(Node.string('tn', ''))
    e.add_child(Node.s32('topt', 0))
    e.add_child(Node.string('lid', location))
    e.add_child(Node.string('sn', ''))
    e.add_child(Node.u8('pref', 51))
    e.add_child(Node.s8('stg', 4))
    e.add_child(Node.s8('pside', 0))
    e.add_child(Node.s16('eatime', 30))
    e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))
    e.add_child(Node.u16('gp', 10007))
    e.add_child(Node.u8_array('la', [16, 0, 0, 0]))
    e.add_child(Node.u8('ver', 5))
    lobby.add_child(Node.s32_array('friend', []))
    call.add_child(lobby)
    # Swap with server
    resp = self.exchange('', call)
    # Verify that response is correct: intervals, our entry id, and the
    # full echoed entry node.
    self.assert_path(resp, "response/lobby/interval")
    self.assert_path(resp, "response/lobby/interval_p")
    self.assert_path(resp, "response/lobby/eid")
    self.assert_path(resp, "response/lobby/e/eid")
    self.assert_path(resp, "response/lobby/e/mid")
    self.assert_path(resp, "response/lobby/e/ng")
    self.assert_path(resp, "response/lobby/e/uid")
    self.assert_path(resp, "response/lobby/e/uattr")
    self.assert_path(resp, "response/lobby/e/pn")
    self.assert_path(resp, "response/lobby/e/mg")
    self.assert_path(resp, "response/lobby/e/mopt")
    self.assert_path(resp, "response/lobby/e/tid")
    self.assert_path(resp, "response/lobby/e/tn")
    self.assert_path(resp, "response/lobby/e/topt")
    self.assert_path(resp, "response/lobby/e/lid")
    self.assert_path(resp, "response/lobby/e/sn")
    self.assert_path(resp, "response/lobby/e/pref")
    self.assert_path(resp, "response/lobby/e/stg")
    self.assert_path(resp, "response/lobby/e/pside")
    self.assert_path(resp, "response/lobby/e/eatime")
    self.assert_path(resp, "response/lobby/e/ga")
    self.assert_path(resp, "response/lobby/e/gp")
    self.assert_path(resp, "response/lobby/e/la")
    self.assert_path(resp, "response/lobby/e/ver")
    return resp.child_value('lobby/eid')
def verify_lobby_delete(self, eid: int) -> None:
    """Remove our lobby entry by entry id."""
    call = self.call_node()
    req = Node.void('lobby')
    req.set_attribute('method', 'delete')
    req.add_child(Node.s32('eid', eid))
    call.add_child(req)
    # Round-trip with the server and confirm a well-formed ack.
    resp = self.exchange('', call)
    self.assert_path(resp, "response/lobby")
def verify_pzlcmt_read(self, extid: int) -> None:
    """Read back puzzle comments and verify the comment we previously
    posted (via verify_pzlcmt_write) is present under our extid/name."""
    call = self.call_node()
    info = Node.void('info')
    info.set_attribute('method', 'pzlcmt_read')
    info.add_child(Node.s32('uid', extid))
    info.add_child(Node.s32('tid', 0))
    info.add_child(Node.s32('time', 0))
    info.add_child(Node.s32('limit', 30))
    call.add_child(info)
    # Swap with server
    resp = self.exchange('', call)
    # Verify that response is correct: at least one full comment record.
    self.assert_path(resp, "response/info/comment/time")
    self.assert_path(resp, "response/info/c/uid")
    self.assert_path(resp, "response/info/c/name")
    self.assert_path(resp, "response/info/c/icon")
    self.assert_path(resp, "response/info/c/bln")
    self.assert_path(resp, "response/info/c/tid")
    self.assert_path(resp, "response/info/c/t_name")
    self.assert_path(resp, "response/info/c/pref")
    self.assert_path(resp, "response/info/c/time")
    self.assert_path(resp, "response/info/c/comment")
    self.assert_path(resp, "response/info/c/is_tweet")
    # Verify we posted our comment earlier: scan 'c' children for our extid
    # and check both the name and the exact comment text round-tripped.
    found = False
    for child in resp.child('info').children:
        if child.name != 'c':
            continue
        if child.child_value('uid') == extid:
            name = child.child_value('name')
            comment = child.child_value('comment')
            if name != self.NAME:
                raise Exception('Invalid name \'{}\' returned for comment!'.format(name))
            if comment != 'アメ〜〜!':
                raise Exception('Invalid comment \'{}\' returned for comment!'.format(comment))
            found = True
    if not found:
        raise Exception('Comment we posted was not found!')
def verify_pzlcmt_write(self, extid: int) -> None:
    """Post a puzzle comment under our extid and player name."""
    call = self.call_node()
    req = Node.void('info')
    req.set_attribute('method', 'pzlcmt_write')
    for child in (
        Node.s32('uid', extid),
        Node.string('name', self.NAME),
        Node.s16('icon', 0),
        Node.s8('bln', 0),
        Node.s32('tid', 0),
        Node.string('t_name', ''),
        Node.s8('pref', 51),
        Node.s32('time', int(time.time())),
        Node.string('comment', 'アメ〜〜!'),
        Node.bool('is_tweet', True),
    ):
        req.add_child(child)
    call.add_child(req)
    # Round-trip with the server and confirm a well-formed ack.
    resp = self.exchange('', call)
    self.assert_path(resp, "response/info")
def verify_jbrbcollabo_save(self, refid: str) -> None:
    """Save jubeat/Reflec collabo progress for the refid."""
    call = self.call_node()
    req = Node.void('jbrbcollabo')
    req.set_attribute('method', 'save')
    req.add_child(Node.string('ref_id', refid))
    req.add_child(Node.u16('cre_count', 0))
    call.add_child(req)
    # Round-trip with the server and confirm a well-formed ack.
    resp = self.exchange('', call)
    self.assert_path(resp, "response/jbrbcollabo")
def verify(self, cardid: Optional[str]) -> None:
    """Run the full end-to-end server verification flow.

    Covers the boot/services handshake, card registration or lookup
    (depending on whether *cardid* was supplied), lobby, puzzle
    comments, Jubeat/ReflecBeat collabo save, score write/read/update
    checks, rankings, and PASELI wallet handling.
    """
    # Verify boot sequence is okay
    self.verify_services_get(
        expected_services=[
            'pcbtracker',
            'pcbevent',
            'local',
            'message',
            'facility',
            'cardmng',
            'package',
            'posevent',
            'pkglist',
            'dlstatus',
            'eacoin',
            'lobby',
            'ntp',
            'keepalive'
        ]
    )
    paseli_enabled = self.verify_pcbtracker_alive()
    self.verify_message_get()
    self.verify_package_list()
    location = self.verify_facility_get()
    self.verify_pcbevent_put()
    self.verify_pcb_boot(location)
    self.verify_info_common()
    # Verify card registration and profile lookup
    if cardid is not None:
        card = cardid
    else:
        card = self.random_card()
        print("Generated random card ID {} for use.".format(card))
    if cardid is None:
        self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled)
        ref_id = self.verify_cardmng_getrefid(card)
        if len(ref_id) != 16:
            raise Exception('Invalid refid \'{}\' returned when registering card'.format(ref_id))
        if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled):
            raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
        # Always get a player start, regardless of new profile or not
        self.verify_player_start(ref_id)
        self.verify_player_delete(ref_id)
        self.verify_player_succeed(ref_id)
        extid = self.verify_player_write(
            ref_id,
            location,
            [{
                'id': 0,
                'chart': 0,
                'clear_type': -1,
                'achievement_rate': 0,
                'score': 0,
                'combo': 0,
                'miss_count': 0,
            }]
        )
    else:
        print("Skipping new card checks for existing card")
        # NOTE(review): this branch never assigns 'extid', yet the lobby and
        # comment checks below use it — presumably only exercised with
        # cardid=None; confirm before relying on the existing-card path.
        ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled)
    # Verify pin handling and return card handling
    self.verify_cardmng_authpass(ref_id, correct=True)
    self.verify_cardmng_authpass(ref_id, correct=False)
    if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled):
        raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
    # Verify lobby functionality
    self.verify_lobby_read(location, extid)
    eid = self.verify_lobby_entry(location, extid)
    self.verify_lobby_delete(eid)
    # Verify puzzle comment read and write
    self.verify_pzlcmt_write(extid)
    self.verify_pzlcmt_read(extid)
    # Verify Jubeat/ReflecBeat collabo save
    self.verify_jbrbcollabo_save(ref_id)
    if cardid is None:
        # Verify score saving and updating: phase 1 seeds scores, phase 2
        # submits a better and a worse score to check update semantics.
        for phase in [1, 2]:
            if phase == 1:
                dummyscores = [
                    # An okay score on a chart
                    {
                        'id': 1,
                        'chart': 1,
                        'clear_type': 2,
                        'achievement_rate': 7543,
                        'score': 432,
                        'combo': 123,
                        'miss_count': 5,
                    },
                    # A good score on an easier chart of the same song
                    {
                        'id': 1,
                        'chart': 0,
                        'clear_type': 4,
                        'achievement_rate': 9876,
                        'score': 543,
                        'combo': 543,
                        'miss_count': 0,
                    },
                    # A bad score on a hard chart
                    {
                        'id': 3,
                        'chart': 2,
                        'clear_type': 2,
                        'achievement_rate': 1234,
                        'score': 123,
                        'combo': 42,
                        'miss_count': 54,
                    },
                    # A terrible score on an easy chart
                    {
                        'id': 3,
                        'chart': 0,
                        'clear_type': 2,
                        'achievement_rate': 1024,
                        'score': 50,
                        'combo': 12,
                        'miss_count': 90,
                    },
                ]
            if phase == 2:
                dummyscores = [
                    # A better score on the same chart
                    {
                        'id': 1,
                        'chart': 1,
                        'clear_type': 3,
                        'achievement_rate': 8765,
                        'score': 469,
                        'combo': 468,
                        'miss_count': 1,
                    },
                    # A worse score on another same chart; the server should
                    # keep the better phase-1 values (the 'expected_*' keys).
                    {
                        'id': 1,
                        'chart': 0,
                        'clear_type': 2,
                        'achievement_rate': 8765,
                        'score': 432,
                        'combo': 321,
                        'miss_count': 15,
                        'expected_score': 543,
                        'expected_clear_type': 4,
                        'expected_achievement_rate': 9876,
                        'expected_combo': 543,
                        'expected_miss_count': 0,
                    },
                ]
            self.verify_player_write(ref_id, location, dummyscores)
            scores = self.verify_player_read(ref_id, location)
            for expected in dummyscores:
                actual = None
                for received in scores:
                    if received['id'] == expected['id'] and received['chart'] == expected['chart']:
                        actual = received
                        break
                if actual is None:
                    raise Exception("Didn't find song {} chart {} in response!".format(expected['id'], expected['chart']))
                # Fall back to the submitted values when no explicit
                # 'expected_*' override was given in the dummy score.
                if 'expected_score' in expected:
                    expected_score = expected['expected_score']
                else:
                    expected_score = expected['score']
                if 'expected_achievement_rate' in expected:
                    expected_achievement_rate = expected['expected_achievement_rate']
                else:
                    expected_achievement_rate = expected['achievement_rate']
                if 'expected_clear_type' in expected:
                    expected_clear_type = expected['expected_clear_type']
                else:
                    expected_clear_type = expected['clear_type']
                if 'expected_combo' in expected:
                    expected_combo = expected['expected_combo']
                else:
                    expected_combo = expected['combo']
                if 'expected_miss_count' in expected:
                    expected_miss_count = expected['expected_miss_count']
                else:
                    expected_miss_count = expected['miss_count']
                if actual['score'] != expected_score:
                    raise Exception('Expected a score of \'{}\' for song \'{}\' chart \'{}\' but got score \'{}\''.format(
                        expected_score, expected['id'], expected['chart'], actual['score'],
                    ))
                if actual['achievement_rate'] != expected_achievement_rate:
                    raise Exception('Expected an achievement rate of \'{}\' for song \'{}\' chart \'{}\' but got achievement rate \'{}\''.format(
                        expected_achievement_rate, expected['id'], expected['chart'], actual['achievement_rate'],
                    ))
                if actual['clear_type'] != expected_clear_type:
                    raise Exception('Expected a clear_type of \'{}\' for song \'{}\' chart \'{}\' but got clear_type \'{}\''.format(
                        expected_clear_type, expected['id'], expected['chart'], actual['clear_type'],
                    ))
                if actual['combo'] != expected_combo:
                    raise Exception('Expected a combo of \'{}\' for song \'{}\' chart \'{}\' but got combo \'{}\''.format(
                        expected_combo, expected['id'], expected['chart'], actual['combo'],
                    ))
                if actual['miss_count'] != expected_miss_count:
                    raise Exception('Expected a miss count of \'{}\' for song \'{}\' chart \'{}\' but got miss count \'{}\''.format(
                        expected_miss_count, expected['id'], expected['chart'], actual['miss_count'],
                    ))
            # Sleep so we don't end up putting in score history on the same second
            time.sleep(1)
    else:
        print("Skipping score checks for existing card")
    # Verify ending game
    self.verify_player_end(ref_id)
    # Verify high score tables
    self.verify_info_ranking()
    # Verify paseli handling
    if paseli_enabled:
        print("PASELI enabled for this PCBID, executing PASELI checks")
    else:
        print("PASELI disabled for this PCBID, skipping PASELI checks")
        return
    sessid, balance = self.verify_eacoin_checkin(card)
    if balance == 0:
        print("Skipping PASELI consume check because card has 0 balance")
    else:
        self.verify_eacoin_consume(sessid, balance, random.randint(0, balance))
    self.verify_eacoin_checkout(sessid)
|
3,427 | ec9a152e39a0c51319e4db58eea4496cff5b2fd6 | import numpy
from nn_functor import functions
class Linear(functions.Learn):
    """Fully connected (affine) layer expressed as a learner.

    Parameters p are a (weight, bias) pair; inputs a are a one-element
    tuple holding the input vector.
    """

    def implement(self, a, p):
        """Forward pass: w·x + b for the single input in *a*."""
        weight, bias = p
        return weight.dot(a[0]) + bias

    def update(self, a, b, p):
        """One gradient step on (w, b) toward the target output *b*."""
        err = self.implement(a, p) - b
        weight, bias = p
        # outer(err, x) is the weight gradient of the squared error.
        return (
            weight - self.eps * numpy.outer(err, a[0]),
            bias - self.eps * err,
        )

    def request(self, a, b, p):
        """Back-propagated request: the corrected input for the layer below."""
        err = self.implement(a, p) - b
        weight, _ = p
        return (a[0] - weight.T.dot(err),)
class LinearNode(functions.Node):
    """Graph node wrapping a Linear learner with randomly initialised parameters."""

    def __init__(self, in_size, out_size, eps):
        super().__init__(Linear(eps))
        self.param_name = ["w", "b"]
        # Standard-normal initialisation of the weight matrix and bias vector.
        self.w = numpy.random.randn(out_size, in_size)
        self.b = numpy.random.randn(out_size)
|
3,428 | ac1ac80739bed0cebf7a89a7d55e1b4fa6c68cdf | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render the sau5081 landing page."""
    template = "sau5081/sau5081.html"
    return render(request, template)
3,429 | 4a9c42727a28e19cf1eebcf72784b85bbae695bf | #1.25.2019 - shashi
#Program that accepts an array of different elements
#And moves all the integer 0s to the end of it. String 0s like "0" or "0.0" remain untouched.
def shiftZeroesToEnd(myArray):
    """Return a copy of *myArray* with every numeric zero moved to the end.

    A "numeric zero" is any non-string whose str() is "0" or "0.0"
    (so int 0 and float 0.0 move, but "0", "0.0", False and True stay).
    Moved zeros are appended as the integer 0.
    """
    def is_numeric_zero(value):
        return str(value) in ("0", "0.0") and type(value) is not str

    kept = [item for item in myArray if not is_numeric_zero(item)]
    zero_count = len(myArray) - len(kept)
    return kept + [0] * zero_count
# Testing the function on mixed-type samples: int/float zeros move to the
# end, while booleans and string "0"s stay in place.
print(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, "b", "a"]))
print(shiftZeroesToEnd([0.0, 23, -3, False, "xxx", 0, 112, True , 9]))
|
3,430 | cee9deeeabfec46ee5c132704e8fd653e55987f3 | # coding: utf-8
import logging
def __gen_logger():
    """Create (or fetch) the shared 'superslick' logger instance."""
    return logging.getLogger('superslick')


# Module-level logger used throughout the package.
logger = __gen_logger()
3,431 | c74fc99bf8582fd83c312f27dfffbe894a2c8c1b | import os
import sqlite3
import operator
from collections import OrderedDict
import matplotlib.pyplot as plt
def parse(url):
    """Return the domain of *url* with any 'www.' removed.

    Prints an error and returns None when the URL has no '//' scheme separator.
    """
    try:
        after_scheme = url.split('//')[1]
        host = after_scheme.split('/', 1)[0]
        return host.replace("www.", "")
    except IndexError:
        # No '//' present: not a URL we can split.
        print("URL format error!")
def analyze(results):
    """Interactively report the visit counts in *results*.

    results: mapping of site -> visit count.
    Prompts the user for 'c' (print the counts) or 'p' (show a bar plot);
    any other input prints a message and exits the program.
    """
    prompt = input("[.] Type <c> to print or <p> to plot\n[>] ")
    if prompt == "c":
        # Bug fix: this loop previously iterated the undefined global
        # 'sites_count_sorted' (NameError at runtime); use the parameter.
        for site, count in list(results.items()):
            print(site, count)
    elif prompt == "p":
        plt.bar(list(range(len(results))), list(results.values()), align='edge')
        plt.xticks(rotation=45)
        plt.xticks(list(range(len(results))), list(results.keys()))
        plt.show()
    else:
        print("[.] Uh?")
        quit()
3,432 | af7a124c873dda02ba2a78e85965aa243d791863 | # -*- coding: utf-8 -*-
import os
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
class Config:
    """Base application settings shared by all environments."""
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # github
    GITHUB_OAUTH2 = {
        # obtained from the GitHub OAuth application settings
        'client_id': '',
        'client_secret': '',
        'callback_url': '',
        'scope': 'user',
        'auth_url': 'http://github.com/login/oauth/authorize?client_id={client_id}&scope={scope}&state={csrf}'
                    '&redirect_uri={redirect_uri}',
    }
    # access token expiry, in days
    ACCESS_TOKEN_EXP = 30
    # name of the cookie carrying the auth token
    AUTH_COOKIE_NAME = 'token'
    # Elasticsearch config: runs under docker, so the host is the service
    # name rather than an IP address (an IP would be used otherwise)
    ELASTICSEARCH_URL = "elasticsearch:9200"
    # scrapyd configuration
    SCRAPYD_URL = "http://127.0.0.1:6800"
    SCRAPY_PROJECT_NAME = "spider_tophub"
    # crawler scheduler (APScheduler) configuration
    JOBSTORES={'default': SQLAlchemyJobStore(url='mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases')}
    JOB_DEFAULTS={
        'coalesce': True,
        'max_instances': 1
    }
class DevelopmentConfig(Config):
    """Development-environment settings."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'mysql+pymysql://your_user:your_user_password@mysql:3306/your_databases'
    # disable the Flask-SQLAlchemy event (modification tracking) system
    SQLALCHEMY_TRACK_MODIFICATIONS = False
# Named configuration registry; 'default' falls back to the development settings.
config = dict(
    development=DevelopmentConfig,
    default=DevelopmentConfig,
)
3,433 | cdb49af584ae7befcaebfd9bb303073c8229667e | #!/usr/bin/python
import os
import psycopg2
import datetime
import time
import json
import decimal
import requests
import csv
import asyncio
from config import get_connection, get_db_sql, get_sql_record_count, CORP_TYPES_IN_SCOPE, corp_num_with_prefix, bare_corp_num
from orgbook_data_load import (
get_orgbook_all_corps, get_orgbook_all_corps_csv,
get_event_proc_future_corps, get_event_proc_future_corps_csv,
get_bc_reg_corps, get_bc_reg_corps_csv,
get_agent_wallet_ids, append_agent_wallet_ids
)
# NOTE(review): QUERY_LIMIT / REPORT_COUNT / ERROR_THRESHOLD_COUNT appear
# unused in this module chunk — presumably consumed elsewhere; confirm.
QUERY_LIMIT = '200000'
REPORT_COUNT = 10000
ERROR_THRESHOLD_COUNT = 5
# value for PROD is "https://orgbook.gov.bc.ca/api/v3"
ORGBOOK_API_URL = os.environ.get('ORGBOOK_API_URL', 'http://localhost:8081/api/v3')
# OrgBook API path fragments for topic lookups.
TOPIC_QUERY = "/topic/registration.registries.ca/"
TOPIC_NAME_SEARCH = "/search/topic?inactive=false&latest=true&revoked=false&name="
TOPIC_ID_SEARCH = "/search/topic?inactive=false&latest=true&revoked=false&topic_id="
# value for PROD is "https://agent-admin.orgbook.gov.bc.ca/credential/"
AGENT_API_URL = os.environ.get("AGENT_API_URL", "http://localhost:8021/credential/")
AGENT_API_KEY = os.environ.get("AGENT_API_KEY")
# default is to audit active (non-revoked) credentials; set the env var
# AUDIT_ALL_CREDENTIALS=true to include revoked ones as well
AUDIT_ALL_CREDENTIALS = (os.environ.get("AUDIT_ALL_CREDENTIALS", "false").lower() == 'true')
"""
Detail audit report - credential list from orgbook.
Reads from the orgbook database:
- wallet id for each credential
"""
async def process_credential_queue():
    """Audit OrgBook credentials against the agent wallet.

    Loads all (active, or all if AUDIT_ALL_CREDENTIALS) credentials from the
    OrgBook database, checks each credential id against the cached agent
    wallet ids, and falls back to the agent HTTP API for cache misses.
    Accumulates and reports credentials that are missing from the wallet,
    "extra" in OrgBook, or present in the agent but absent from the cache.
    """
    # preload agent wallet id's
    print("Get exported wallet id's from agent", datetime.datetime.now())
    agent_wallet_ids = get_agent_wallet_ids()
    print("# wallet id's:", len(agent_wallet_ids))
    conn = None
    try:
        conn = get_connection('org_book')
    except (Exception) as error:
        print(error)
        raise
    # get all the corps from orgbook
    print("Get credential stats from OrgBook DB", datetime.datetime.now())
    cred_filter = " and not credential.revoked " if not AUDIT_ALL_CREDENTIALS else ""
    sql4 = """select
        credential.credential_id, credential.id, credential.topic_id, credential.update_timestamp,
        topic.source_id, credential.credential_type_id, credential_type.description,
        credential.revoked, credential.inactive, credential.latest,
        credential.effective_date, credential.revoked_date, credential.revoked_by_id
        from credential, topic, credential_type
        where topic.id = credential.topic_id""" + cred_filter + """
        and credential_type.id = credential.credential_type_id
        order by id;"""
    corp_creds = []
    try:
        cur = conn.cursor()
        cur.execute(sql4)
        for row in cur:
            corp_creds.append({
                'credential_id': row[0], 'id': row[1], 'topic_id': row[2], 'timestamp': row[3],
                'source_id': row[4], 'credential_type_id': row[5], 'credential_type': row[6],
                'revoked': row[7], 'inactive': row[8], 'latest': row[9],
                'effective_date': row[10], 'revoked_date': row[11], 'revoked_by': row[12]
            })
        cur.close()
    except (Exception) as error:
        print(error)
        raise
    print("# orgbook creds:", len(corp_creds), datetime.datetime.now())
    i = 0
    agent_checks = 0
    cache_checks = 0
    missing = []
    extra_cred = []
    not_in_cache = []
    print("Checking for valid credentials ...", datetime.datetime.now())
    while i < len(corp_creds):
        # if cached we are good, otherwise check agent via api
        if not corp_creds[i]['credential_id'] in agent_wallet_ids.keys():
            agent_checks = agent_checks + 1
            api_key_hdr = {"x-api-key": AGENT_API_KEY}
            url = AGENT_API_URL + corp_creds[i]['credential_id']
            #print(i, url)
            try:
                response = requests.get(url, headers=api_key_hdr)
                response.raise_for_status()
                # NOTE(review): this 404 branch is unreachable —
                # raise_for_status() already raises for 404, landing in the
                # except block below, which produces the same outcome.
                if response.status_code == 404:
                    raise Exception("404 not found")
                else:
                    wallet_credential = response.json()
                    # exists in agent but is not in cache
                    not_in_cache.append(corp_creds[i])
            except Exception as e:
                # A revoked cred whose effective and revoked dates coincide is
                # treated as deliberately absent ("extra" in OrgBook).
                if (corp_creds[i]['revoked'] and corp_creds[i]['revoked_by'] is not None and
                        corp_creds[i]['effective_date'] == corp_creds[i]['revoked_date']):
                    print("Extra cred in TOB:", i, corp_creds[i]['credential_id'])
                    extra_cred.append(corp_creds[i])
                else:
                    print(
                        "Exception:", i, corp_creds[i]['credential_id'],
                        corp_creds[i]['topic_id'], corp_creds[i]['source_id'], corp_creds[i]['credential_type'],
                        corp_creds[i]['revoked'], corp_creds[i]['inactive'], corp_creds[i]['latest'],
                        corp_creds[i]['timestamp'],
                    )
                    missing.append(corp_creds[i])
        else:
            cache_checks = cache_checks + 1
        i = i + 1
        if 0 == i % 100000:
            print(i)
    # Persist newly discovered wallet ids so future runs hit the cache.
    append_agent_wallet_ids(not_in_cache)
    print("Total # missing in wallet:", len(missing), ", Extra:", len(extra_cred), datetime.datetime.now())
    print("Cache checks:", cache_checks, ", Agent checks:", agent_checks)
# Script entry point: run the async audit to completion on the default loop.
try:
    asyncio.get_event_loop().run_until_complete(process_credential_queue())
except Exception as err:
    print("Exception", err)
    raise
|
3,434 | 306240db8a1652fe7cd79808c40e4354c3158d3e | import json
import sys
import time
# boardName pageNum indexNewest
# Baseball 5000 5183
# Elephants 3500 3558
# Monkeys 3500 3672
# Lions 3300 3381
# Guardians 3500 3542
boardNameList = ["Baseball", "Elephants", "Monkeys", "Lions", "Guardians"]
def loadData(filename):
    """Load and return the JSON document stored in *filename*.

    Fixed: the original did json.loads(open(filename).read()) and never
    closed the file handle; use a context manager so it is always closed.
    """
    with open(filename) as json_file:
        return json.load(json_file)
def buildUserDict(userDict, _data, boardName):
    """Accumulate per-user, per-board statistics from _data into userDict.

    Per user and board the counters are:
      article    - posts authored
      article_g/article_b/article_n - total push/boo/neutral replies received
      g/b/n      - push/boo/neutral replies the user left on others' posts
    Returns the (mutated) userDict.
    """
    def board_stats(user):
        # Ensure userDict[user][boardName] exists and return its counter dict.
        per_board = userDict.setdefault(user, dict())
        return per_board.setdefault(
            boardName,
            {'article': 0, 'article_g': 0, 'article_b': 0, 'article_n': 0, 'g': 0, 'b': 0, 'n': 0},
        )

    for article in _data:
        author = article['b_作者'].split(" ")[0]
        author_stats = board_stats(author)
        author_stats['article'] += 1
        author_stats['article_g'] += article['h_推文總數']['g']
        author_stats['article_b'] += article['h_推文總數']['b']
        author_stats['article_n'] += article['h_推文總數']['n']
        for res in article['g_推文'].values():
            reply_stats = board_stats(res['留言者'])
            status = res['狀態']
            if status == u'噓 ':
                reply_stats['b'] += 1
            elif status == u'推 ':
                reply_stats['g'] += 1
            else:
                reply_stats['n'] += 1
    return userDict
def printFeature2File(userDict, filename):
    """Serialise *userDict* as JSON into *filename* (overwriting it)."""
    with open(filename, "w") as out_file:
        json.dump(userDict, out_file)
if __name__ == "__main__":
    # Usage: python3 extractFeatures.py <output-feature-json-path>
    # filename = str(sys.argv[1])
    featureFileOut = str(sys.argv[1])
    dataDir = "../data/"
    # Crawled article dumps, one per board (order matches boardNameList).
    filenameList = ['data-Baseball-5000-2017-06-29-03-25-05.json','data-Elephants-3500-2017-06-29-03-30-22.json',
                    'data-Monkeys-3500-2017-06-29-03-31-55.json','data-Guardians-3500-2017-06-29-04-12-43.json',
                    'data-Lions-3300-2017-06-29-04-11-50.json']
    #python3 extractFeatures.py ../data/userFeatureTest.json
    total_start = time.time()
    _start = time.time()
    userDict = dict()
    # Fold every board's articles into one cumulative user-feature dict,
    # timing the load and build steps per board.
    for index in range(len(filenameList)):
        print("Loading data from "+boardNameList[index]+" ...")
        _data = loadData(dataDir+filenameList[index])
        print("number of articles : "+str(len(_data)))
        print("Cost time : "+str(time.time()-_start)+" secs")
        _start = time.time()
        print("Building user dict...")
        boardName = boardNameList[index]
        userDict = buildUserDict(userDict, _data, boardName)
        print("Total user number : "+str(len(userDict.keys())))
        print("Cost time : "+str(time.time()-_start)+" secs")
        _start = time.time()
    # Write the combined features to the requested output file.
    print("Extract user features...")
    printFeature2File(userDict, featureFileOut)
    print("Cost time : "+str(time.time()-_start)+" secs")
    print("Total cost time : "+str(time.time()-total_start)+" secs")
    _start = time.time()
# for dd in _data:
# print("=====================================")
# print(dd['b_作者'].split(" ")[0])
# print(dd['h_推文總數']['b'])
# print(dd['h_推文總數']['g'])
# print(dd['h_推文總數']['all'])
# res = dd['g_推文']
# goodResList = list()
# BooResList = list()
# neutralResList = list()
# for rr in res:
# if res[rr]['狀態'] == u'噓 ':
# BooResList.append(res[rr]['留言者'])
# elif res[rr]['狀態'] == u'推 ':
# goodResList.append(res[rr]['留言者'])
# else:
# neutralResList.append(res[rr]['留言者'])
# print("噓"+str(BooResList))
# print("推"+str(goodResList))
# print("->"+str(neutralResList))
# print(_data[0]['c_標題'])
# print(_data[0]['h_推文總數'])
# print(_data[0]['g_推文']) |
3,435 | c66b07c45f4a675a6c7fcec82048a3197910d0d8 | marks = {
"S":"subject",
"O":"object",
"A":"attribute",
"C":"clause",
}
marks_reverse = {
"subject":"S",
"object":"O",
"attribute":"A",
"clause":"C",
}
|
3,436 | cbbe273a19a4e60b760e35aeb8d43972a46760f5 | import tensorflow as tf
from models.base_model import BaseModel
from utils.im_utils import batch_convert_2_int
from datasets.single_dataset import SingleDataset
from datasets.unpaired_dataset import UnpairedDataset
from models.generators.maskshadowgan_generators import Generator
from models.discriminators.maskshadowgan_discriminators import Discriminator
class MaskShadowGANModel(BaseModel):
    """
    Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.
    A: shadow images domain
    B: shadow free images domain
    Paper: https://arxiv.org/pdf/1903.10683.pdf
    """
    def __init__(self, opt, training):
        BaseModel.__init__(self, opt, training)
        # create placeholders for images and shadow masks; fakeA/fakeB are fed
        # from an image pool of previously generated fakes (see __loss)
        self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        # single-channel shadow masks; mask_non_shadow is a constant "no shadow"
        # mask (all -1) used for the identity mapping of F
        self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
    def generate_dataset(self):
        """
        Add ops for dataset loaders to graph.

        Training returns (iterA, iterB, nextA, nextB) for the unpaired A/B
        datasets; testing returns (iterA, nextA) for the shadow domain only.
        """
        if self.training:
            dataset = UnpairedDataset(self.opt, self.training)
            datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
            dataA_iter = datasetA.make_initializable_iterator()
            dataB_iter = datasetB.make_initializable_iterator()
            return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()
        else: # only need shadow dataset for testing
            dataset = SingleDataset(self.opt, self.training)
            datasetA = dataset.generate()
            dataA_iter = datasetA.make_initializable_iterator()
            return dataA_iter, dataA_iter.get_next()
    def build(self):
        """
        Build TensorFlow graph for MaskShadowGAN model.

        Training returns (fakeA, fakeB, optimizers, Gen_loss, D_A_loss,
        D_B_loss); testing returns just the shadow-removed output fakeB.
        """
        # add ops for generator (A->B) to graph
        self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,
                           init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                           training=self.training, name='G')
        if self.training:
            # add ops for other generator (B->A) and discriminators to graph
            self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
                               norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                               init_gain=self.opt.weight_init_gain, training=self.training, name='F')
            self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
                                     norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                                     init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')
            self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
                                     norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                                     init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')
            # generate fake images (F additionally conditions on a shadow mask)
            fakeB = self.G(self.realA)
            fakeA = self.F(self.realB, self.rand_mask)
            # generate reconstructed images (cycle A->B->A and B->A->B)
            reconstructedA = self.F(fakeB, self.last_mask)
            reconstructedB = self.G(fakeA)
            # generate identity mapping images
            identA = self.G(self.realB)
            identB = self.F(self.realA, self.mask_non_shadow)
            tf.summary.image('A/original', batch_convert_2_int(self.realA))
            tf.summary.image('B/original', batch_convert_2_int(self.realB))
            tf.summary.image('A/generated', batch_convert_2_int(fakeA))
            tf.summary.image('B/generated', batch_convert_2_int(fakeB))
            tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))
            tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))
            # add loss ops to graph
            Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,
                                                       reconstructedB, identA, identB)
            # add optimizer ops to graph
            optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
            return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
        else: # only need generator from A->B during testing
            fakeB = self.G(self.realA)
            return fakeB
    def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):
        """
        Compute the losses for the generators and discriminators.
        """
        # compute the generators loss
        G_loss = self.__G_loss(self.D_B, fakeB)
        F_loss = self.__G_loss(self.D_A, fakeA)
        cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
        ident_loss = self.__identity_loss(identA, identB)
        Gen_loss = G_loss + F_loss + cc_loss + ident_loss
        # Compute the disciminators loss. Use fake images from image pool to improve stability
        # (self.fakeA / self.fakeB placeholders, not this step's fakes)
        D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
        D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
        return Gen_loss, D_A_loss, D_B_loss
    def __D_loss(self, D, real, fake):
        """
        Compute the discriminator loss.
        (MSE Loss):
        L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
        """
        loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \
                      tf.reduce_mean(tf.square(D(fake))))
        return loss
    def __G_loss(self, D, fake):
        """
        Compute the generator loss.
        (MSE Loss):
        L_gen = Expectation of (D(G(A)) - 1)^2
        """
        loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
        return loss
    def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
        """
        Compute the cycle consistenty loss.
        L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
                lamB * [Expectation of L1_norm(G(F(B)) - B)]
        """
        loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
               self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))
        return loss
    def __identity_loss(self, identA, identB):
        """
        Compute the identity loss.
        L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
                lamB * [Expectation of L1_norm(G(B) - B)]]
        """
        loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
               self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))
        return loss
    def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
        """
        Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
        https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py
        """
        def make_optimizer(loss, variables, name='Adam'):
            """ Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
                and a linearly decaying rate that goes to zero over the next 100k steps
            """
            global_step = tf.Variable(0, trainable=False, name='global_step')
            starter_learning_rate = self.opt.lr
            end_learning_rate = 0.0
            start_decay_step = self.opt.niter
            decay_steps = self.opt.niter_decay
            beta1 = self.opt.beta1
            # constant LR until start_decay_step, then linear (power=1) decay to 0
            learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
                                      tf.train.polynomial_decay(starter_learning_rate,
                                                                global_step-start_decay_step,
                                                                decay_steps, end_learning_rate,
                                                                power=1.0),
                                      starter_learning_rate))
            learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
                             .minimize(loss, global_step=global_step, var_list=variables))
            return learning_step
        Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')
        D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')
        D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')
        with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):
            return tf.no_op(name='optimizers')
|
3,437 | 6334a8a052d72b0f13395b301bd5a766acf4399b | from utils import create_data_lists
if __name__ == '__main__':
    # Both the raw ICDAR dataset and the generated lists live in the same folder.
    dataset_dir = '../ICDAR_Dataset/0325updated.task1train(626p)'
    create_data_lists(ICDAR_path=dataset_dir, output_folder=dataset_dir)
|
3,438 | c846c33ef13795d51c6d23ffa5a6b564b66e6a3c | from pathlib import Path
from build_midi.appenders import *
from build_midi.converters import Converter
from build_midi.melody_builder import MelodyBuilder
from build_midi.sequences import *
from build_midi.tracks import *
from music_rules.instruments import Instruments
from music_rules.music_scale import MusicScale
from weather.weather_api import WeatherApi
class WeatherToMusicConverter:
    """Turn a city's weather forecast into a multi-track MIDI song.

    One musical phrase of PHRASE_LENGTH ticks is generated per forecast
    timestamp; temperature, rain, clouds, humidity and wind each get their
    own track, channel and instrument.
    """

    # Length (in MIDI ticks) of the phrase generated per forecast timestamp.
    PHRASE_LENGTH = 1200
    # Directory the generated .mid files are written into.
    OUTPUT_FILE_DIR = 'midi_out'
    music_scales = MusicScale()

    def weather_to_music(self, api_key, city) -> MidiFile:
        """Fetch the forecast for *city*, build the song, save it, return the MidiFile.

        api_key: weather API key passed through to WeatherApi.
        """
        api_handling = WeatherApi()
        converter = Converter()
        weather_forecast = api_handling.get_weather_forecast_from_api(city, api_key)
        # Overall tempo is derived from the average temperature of the forecast.
        average_temperature = converter.average_temperature(weather_forecast.weather_timestamps)
        ticks_per_beat = converter.average_temperature_to_ticks_per_beat(average_temperature)
        outfile = MidiFile()
        outfile.ticks_per_beat = ticks_per_beat
        melody_builder = MelodyBuilder(outfile, self.PHRASE_LENGTH)
        # One track (channel, instrument) per weather dimension.
        temperature = TemperatureTrack(1, Instruments.BrightAcousticPiano)
        rain = RainTrack(2, Instruments.Celesta)
        clouds = CloudsTrack(3, Instruments.TremoloStrings)
        humidity = HumidityTrack(4, Instruments.ElectricGuitar_clean)
        wind = WindTrack(5, Instruments.Seashore)
        for track in [temperature, rain, clouds, humidity, wind]:
            melody_builder.set_instrument(track.get_track(), track.get_channel(), track.get_instrument())
        for entry in weather_forecast.weather_timestamps:
            # The phrase's base note (and hence its scale) follows the felt temperature.
            base_note = converter.temperature_to_base_note(entry.temperature.feels_like)
            music_scale = self.music_scales.melodic_minor(base_note)
            temperature_sequence = TemperatureSequence(entry.temperature, self.PHRASE_LENGTH, base_note, temperature.get_track())
            temperature_appender = TemperatureAppender()
            temperature_appender.append(melody_builder, temperature_sequence, temperature)
            rain_sequence = RainSequence(entry.weather.rain, self.PHRASE_LENGTH, base_note, rain.get_track(), music_scale)
            rain_appender = RainAppender()
            rain_appender.append(melody_builder, rain_sequence, rain)
            clouds_sequence = CloudsSequence(entry.weather.clouds, self.PHRASE_LENGTH, base_note, clouds.get_track())
            clouds_appender = CloudsAppender()
            clouds_appender.append(melody_builder, clouds_sequence, clouds)
            humidity_sequence = HumiditySequence(entry.weather.humidity, self.PHRASE_LENGTH, base_note, humidity.get_track())
            humidity_appender = HumidityAppender()
            humidity_appender.append(melody_builder, humidity_sequence, humidity)
            wind_sequence = WindSequence(entry.weather.wind_speed, self.PHRASE_LENGTH, base_note, wind.get_track())
            wind_appender = WindAppender()
            wind_appender.append(melody_builder, wind_sequence, wind)
        for track in [temperature.get_track(), rain.get_track(), clouds.get_track(), humidity.get_track(), wind.get_track()]:
            outfile.tracks.append(track)
        file_name = 'weather_song_' + weather_forecast.city + '_' + weather_forecast.country + '_' + str(weather_forecast.weather_timestamps[0].timestamp)
        self.save_file(outfile, self.OUTPUT_FILE_DIR, file_name)
        return outfile

    def save_file(self, outfile: MidiFile, file_dir: str, file_name: str) -> MidiFile:
        """Write *outfile* to <file_dir>/<file_name>.mid (creating the dir) and return it."""
        Path(file_dir).mkdir(exist_ok=True)
        file_path = file_dir + '/' + file_name + '.mid'
        outfile.save(file_path)
        print('file saved at ' + file_path)
        return outfile

    def get_midi_track_time(self, midi_track: MidiTrack):
        """Return the total delta time of all messages on *midi_track*.

        Fixed: the original accumulated into a local variable named ``sum``,
        shadowing the builtin; use the builtin sum() instead (same result).
        """
        return sum(message.time for message in midi_track)
|
3,439 | 0c736bb5c88a8d7ee359e05fe12f0b77d83146c8 | #!/usr/bin/python2.7
#
# Assignment2 Interface
#
import psycopg2
import os
import sys
import Assignment1 as a
# Donot close the connection inside this file i.e. do not perform openconnection.close()
#range__metadata = RangeRatingsMetadata
#roundR_metadata = RoundRobinRatingsMetadata
#rangetablepartition = rangeratingspart
def RangeQuery(ratingsTableName, ratingMinValue, ratingMaxValue, openconnection):
    """Write every rating in [ratingMinValue, ratingMaxValue] to RangeQueryOut.txt.

    Scans the relevant range partitions (chosen via rangeratingsmetadata's
    maxrating column) and all round-robin partitions, writing matching rows
    as "<partition>,<userid>,<movieid>,<rating>" lines. Invalid bounds just
    print a message. Does NOT close openconnection (per assignment rules).
    Note: written for Python 2 (`print E` statement below).
    """
    try:  # Implement RangeQuery here.
        cur = openconnection.cursor()
        ratings_Min = ratingMinValue
        ratings_Max = ratingMaxValue
        # Bounds must be valid ratings and form a non-empty interval.
        if ((0.0<=ratings_Min <= 5.0) and (0.0<=ratings_Max<= 5.0) and (ratings_Max >=ratings_Min)):
            cur.execute("SELECT maxrating from rangeratingsmetadata")
            # upper bound of each range partition, in partition order
            upperbound_range = cur.fetchall()
            i=0
            #print upperbound_range
            # First partition whose upper bound covers ratings_Min ...
            while(1):
                #print upperbound_range[i][0]
                if (ratings_Min > upperbound_range[i][0]):
                    i = i+1
                else:
                    lower_bound = i
                    #print "the lower table index is", lower_bound
                    break
            i = 0
            # ... and the partition covering ratings_Max.
            while(1):
                if (ratings_Max > upperbound_range[i][0]):
                    i = i+1
                else:
                    upper_bound = i
                    #print "the upper table index is", upper_bound
                    break
            range_list_table_lookup = range(lower_bound,upper_bound+1)
            #print range_list_table_lookup
            # NOTE(review): `file` shadows the Python 2 builtin of the same name.
            file = open("RangeQueryOut.txt","w")
            for l in range_list_table_lookup:
                rows = []
                cur.execute('SELECT * from rangeratingspart' + str(l))
                rows = cur.fetchall()
                #print rows
                for row in rows:
                    rat = row[2]
                    if (ratings_Min <= rat <= ratings_Max):
                        file.write("{},{},{},{} \n".format("rangeratingspart" + str(l),row[0],row[1],row[2]))
            #file.close()
            # Round-robin partitions hold arbitrary ratings, so scan them all.
            cur.execute('SELECT * from RoundRobinRatingsMetadata')
            numberofpartitionslist = cur.fetchall()
            numberofpartitions = numberofpartitionslist[0][0]
            for l in range(numberofpartitions):
                cur.execute('SELECT * from RoundRobinRatingsPart' + str(l))
                rows = []
                rows = cur.fetchall()
                for row in rows:
                    rat = row[2]
                    if (ratings_Min <= rat <= ratings_Max):
                        file.write("{},{},{},{} \n".format("roundrobinratingspart" + str(l),row[0],row[1],row[2]))
            file.close()
        else:
            print ("Please enter the valid values")
        cur.close()
    except Exception as E:
        print E
def PointQuery(ratingsTableName, ratingValue, openconnection):
    """Write every row whose rating equals *ratingValue* to PointQueryOut.txt.

    Looks up the single range partition whose maxrating covers the value via
    RangeRatingsMetadata, then scans every round-robin partition.  Output
    lines are ``<partition table>,<c0>,<c1>,<rating>``.
    """
    cur = openconnection.cursor()
    pointvalue = ratingValue
    if 0.0 <= pointvalue <= 5.0:
        cur.execute('SELECT maxrating from RangeRatingsMetadata')
        Range_upper = cur.fetchall()
        # Walk the ordered per-partition upper bounds until the first one that
        # covers pointvalue; its index is the partition suffix.
        i = 0
        while 1:
            if pointvalue > Range_upper[i][0]:
                i = i + 1
            else:
                # Bug fix: this was `table_suffxi = i`, so the reads of
                # `table_suffix` below raised NameError on every call.
                table_suffix = i
                break
        cur.execute('SELECT * from rangeratingspart' + str(table_suffix))
        rows = cur.fetchall()
        file1 = open("PointQueryOut.txt", "w")
        for row in rows:
            rat = row[2]
            if rat == pointvalue:
                file1.write("{},{},{},{} \n".format("rangeratingspart" + str(table_suffix), row[0], row[1], row[2]))
        # Round-robin rows may live in any partition, so scan them all.
        cur.execute('SELECT * from RoundRobinRatingsMetadata')
        numberofpartitionslist = cur.fetchall()
        numberofpartitions = numberofpartitionslist[0][0]
        for l in range(numberofpartitions):
            cur.execute('SELECT * from RoundRobinRatingsPart' + str(l))
            rows = cur.fetchall()
            for row in rows:
                rat = row[2]
                if rat == pointvalue:
                    file1.write("{},{},{},{} \n".format("roundrobinratingspart" + str(l), row[0], row[1], row[2]))
        file1.close()
    else:
        print("please enter a valid rating value")
    cur.close()
|
3,440 | da0076ab18531e5b8a1de909cb9178de6327d6b0 | """
The MIT License (MIT)
Copyright (c) 2015 Tommy Carpenter
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
"""
Warning; here be dragons. Documentation needed.
No try excepts here unless the MR job can complete without them!
Fail fast and have the exception stack show up in the Hadoop interface logs
"""
def _filtering_parsing_helper(filter_cols_key, filter_vals_key, filter_invert_key):
filter_vals = os.environ[filter_vals_key].split("|")
inverts = [int(y) for y in os.environ[filter_invert_key].split("|")]
filter_dict = {}
for xindex, x in enumerate([int(y) for y in os.environ[filter_cols_key].split("|")]):
filter_dict[x] = {}
filter_dict[x]["filter_vals"] = filter_vals[xindex].split(",")
filter_dict[x]["invert"] = inverts[xindex]
return filter_dict
def _filtering_passed_helper(filter_dict, vals):
yield_row = True
for filter_col in filter_dict.keys():
if (filter_dict[filter_col]["invert"] and vals[filter_col] in filter_dict[filter_col]["filter_vals"]) or (not filter_dict[filter_col]["invert"] and vals[filter_col] not in filter_dict[filter_col]["filter_vals"]):
yield_row = False
break
return yield_row
def _kv_helper(cache, value):
"""shared code between select_where and select_join
splits vals, see if filtering passes, forms the key from key_columns and forms the values from target_columns
"""
vals = [v.replace('"','') for v in value.split(cache["delimiter"])]
if "filtering" not in cache or _filtering_passed_helper(cache["filtering"], vals): #yield if filtering criteria met or no filtering criteria
k = "+".join(vals) if cache["key_columns"] == "*" else "+".join(vals[l] for l in cache["key_columns"])
v = ",".join(vals) if cache["target_columns"] == "*" else ",".join([vals[l] for l in cache["target_columns"]])
return k, v
return None, None
def identity_mapper(key, value):
    """Pass (key, value) through untouched; used when all the work happens in
    the reduce phase."""
    yield (key, value)
def token_count_mapper(key, value):
    """Emit (token, 1) for every whitespace-separated token in *value*.

    Combined with a summing reducer this implements word count; *key* (the
    byte offset) is ignored.
    """
    for tok in value.split():
        yield (tok, 1)
def select_where(key, value, cache={}):
    """Mapper implementing SELECT ... WHERE over delimited text rows.

    Configuration arrives via environment variables (Hadoop jobconfs):
    ``delimiter``, ``key_columns`` and ``target_columns`` (each either ``"*"``
    or a comma-delimited list of column indexes), plus the optional filtering
    triple ``filter_columns`` / ``filter_vals`` / ``invert_filter_vals``
    (pipe-delimited -- see _filtering_parsing_helper) which together form the
    WHERE clause; all three must be present for filtering to apply.  Parsed
    configuration is memoised in the mutable default *cache* so the
    environment is only read once per mapper process.

    Yields a (key_string, value_string) pair for each row that passes the
    filter; filtered rows yield nothing.
    """
    filter_keys = ("filter_columns", "filter_vals", "invert_filter_vals")
    if "filtering" not in cache and all(k in os.environ for k in filter_keys):
        cache["filtering"] = _filtering_parsing_helper(*filter_keys)
    if "delimiter" not in cache:
        cache["delimiter"] = os.environ["delimiter"]
    for option in ("target_columns", "key_columns"):
        if option not in cache:
            raw = os.environ[option]
            cache[option] = "*" if raw == "*" else [int(c) for c in raw.split(",")]
    k, v = _kv_helper(cache, value)
    if k and v:
        yield k, v
def join_mapper(key, value, cache={}):
    """Mapper side of a two-table equi-join ("table" = all files under one
    HDFS root directory).

    The owning table of the current split is found by prefix-matching
    ``mapreduce_map_input_file`` against the comma-separated jobconfs
    ``table_1_path`` / ``table_2_path`` (known issue: no glob handling).
    Per-table jobconfs then drive the work: ``table_N_key_columns`` (join
    key), ``table_N_target_columns`` (``"*"`` or a comma-delimited column
    list), ``table_N_delimiter``, and the optional filtering triple
    ``table_N_filter_columns`` / ``_filter_vals`` / ``_invert_filter_vals``
    (see select_where for the WHERE semantics).  Parsed configuration is
    memoised in the mutable default *cache*; each mapper process only ever
    reads one table's splits, so the cache never mixes tables.

    Yields (join_key, {"table": <table path jobconf>, "row": <selected
    columns>}) for rows passing the filter; combined with join_inner_reducer
    this implements an INNER JOIN.
    """
    INPUT = os.environ["mapreduce_map_input_file"]
    PREFIX = None
    # Table 1 is checked first, then table 2 -- same order as the original.
    for candidate in ("1", "2"):
        paths = os.environ["table_{0}_path".format(candidate)].split(",")
        if any(INPUT.startswith(p) for p in paths):
            PREFIX = candidate
            break
    if not PREFIX:
        raise Exception("Bug: File {0} matches neither input path 1 ({1}) or input path 2 ({2})".format(INPUT, os.environ["table_1_path"], os.environ["table_2_path"]))
    TABLE = os.environ["table_{0}_path".format(PREFIX)]
    filter_keys = tuple("table_{0}_{1}".format(PREFIX, suffix) for suffix in
                        ("filter_columns", "filter_vals", "invert_filter_vals"))
    if "filtering" not in cache and all(k in os.environ for k in filter_keys):
        cache["filtering"] = _filtering_parsing_helper(*filter_keys)
    if "key_columns" not in cache:
        cache["key_columns"] = [int(x) for x in os.environ["table_{0}_key_columns".format(PREFIX)].split(",")]
    if "target_columns" not in cache:
        raw = os.environ["table_{0}_target_columns".format(PREFIX)]
        cache["target_columns"] = "*" if raw == "*" else [int(x) for x in raw.split(",")]
    if "delimiter" not in cache:
        cache["delimiter"] = os.environ["table_{0}_delimiter".format(PREFIX)]
    k, v = _kv_helper(cache, value)
    if k and v:
        yield k, {"table": TABLE, "row": v}
|
3,441 | a96761fc483c0883b058c2b045b038522c23d426 | T = int(input())
for i in range(T):
start, end = map(int, input().split())
between = end-start
flag = 0
num =1
while between>0:
if flag%2==1:
between-=num
num+=1
flag+=1
else:
between-=num
flag+=1
print(flag)
|
3,442 | d66945add0726c85b8ac29056269ed55c6eb9369 | # Copyright 2020 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
import numpy as np
def _MinutesToMicroseconds(minutes):
return minutes * 60 * 1_000_000
def _AssignUniqueIDAndFlooredTimeAsKey(row):
    """Key *row* by "<unique_id>-<encoded 5-minute bucket>" for grouping.

    The bucket index (row time divided by five minutes in microseconds,
    floored) is passed back through _MinutesToMicroseconds; the key only
    needs to be distinct per (vm, bucket), so any injective encoding works.
    """
    five_minutes = _MinutesToMicroseconds(5)
    bucket_code = _MinutesToMicroseconds(int(np.floor(row["time"] / five_minutes)))
    composite_key = str(row["info"]["unique_id"]) + "-" + str(bucket_code)
    return (composite_key, row)
class _PickMaxRecord(beam.DoFn):
    """Collapse all samples grouped under one (vm, 5-minute bucket) key into a
    single sample: "time" and "info" come from the first record, each numeric
    metric is the max across records (NaNs coerced to 0 first, so an all-NaN
    metric becomes 0.0), and each abstract metric is a plain max.
    """
    def process(self, data):
        # data is a (key, iterable-of-samples) pair from GroupByKey; the key
        # itself is no longer needed.
        _, streams = data
        time_dicts = []
        info_dicts = []
        metrics_dicts = []
        abstract_metrics_dicts = []
        for d in streams:
            time_dicts.append(d["time"])
            info_dicts.append(d["info"])
            metrics_dicts.append(d["metrics"])
            abstract_metrics_dicts.append(d["abstract_metrics"])
        vm_sample = {
            # Representative time/info: taken from the first record in the group.
            "time": time_dicts[0],
            "info": info_dicts[0],
            "metrics": {
                # nan_to_num maps NaN -> 0.0 before nanmax, so missing values
                # never win but an all-NaN metric yields 0.0 rather than NaN.
                k: np.nanmax(
                    np.nan_to_num(
                        np.array([d[k] for d in metrics_dicts], dtype=np.float64)
                    )
                )
                for k in metrics_dicts[0]
            },
            "abstract_metrics": {
                k: max([d[k] for d in abstract_metrics_dicts])
                for k in abstract_metrics_dicts[0]
            },
        }
        return [vm_sample]
def _VMSampleToSimulatedSample(vm_sample):
    """Wrap *vm_sample* with its 5-minute-aligned simulated time and the id
    (as a string) of the machine it is simulated on."""
    five_minutes = _MinutesToMicroseconds(5)
    bucket_index = int(np.floor(vm_sample["time"] / five_minutes))
    return {
        "simulated_time": five_minutes * bucket_index,
        "simulated_machine": str(vm_sample["info"]["machine_id"]),
        "sample": vm_sample,
    }
def AlignByTime(data):
    """Beam transform: downsample each VM's sample stream to one record per
    5-minute bucket (element-wise max of its metrics) tagged with the
    simulated time and machine.  Returns the resulting PCollection."""
    keyed_data = data | "Flooring time" >> beam.Map(_AssignUniqueIDAndFlooredTimeAsKey)
    five_minute_groups = keyed_data | "Group Data by Keys" >> beam.GroupByKey()
    max_record = five_minute_groups | "Pick Max Record in 5 Minutes" >> beam.ParDo(
        _PickMaxRecord()
    )
    simulated_sample = max_record | "Change VMSample to SimulatedSammple" >> beam.Map(
        _VMSampleToSimulatedSample
    )
    return simulated_sample
|
3,443 | 632b90ea5a2ac35539e589af297c04b31bbf02d0 | import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
# Static Dash layout for the "Node 6" page: a heading, a city dropdown, and a
# placeholder div whose contents are filled in by the display_value callback.
layout = html.Div([
    html.H3('Node 6'),
    dcc.Dropdown(
        id='node-6-dropdown',
        options=[
            {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [
                'NYC', 'MTL', 'LA'
            ]
        ]
    ),
    html.Div(id='node-6-display-value'),
])
@app.callback(
    Output('node-6-display-value', 'children'),
    [Input('node-6-dropdown', 'value')])
def display_value(value):
    """Echo the current dropdown selection into the display div."""
    selection_text = 'You have selected "{}"'.format(value)
    return selection_text
|
3,444 | 290b8b4c3aeafc84b1e9cce7e6d2a5e770bd8716 | cadena = input("Introduzca su cadena: ")
separador = input("Introduzca el separador: ")
print(cadena.replace(" ", separador)) |
3,445 | 32c28c7a1e1572744387b509fc6a448554ed565e | # Generated by Django 2.2.5 on 2019-10-28 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``my_resume`` choice field to the user model."""

    dependencies = [
        ('user', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='my_resume',
            # NOTE(review): default=True is a *boolean* default on a CharField;
            # Django stores it as the string "True", which is not one of the
            # declared choices.  Looks unintentional, but confirm before
            # touching an already-applied migration.
            field=models.CharField(choices=[('', ''), ('삼성전자', '삼성전자')], default=True, max_length=80),
        ),
    ]
|
3,446 | d5c2b73c202c9944cd64798ef5ddc08ce68a4a9a | import os.path
from flask import url_for
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey
from sqlalchemy.orm import relationship
from tuneful import app
from .database import Base, engine, session
class Song(Base):
    """A song: essentially a pointer to the uploaded File row holding its audio."""
    __tablename__ = 'songs'
    id = Column(Integer, primary_key=True)
    # Every song must reference an uploaded file.
    file_id = Column(Integer, ForeignKey('files.id'), nullable=False)
    def as_dictionary(self):
        """Serialise to {"id": ..., "file": {"id": ..., "filename": ...}} for
        the JSON API, loading the referenced File row by primary key."""
        file_data = session.query(File).get(self.file_id)
        song_dict = {
            "id": self.id,
            "file": {
                "id": file_data.id,
                "filename": file_data.filename
            }
        }
        return song_dict
class File(Base):
    """An uploaded audio file on disk, identified by its filename."""
    __tablename__ = 'files'
    id = Column(Integer, primary_key=True)
    filename = Column(String, nullable=False)
    # One-to-one link to the Song that uses this file.
    # NOTE(review): backref="song" makes the reverse attribute Song.song point
    # back at the File while File.song points at the Song -- both named
    # "song"; confirm the naming is intended.
    song = relationship("Song", uselist=False, backref="song")
    def as_dictionary(self):
        """Serialise to {"id": ..., "filename": ...} for the JSON API."""
        file_dict = {
            "id": self.id,
            "filename": self.filename
        }
        return file_dict
3,447 | 856e62cf4cd443c7b3397e926f8fc4fece145f5b | # coding=utf-8
class Movie:
    """Plain value object describing one movie listing."""

    def __init__(self, movieid, moviename, score, poster):
        # Mirror each constructor argument onto the instance.
        self.movieid = movieid
        self.moviename = moviename
        self.score = score
        self.poster = poster
# Emit HTML <option> tags for the day-of-month values 1..31.
for day in range(1, 32):
    print('<option value ="{0}">{0}</option>'.format(day))
3,448 | 691075aa5c629e2d0c486ec288cd39bc142cdc7a | # Simple read based on the py _sql context
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
flow_data = sc._jvm.com.tetration.apps.IO.read(sqlContext._ssql_ctx, "/tetration/flows/", "PARQUET", "LASTHOUR")
flow_data.registerTempTable("flowtab")
# show the unique src_address and dst_address pairs
df = sqlContext.sql("select src_address, dst_address from flowtab where dst_address like '10.66.239.%' group by src_address, dst_address order by dst_address")
df.show(1000)
# show the unique dst_addresses
df = sqlContext.sql("select dst_address from flowtab where dst_address like '10.66.239.%' group by dst_address order by dst_address")
df.show(1000)
# show the sum of fwd_bytes of each dst_address
dstIPs = df.rdd.map(lambda p: "" + p.dst_address).collect()
for dstip in dstIPs:
sql = "select src_address, dst_address, sum(fwd_bytes) from flowtab where dst_address like \'" + dstip + "\' group by src_address, dst_address"
print(sql)
sqlContext.sql(sql).show()
|
3,449 | 23e673909b2f1eb9a265ce84ad63464e20e99c6a | # coding=utf-8
import pyautogui
from xpinyin import Pinyin
# 0 = use the inline tuple of phrases below; 1 = load phrases from rubbish_dic.txt
rubbish_dic=1
if rubbish_dic==0:
    chinese_rubbish=(
        u"草泥马",
        u"你妈死了",
        u"你是不是",
        u"低能",
        u"人话都听不懂",
        u"没家教的狗东西",
    )
elif rubbish_dic==1:
    # NOTE(review): the file handle is never closed -- prefer a with-block.
    rubbish_file=open("rubbish_dic.txt")
    chinese_rubbish=rubbish_file.read().splitlines()
rubbish_set=[] # final pinyin keystroke sequences to type
p=Pinyin() # converter from Chinese characters to pinyin
# Switch to the target window by clicking into it.
def trans_screen():
    """Double-click the (hard-coded) chat input coordinates and press Enter
    to give the chat window focus."""
    pyautogui.doubleClick(492,974)
    pyautogui.typewrite(['enter'],0.01)
# Convert the Chinese phrases into pinyin keystroke sequences.
def trans_chinese():
    """For each configured phrase, append its pinyin letters plus a trailing
    "1" (presumably to select the first IME candidate -- confirm) to the
    global rubbish_set."""
    for c_rubbish in chinese_rubbish:
        pin=p.get_pinyin(c_rubbish,'')
        pin_list=list(pin)
        pin_list.append("1")
        rubbish_set.append(pin_list)
# Type out each prepared sequence followed by Enter.
def send_rubbish():
    """Type every keystroke sequence in rubbish_set, pressing Enter after each."""
    for p_rubbish in rubbish_set:
        pyautogui.typewrite(p_rubbish,0.01)
        pyautogui.typewrite(['enter'],0.01)
# Debug helper: inspect the current contents of rubbish_set.
def chk_rubbish():
    """Print each prepared keystroke sequence."""
    for p_dirty in rubbish_set:
        print(p_dirty)
if __name__ == "__main__":
    # Prepare the sequences, focus the chat window, then type them all.
    trans_chinese()
    #chk_rubbish()
    trans_screen()
    send_rubbish()
|
3,450 | 49e1dc98ecc2e5c12c6e520721a6c0a7c2665cca | from flask import Flask, jsonify, make_response, request
app = Flask(__name__)
VERSION = (0, 1, 0)
VERSION_STRING = "{}.{}.{}".format(*VERSION)
LANG_ID = "lang.natural.english"
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.route("/")
def entry():
    """Service-discovery document: this service's identity and version plus
    the relative paths (under "base") of its reason and translate endpoints."""
    return jsonify([{
        "id": "com.natlang",
        "name": "NatLang",
        "website": "https://aaron.stockdill.nz/",
        "version": VERSION_STRING,
        "description": "A placeholder natural language reasoner.",
        "icon": "",
        "base": "http://aarons-macbook.local:5003/api/{}".format(VERSION_STRING),
        "provides": {
            "reason": "/reason",
            "translate": "/translate"
        }
    }])
@app.route("/api/{}/reason".format(VERSION_STRING))
def reason_base():
    """List the reasoning rules this service offers: a single manual
    English-language rule with id "reasonEnglish"."""
    return jsonify({
        "result": "success",
        "reasoning": [[LANG_ID, "manual", "reasonEnglish", "Manually reason with natural language."]]
    })
def common_transform(json_data, key):
    """Copy ``json_data[key]``, overwrite its "data" with the request's
    "extraInfo" and stamp this service's language id.  The new dict is
    returned (and echoed to stdout for debugging)."""
    transformed = json_data.get(key).copy()
    transformed["data"] = json_data.get("extraInfo")
    transformed["language"] = LANG_ID
    print(transformed)
    return transformed
@app.route("/api/{}/reason/apply".format(VERSION_STRING), methods=["GET", "POST"])
def reason_apply():
    """Apply the reasoning rule named by ?id=... to the posted goal.

    Only "reasonEnglish" is recognised: the goal is rewritten via
    common_transform; a rewritten goal with empty "data" yields no new goals
    (i.e. the goal is considered closed).  Unknown rule ids fail.
    """
    rule_id = request.args.get("id")
    if rule_id == "reasonEnglish":
        json_data = request.get_json()
        new_goal = common_transform(json_data, "goal")
        return jsonify({
            "result": "success",
            "newGoals": [new_goal] if new_goal["data"] else []
        })
    else:
        return jsonify({
            "result": "failure",
            "reason": "Unknown rule ID."
        })
@app.route("/api/{}/translate".format(VERSION_STRING))
def translate_base():
    """Advertise "manual" translations between this language and each other
    known language, in both directions."""
    other_languages = ["lang.speedith", "lang.isabelle"]
    def all_pairs(xs, ys):
        # Every (x, y) combination and its reverse (y, x).
        for x in xs:
            for y in ys:
                yield (x, y)
                yield (y, x)
    return jsonify({
        "result": "success",
        "translations": [(from_lang, to_lang, "manual")
                         for (from_lang, to_lang) in all_pairs(other_languages, [LANG_ID])]
    })
@app.route("/api/{}/translate/translate".format(VERSION_STRING), methods=["GET", "POST"])
def translate_apply():
    """Translate the posted formula between ?from=... and ?to=... languages.

    Succeeds only when this service's language is one end of the pair; the
    formula is then rewritten via common_transform.
    """
    from_language = request.args.get("from")
    to_language = request.args.get("to")
    # Debug trace of the language-pair check.
    print(LANG_ID in {from_language, to_language}, LANG_ID, from_language, to_language)
    if LANG_ID in {from_language, to_language}:
        json_data = request.get_json()
        new_goal = common_transform(json_data, "formula")
        return jsonify({
            "result": "success",
            "formula": new_goal
        })
    else:
        return jsonify({
            "result": "failure",
            "reason": "Unable to translate when one of the languages is not {}".format(LANG_ID)
        })
if __name__ == "__main__":
    # Run the Flask development server with default host/port.
    app.run()
|
3,451 | 5cdf8cd4bfebb9aab2e8f421047fc1ba3190d566 | ##
#Author: Stephen
##
import socket
import select
import sys, os
from contextlib import contextmanager
hostip = 'localhost'
hostport = 8089
def connect(hostip=hostip, hostport=hostport):
    """Open and return a TCP connection to the chat server (defaults come
    from the module-level host/port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((hostip, hostport))
    return sock
def terminal_mode():
    """Interactive chat loop: multiplex stdin and the server socket with
    select(), printing server messages and forwarding typed lines.

    Typing ``exit`` on its own line closes the connection and returns.
    """
    server = connect()
    server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
    while True:
        # Wait until the server has data OR the user has typed a full line.
        sockets_list = [sys.stdin, server]
        read_sockets, write_socket, error_socket = select.select(sockets_list, [], [])
        for socks in read_sockets:
            if socks == server:
                message = socks.recv(2048)
                sys.stdout.write("[Server]: " + message.decode("UTF-8"))
                sys.stdout.write("\n\n[You]: ")
                sys.stdout.flush()
            else:
                message = sys.stdin.readline()
                # Bug fix: readline() keeps the trailing newline, so the old
                # comparison `message == 'exit'` could never be true; strip
                # before comparing.  Also close the socket on exit -- the old
                # cleanup lines sat after `while True` and were unreachable.
                if message.strip() == 'exit':
                    print('Connection Closed.')
                    server.close()
                    return
                server.send(bytes(message, 'UTF-8'))
def send_command(message):
    """One-shot request: connect to the server, discard the welcome banner,
    send *message*, and return the decoded reply."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostip, hostport))
    conn.recv(2048)  # suppress welcome message
    conn.send(bytes(message, 'UTF-8'))
    reply = conn.recv(2048)
    conn.close()
    #print(reply.decode("UTF-8"))
    return reply.decode("UTF-8")
def WIN_read_socket(server):
    """Reader thread for Windows: print every message the server sends.

    On Windows select() only accepts sockets (not stdin), so reading and
    writing are split into two threads; this one only watches *server*.
    """
    while True:
        # Bug fix: select() must run on EVERY iteration.  The original called
        # it once before the loop, so after the first wake-up the loop spun on
        # a stale ready-list and busy-read the socket.
        read_sockets, write_socket, error_socket = select.select([server], [], [])
        for socks in read_sockets:
            if socks == server:
                message = socks.recv(2048)
                sys.stdout.write("[Server]: " + message.decode("UTF-8"))
                sys.stdout.write("\n\n[You]: ")
                sys.stdout.flush()
def WIN_write_socket(server):
    """Writer thread for Windows: forward typed lines to the server until the
    user types ``exit``."""
    while True:
        message = sys.stdin.readline()
        # Bug fix: readline() keeps the trailing newline, so the old
        # comparison `message == 'exit'` never matched; strip it first.
        if message.strip() == 'exit':
            return
        server.send(bytes(message, 'UTF-8'))
# Entry point: `python client.py terminal [windows]`.
# Bug fix: the original wrapped the argv checks in a bare try/except, so plain
# `client.py terminal` (no third argument) raised IndexError on sys.argv[2]
# and was silently swallowed -- terminal mode never started.  Explicit length
# checks replace the exception swallowing.
if len(sys.argv) > 1 and str(sys.argv[1]) == 'terminal':
    if len(sys.argv) > 2 and str(sys.argv[2]) == 'windows':
        # On Windows select() only watches sockets, so socket reading and
        # stdin writing run in separate threads.
        from threading import *
        server = connect()
        server.send(bytes('Connected via Terminal. Hello!\n', 'UTF-8'))
        Thread(target=WIN_read_socket, args=(server,)).start()
        Thread(target=WIN_write_socket, args=(server,)).start()
    else:
        terminal_mode()
|
3,452 | 15105e22b3c1860735f282a2247ab41b138d75cf | import matplotlib.pyplot as plt
# Class-1 training samples, each augmented with a leading bias feature of 1.
w1 = [(1, 2, 7), (1, 8, 1),
      (1, 7, 5), (1, 6, 3),
      (1, 7, 8), (1, 5, 9),
      (1, 4, 5)]
# Class-2 samples, stored sign-flipped (the perceptron trick: negating the
# second class lets training use a single w.x >= 0 criterion for all rows).
w2 = [(-1, -4, -2), (-1, 1, 1),
      (-1, -1, -3), (-1, -3, 2),
      (-1, -5, -3.25), (-1, -2, -4),
      (-1, -7, -1)]
# Combined training set: w1 followed by w2.
dataset = [(1, 2, 7), (1, 8, 1),
           (1, 7, 5), (1, 6, 3),
           (1, 7, 8), (1, 5, 9),
           (1, 4, 5), (-1, -4, -2),
           (-1, 1, 1), (-1, -1, -3),
           (-1, -3, 2), (-1, -5, -3.25),
           (-1, -2, -4), (-1, -7, -1)]
# Single Perceptron function (Python 2: uses xrange and print statements)
def single_sample_perceptron():
    """Fixed-increment single-sample perceptron on the module-level dataset.

    Repeatedly scans the samples; on the first misclassified one (w.x < 0)
    the weights are nudged by eta * x and the scan restarts.  Stops once a
    full pass makes no update, then prints and returns the weights.
    """
    weight = [1, 1, 1]
    iterations = 0
    while(1):
        iterations = iterations+1
        ans = 0
        count = 0
        eta = 0.2
        # print weight
        for j in xrange(len(dataset)):
            # ans = w . x for the j-th (bias-augmented) sample
            ans = 0
            for i in xrange(3):
                ans = ans+float(weight[i]*dataset[j][i])
            if(ans < 0):
                # Misclassified: move the weights toward this sample and restart.
                for i in xrange(3):
                    weight[i] = weight[i]+eta*dataset[j][i]
                break
            count += 1
        # A full clean pass means the data is separated; training is done.
        if count == len(dataset):
            break
    print
    print "Final weights: ",
    print weight
    print "No. of Iterations: ",
    print iterations
    return weight
def main():
    """Train the perceptron, then plot both classes and the resulting
    decision boundary."""
    a = single_sample_perceptron()
    # Bug fix: `x1 = x2 = y1 = y2 = []` bound all four names to the SAME list
    # object, so every append went into one shared list and the scatter plots
    # were wrong.  Bind four independent lists instead.
    x1, y1 = [], []
    x2, y2 = [], []
    for j in range(len(w1)):
        x1.append(w1[j][1])
        y1.append(w1[j][2])
    for j in range(len(w2)):
        # w2 samples were stored negated (perceptron trick); flip them back
        # to their true coordinates for display.
        x2.append((-1)*w2[j][1])
        y2.append((-1)*w2[j][2])
    plt.plot(x1, y1, 'ro')
    plt.plot(x2, y2, 'bo')
    # Line derived from the learned weights; drawn across x = -100..100 and
    # clipped by the axis limits below.
    m1 = a[2]/a[1]
    m2 = (-1)/(m1)
    c = (-1)*a[0]/a[2]
    ya = m2*100+c
    yb = m2*(-100)+c
    plt.plot([100, -100], [ya, yb], 'r')
    plt.axis([-10, 10, -10, 10])
    plt.show()


if __name__ == "__main__":
    main()
|
3,453 | 4f54f3e306df3b861124adb4fe544089446e8021 | import unittest
import sys
import matplotlib.pyplot as plotter
import numpy
sys.path.append("/home/adityas/UGA/SensorWeb/scripts/Summer2018/code")
from simulator.component import CPU, DiskIO, Network
class TestComponents(unittest.TestCase):
    """Smoke tests for the simulator's CPU/Disk/Network signal generators."""
    def setUp(self):
        # Fresh 5-cycle generators for every test.
        self.cpu = CPU(cycles=5)
        self.disk = DiskIO(cycles=5)
        self.network = Network(cycles=5)
    def test_cpu_length(self):
        # One unit of time sampled at noise resolution 0.01 -> 100 samples.
        cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(cpu_data.shape[0], 100)
    def test_disk_length(self):
        # (variable name kept from copy/paste; this exercises the disk signal)
        cpu_data = self.disk.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(cpu_data.shape[0], 100)
    def test_network_length(self):
        cpu_data = self.network.get_data(start=0, stop=1, noise=0.01)
        self.assertEqual(cpu_data.shape[0], 100)
    def test_visualize(self):
        # NOTE(review): plotter.show() opens a blocking GUI window, so this
        # "test" needs a display and manual dismissal -- unsuitable for CI.
        cpu_data = self.cpu.get_data(start=0, stop=1, noise=0.01)
        disk_data = self.disk.get_data(start=0, stop=1, noise=0.01)
        network_data = self.network.get_data(start=0, stop=1, noise=0.01)
        plotter.plot(cpu_data)
        plotter.plot(disk_data)
        plotter.plot(network_data)
        plotter.show()
if __name__ == "__main__":
unittest.main()
|
3,454 | 0d6177660a9b9c22bcf6eb11763e7fe1ee03b46a | #!/usr/bin/env python
# Copyright (c) 2018, University of Stuttgart
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Jim Mainprice on Sunday June 13 2018
import demos_common_imports
from pyrieef.geometry.workspace import *
from pyrieef.geometry.pixel_map import sdf
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
# Build a 2x2 workspace containing three obstacles, derive its occupancy map
# and signed distance field, and display the result.
env = EnvBox(dim=np.array([2., 2.]))
box = Box(origin=np.array([-.2, -.2]))
segment = Segment(origin=np.array([.4, -.1]), orientation=0.2)
circle = Circle(origin=np.array([.5, .5]), radius=0.2)
workspace = Workspace(env)
workspace.obstacles.append(box)
workspace.obstacles.append(segment)
workspace.obstacles.append(circle)
# Compute Occupancy map and SDF
nb_points = 20
# NOTE(review): this rebinds the imported occupancy_map() function's name to
# its result, so the function cannot be called again later in this module.
occupancy_map = occupancy_map(nb_points, workspace)
signed_distance_field = sdf(occupancy_map)
# Setup viewer
viewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)
viewer.draw_ws_img(signed_distance_field)
# viewer.draw_ws_img(occupancy_map)
# import cv2
# Draw blured image
# viewer.draw_ws_img(
#     ndimage.gaussian_filter(
#         cv2.resize(src=signed_distance_field,
#                    dsize=(300, 300),
#                    interpolation=cv2.INTER_NEAREST), sigma=3))
viewer.draw_ws_obstacles()
viewer.show_once()
|
3,455 | 3d1f2130043613dc8d5bbd773edd96c87c355de9 | import rambench
rambench.perform_benchmark()
|
3,456 | 8010c0d53af6d428f29ff3ce63bcd6b5b811b051 | $ pip install "<package_name> >= 1.1"
|
3,457 | 72286078841c7fe5b297767576741dbbd0a80411 | import pytest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import extract_tables_columns
def test_get_tables():
    """Two comma-separated tables in the FROM clause are both reported."""
    query = "SELECT * FROM table1, table2 WHERE table1.column1 = table2.column1;"
    expected = [('TABLE1', 'TABLE1'), ('TABLE2', 'TABLE2')]
    assert extract_tables_columns.get_tables(query) == expected
def test_get_tables_mutiline():
    """A query spread over several lines is parsed the same as a one-liner."""
    query = """
    SELECT *
    FROM table1, table2
    WHERE table1.column1 = table2.column1;
    """
    expected = [('TABLE1', 'TABLE1'), ('TABLE2', 'TABLE2')]
    assert extract_tables_columns.get_tables(query) == expected
def test_get_tables_tables_on_muti_lines():
    """Table names continued onto a following line are still collected."""
    query = """
    SELECT *
    FROM table1, table2,
        table3
    WHERE table1.column1 = table2.column1;
    """
    expected = [('TABLE1', 'TABLE1'), ('TABLE2', 'TABLE2'), ('TABLE3', 'TABLE3')]
    assert extract_tables_columns.get_tables(query) == expected
def test_get_tables_single_table():
    """A FROM clause with one table yields exactly one entry."""
    query = """
    SELECT *
    FROM table1
    WHERE table1.column1 = table2.column1;
    """
    assert extract_tables_columns.get_tables(query) == [('TABLE1', 'TABLE1')]
def test_get_tables_left_join():
    """Tables brought in via LEFT JOIN are reported alongside the FROM table."""
    query = """
    SELECT *
    FROM table1
    LEFT JOIN table2 ON table1.column1 = table2.column2
    WHERE table1.column1 < 10;
    """
    expected = [('TABLE1', 'TABLE1'), ('TABLE2', 'TABLE2')]
    assert extract_tables_columns.get_tables(query) == expected
3,458 | 8a21a7005fb17cc82759079022b540cf4fd062c5 | def search4vowels(word):
""" Return sny vowels founded in a supplied word."""
vowels = set('aeiou')
found = vowels.intersection(set(word))
#return found
for vowels in found:
print(vowels)
|
3,459 | cf3b4e2c76091f95d24e8a987a63ece46503d6e8 | import numpy as np
import time
import uuid
from datetime import datetime
log_host = "agent1"
class State:
    """Abstract log-generator state: holds an output path and an iteration
    count; concrete subclasses must implement run()."""

    def __init__(self, path, iterations):
        self.path = path
        self.iterations = iterations

    def run(self):
        # Subclasses override this; the assert preserves the original guard.
        assert 0, "run not implemented"
class BruteForceAttackState(State):
    """Emit a rapid burst (one line per 0.2s) of failed-login log entries all
    coming from ONE random OS/address pair -- simulating a brute-force attack
    against a single machine (contrast with NoAlarmState's background noise)."""
    def run(self):
        # OS and address are chosen once, outside the loop: the whole burst
        # originates from the same simulated machine.
        os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 'MacOS10'])
        addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42'])
        for i in range(self.iterations):
            timestamp = datetime.now()
            log_id = uuid.uuid4()
            message = "Unsuccessful login attempt"
            os = os_val
            log_type = "Informational"
            host = log_host
            log_machine = addr_val
            # Pipe-delimited log line, printed and appended to the output file.
            log = str(timestamp)+"|"+str(log_id)+"|"+message+"|"+os+"|"+log_type+"|"+host+"|"+log_machine
            print(log)
            f = open(self.path, "a")
            f.write(log + "\n")
            f.close()
            time.sleep(0.2)
class NoAlarmState(State):
    """Emit slow (one line per 1.5s) failed-login log entries, each from a
    freshly chosen random OS/address pair -- benign background noise that
    should NOT trigger a brute-force alarm."""
    def run(self):
        for i in range(self.iterations):
            # Unlike BruteForceAttackState, the OS/address pair is re-drawn on
            # every iteration, so the attempts are spread across machines.
            os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 'MacOS10'])
            addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42'])
            timestamp = datetime.now()
            log_id = uuid.uuid4()
            message = "Unsuccessful login attempt"
            os = os_val
            log_type = "Informational"
            host = log_host
            log_machine = addr_val
            log = str(timestamp)+"|"+str(log_id)+"|"+message+"|"+os+"|"+log_type+"|"+host+"|"+log_machine
            print(log)
            f = open(self.path, "a")
            f.write(log + "\n")
            f.close()
            time.sleep(1.5)
|
3,460 | 3e0bc91b81d0f503b78c9ac685b05b7ecb754e28 | # dealing with the packet fragments and their reconsttruction
import logging
# shut up scapy
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
conf.verb=0
from collections import OrderedDict
pkt_frag_loads = OrderedDict()
def get_load(pkt):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
#create full load from load fragments
load = pkt[Raw].load
pkt_frag_loads = frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
return full_load
def frag_remover(ack, load):
'''
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
'''
global pkt_frag_loads
# Keep the number of IP:port mappings below 50
# last=False pops the oldest item rather than the latest
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
# Loop through a deep copy dict but modify the original dict
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
# Keep 25 ack:load's per ip:port
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
# Recopy the new dict to prevent KeyErrors for modifying dict in loop
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
# Keep the load less than 75,000 chars
for ack in copy_pkt_frag_loads[ip_port]:
# If load > 5000 chars, just keep the last 200 chars
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
return pkt_frag_loads
def frag_joiner(ack, src_ip_port, load):
'''
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
'''
global pkt_frag_loads
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
# Make pkt_frag_loads[src_ip_port][ack] = full load
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
|
3,461 | 5b894eac93bff44931df4ef8d845c23071a03227 | import requests
import pandas as pd
import time
def job_spider(jid="1913e38066dd3c8e1Hd40t--FVE~", ka="search_list_1", i=0):
# request info.
job_url = "https://www.zhipin.com/job_detail/" + jid + ".html"
headers = {
'cache-control': "no-cache",
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/71.0.3578.80 Safari/537.36'
}
querystring = {"ka": ka}
try:
# request
r = requests.request("GET", job_url, headers=headers, params=querystring)
content = r.content.decode('utf-8')
# raw data.
file = "./raw_data/page/" + jid + ".html"
with open(file, 'w', encoding='utf-8') as f:
f.write(content)
result = "suceed"
except IOError:
result = "failed"
log = "job " + str(i) + " : " + jid + " crawl " + result
print(log)
if __name__ == "__main__":
file = "./raw_data/list/job_list.csv"
df = pd.read_csv(file, encoding='utf-8', header=None)
jid_list = df[0].values.tolist()
ka_list = df[1].values.tolist()
# print(jid_list)
for i in range(0, len(jid_list)):
job_spider(jid_list[i], ka_list[i], i)
time.sleep(1)
|
3,462 | e434d5519e3ba4255ed928769070de391cb0955b | import glob
import html
import os
import re
import sys
import textwrap
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import pycountry
import requests
from pyd2v import D2V
from pymediainfo import MediaInfo, Track
from pynfogen.formatter import CustomFormats
class NFO:
AUDIO_CHANNEL_LAYOUT_WEIGHT = {
"LFE": 0.1
}
IMDB_ID_T = re.compile(r"^tt\d{7,8}$")
TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$")
TVDB_ID_T = re.compile(r"^\d+$")
def __init__(self) -> None:
self.media_info: MediaInfo
self.file: str
self.season: Optional[Union[int, str]]
self.episode: Optional[int]
self.episode_name: Optional[str]
self.videos: List[Track]
self.audio: List[Track]
self.subtitles: List[Track]
self.chapters: Dict[str, str]
self.chapters_numbered: bool
self.fanart_api_key: Optional[str]
self.source: Optional[str]
self.note: Optional[str]
self.preview: Optional[str]
self.imdb: str
self.tmdb: Optional[str]
self.tvdb: Optional[int]
self.title_name: str
self.title_year: str
self.episodes: int
self.release_name: str
self.preview_images: List[dict[str, str]]
self.banner_image: Optional[str]
self.session = self.get_session()
def __repr__(self) -> str:
return "<{c} {attrs}>".format(
c=self.__class__.__name__,
attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
)
def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str:
"""
Evaluate and apply formatting on template, apply any art if provided.
Any additional parameters are passed as extra variables to the template.
The extra variables have priority when there's conflicting variable names.
"""
variables = self.__dict__
variables.update(kwargs)
template = CustomFormats().format(template, **variables)
if art:
art = art.format(nfo=template)
template = art
for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template):
# TODO: This if check is quite yucky, look into alternative options.
# Ideally a custom format spec would be great.
template = template.replace(
m.group(0),
m.group(2) if int(m.group(1)) else ""
)
template = "\n".join(map(str.rstrip, template.splitlines(keepends=False)))
return template
def set_config(self, file: str, **config: Any) -> None:
self.file = file
self.media_info = MediaInfo.parse(self.file)
self.fanart_api_key = config.get("fanart_api_key")
self.source = config.get("source")
self.note = config.get("note")
self.preview = config.get("preview")
self.season = config.get("season")
self.episode, self.episode_name = config.get("episode") or (None, None)
self.episodes = self.get_tv_episodes()
self.release_name = self.get_release_name()
self.videos = self.media_info.video_tracks
self.audio = self.media_info.audio_tracks
self.subtitles = self.media_info.text_tracks
tracks_without_language = [
x for x in self.videos + self.audio + self.subtitles
if not x.language or x.language == "und"
]
if tracks_without_language:
print("The following tracks have no language tag! All tracks need a language tag!")
for track in tracks_without_language:
print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)")
print(
"Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\n"
"Don't forget to verify and add language tags to the rest of the files too!"
)
sys.exit(1)
chapters = next(iter(self.media_info.menu_tracks), None)
if chapters:
self.chapters = {
".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":")
for k, v in chapters.to_data().items()
if f"1{k.replace('_', '')}".isdigit()
}
self.chapters_numbered = all(
x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"]
for i, x in enumerate(self.chapters.values())
)
else:
self.chapters = {}
self.chapters_numbered = False
self.imdb = self.get_imdb_id(config.get("imdb"))
self.tmdb = self.get_tmdb_id(config.get("tmdb"))
self.tvdb = self.get_tvdb_id(config.get("tvdb"))
self.title_name, self.title_year = self.get_title_name_year()
self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None
self.preview_images = self.get_preview_images(self.preview) if self.preview else []
def get_imdb_id(self, imdb_id: Any) -> str:
"""
Get an IMDB ID from either the media's global tags, or the config.
Since IMDB IDs are required for this project, it will bug the user for
one interactively if not found.
"""
if not imdb_id:
general_track = self.media_info.general_tracks[0].to_data()
imdb_id = general_track.get("imdb")
if not imdb_id:
print("No IMDB ID was provided but is required...")
while not imdb_id or not isinstance(imdb_id, str):
user_id = input("IMDB ID (e.g., 'tt0487831'): ")
if not self.IMDB_ID_T.match(user_id):
print(f"The provided IMDB ID {user_id!r} is not valid...")
print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').")
else:
imdb_id = user_id
return imdb_id
def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]:
"""
Get a TMDB ID from either the media's global tags, or the config.
It will raise a ValueError if the provided ID is invalid.
"""
if not tmdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tmdb_id = general_track.get("tmdb")
if not tmdb_id:
print("Warning: No TMDB ID was provided...")
return None
if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str):
print(f"The provided TMDB ID {tmdb_id!r} is not valid...")
print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').")
raise ValueError("Invalid TMDB ID")
return tmdb_id
def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]:
"""
Get a TVDB ID from either the media's global tags, or the config.
It will raise a ValueError if the provided ID is invalid.
"""
if not tvdb_id:
general_track = self.media_info.general_tracks[0].to_data()
tvdb_id = general_track.get("tvdb")
if not tvdb_id:
print("Warning: No TVDB ID was provided...")
return None
if isinstance(tvdb_id, int):
tvdb_id = str(tvdb_id)
if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str):
print(f"The provided TVDB ID {tvdb_id!r} is not valid...")
print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').")
raise ValueError("Invalid TVDB ID")
return int(tvdb_id)
def get_title_name_year(self) -> Tuple[str, str]:
"""Scrape Title Name and Year (including e.g. 2019-) from IMDB"""
r = self.session.get(f"https://www.imdb.com/title/{self.imdb}")
if r.status_code != 200:
raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]")
imdb_page = html.unescape(r.text)
imdb_title = re.search(
# testing ground: https://regex101.com/r/bEoEDn/1
r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)"
r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>",
imdb_page
)
if not imdb_title:
raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...")
return imdb_title.group("name").strip(), imdb_title.group("year").strip()
def get_tv_episodes(self) -> int:
"""Calculate total episode count based on neighbouring same-extension files."""
return len(glob.glob(os.path.join(
os.path.dirname(self.file),
f"*{os.path.splitext(self.file)[-1]}"
)))
def get_release_name(self) -> str:
"""
Retrieve the release name based on the file used during MediaInfo.
If a season was specified, but an episode number was not, it presumes the release is a Pack.
Hence when pack, it uses the parent folder's name as the release name.
"""
if self.season is not None and self.episode is None:
return os.path.basename(os.path.dirname(self.file))
return os.path.splitext(os.path.basename(self.file))[0]
def get_banner_image(self, tvdb_id: int) -> Optional[str]:
"""
Get a wide banner image from fanart.tv.
Currently restricts banners to English-only.
"""
if not tvdb_id:
return None
if not self.fanart_api_key:
raise ValueError("Need Fanart.tv api key for TV titles!")
r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}")
if r.status_code == 404:
return None
res = r.json()
error = res.get("error message")
if error:
if error == "Not found":
return None
raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}")
banner = next((
x["url"] for x in (res.get("tvbanner") or [])
if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language
), None)
return banner
def get_preview_images(self, url: str) -> List[Dict[str, str]]:
if not url:
return []
images = []
for domain in ["imgbox.com", "beyondhd.co"]:
if domain not in url.lower():
continue
page = self.session.get(url).text
if domain == "imgbox.com":
for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page):
images.append({
"url": f"https://imgbox.com/{m.group(2)}",
"src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}"
})
elif domain == "beyondhd.co":
for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page):
images.append({
"url": f"https://beyondhd.co/image/{m.group(1)}",
"src": m.group(2)
})
break
return images
def get_video_print(self, videos: List[Track]) -> List[List[str]]:
if not videos:
return [["--"]]
data = []
for video in videos:
codec = {
"MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}"
}.get(video.format, video.format)
scan_overview = video.scan_type
vst = False
if codec in ["MPEG-1", "MPEG-2"]:
# parse d2v file with pyd2v, generates D2V if needed
d2v = D2V.load(Path(self.file))
self.file = d2v.path
# get every frames' flag data, this contains information on displaying frames
# add vob and cell number to each frames flag data as well
flags = [f for line in [
[dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data
] for f in line]
interlaced_percent = (sum(1 for f in flags if not f["progressive_frame"]) / len(flags)) * 100
if interlaced_percent == 100:
scan_overview = "Interlaced (CST)"
else:
scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)"
vst = True
for ext in ["log", "d2v", "mpg", "mpeg"]:
fp = os.path.splitext(self.file)[0] + "." + ext
if os.path.exists(fp):
os.unlink(fp)
line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format(
language=pycountry.languages.get(alpha_2=video.language).name,
codec=codec,
profile=video.format_profile,
width=video.width, height=video.height,
aspect=video.other_display_aspect_ratio[0],
bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}"
)
line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format(
fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate,
fps_mode="VFR" if vst else video.frame_rate_mode,
color_space=video.color_space,
subsampling=video.chroma_subsampling.replace(":", ""),
bit_depth=video.bit_depth,
scan=scan_overview
)
data.append([line_1, line_2])
return data
def get_audio_print(self, audio: List[Track]) -> List[str]:
if not audio:
return ["--"]
data = []
for t in audio:
if t.title and "Commentary" in t.title:
title = t.title
else:
title = pycountry.languages.get(alpha_2=t.language).name
if t.channel_layout:
channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" ")))
else:
channels = float(t.channel_s)
bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else ""
l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}"
data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))]
return data
@staticmethod
def get_subtitle_print(subs: List[Track]) -> List[str]:
"""
Return a list of a brief subtitle overview per-subtitle.
e.g.
- English, Forced, SubRip (SRT)
- English, SubRip (SRT)
- English, SDH, SubRip (SRT)
- Spanish, Latin American (SDH), SubRip (SRT)
The bit of text between the Language and the Subtitle format is the Track Title.
It can be of any format, but it is recommended to be used as shown above.
It will be returned as a list of strings with the `- ` already pre-pended to each entry.
"""
data = []
if not subs:
data.append("--")
for sub in subs:
line_items = []
# following sub.title tree checks and supports three different language and title scenarios
# The second scenario is the recommended option to choose if you are open to choosing any
# The third scenario should be used if you have nothing unique to state about the track
# | Language | Track Title | Output |
# | ------------ | ----------------------------- | --------------------------------------------- |
# | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |
# | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |
# | es / Spanish | None | - Spanish, SubRip (SRT) |
language = pycountry.languages.get(alpha_2=sub.language).name
if sub.title:
if language.lower() in sub.title.lower():
line_items.append(sub.title)
else:
line_items.append(f"{language}, {sub.title}")
else:
line_items.append(language)
line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)"))
line = "- " + ", ".join(line_items)
data += [
(" " + x if i > 0 else x)
for i, x in enumerate(textwrap.wrap(line, 64))
]
return data
@staticmethod
def get_chapter_print(chapters: Dict[str, str]) -> List[str]:
if not chapters:
return ["--"]
return [
f"- {k}: {v}"
for k, v in chapters.items()
]
def get_chapter_print_short(self, chapters: Dict[str, str]) -> str:
if not chapters:
return "No"
if self.chapters_numbered:
return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})"
return "Yes (Named)"
@staticmethod
def get_session() -> requests.Session:
session = requests.Session()
session.headers.update({
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"DNT": "1",
"UPGRADE-INSECURE-REQUESTS": "1"
})
return session
|
3,463 | bbbbf0e1bbd7ead034d8cd88ee6a09a61cde7803 | #!/usr/bin/env python
import pyaudio
import wave
import winshell
"""
This script accesses the Laptop's microphone using the library pyaudio and opens a stream to record the voice
and writes it to an mp3 file
"""
def start():
try:
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
dest_path = winshell.desktop() + r"\Spyware\Output"
dest_path = dest_path.replace('\\','/') + "/outputaudio.mp3"
WAVE_OUTPUT_FILENAME = dest_path
p = pyaudio.PyAudio()
# open stream
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
# start streaming and writing to mp3 file
while True:
data = stream.read(CHUNK)
frames.append(data)
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
except Exception:
print("Failed") |
3,464 | 4a0cbd59ffae4fb5ba6e3bd871231e37065d1aed | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
created by gjwei on 3/26/17
"""
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
a = ListNode(1)
a.next = ListNode(3)
a.next = None
print a.val
print a.next
def main():
print "hello"
a = []
for i in range(20):
a.append(i)
return a
|
3,465 | b7511c156c241accaf1668d83ee0a5263b41af0d | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinLengthValidator, MaxLengthValidator, RegexValidator
from pizzaclub.settings import MAX_DNI_LENGTH, MAX_CUIL_LENGTH, PASSWORD_RESET_TIMEOUT
from pizzaclub.settings import MIN_DNI_LENGTH, MIN_CUIL_LENGTH
from pizzaclub.settings import MAX_PHONE_LENGTH, MIN_PHONE_LENGTH
import secrets
import datetime
# Create your models here.
class Address(models.Model):
address = models.CharField(max_length=100, unique=True)
lat = models.DecimalField(max_digits=9, decimal_places=7, default=0)
lon= models.DecimalField(max_digits=9, decimal_places=7, default=0)
elev = models.DecimalField(max_digits=9, decimal_places=2, default=0)
class Meta:
verbose_name_plural = "Address"
def __str__(self):
return self.address
class User(AbstractUser):
'''
Extend the User Django built in model.
Add token data for password reset, ans is_employee flag for Employee Profile.
'''
is_employee = models.BooleanField(default=False)
token = models.CharField(max_length=50)
token_date = models.DateTimeField(auto_now=True)
token_valid = models.BooleanField(default=True)
def is_order_manager(self):
return (self.is_employee and self.is_active) or self.is_superuser
def generate_token(self):
return secrets.token_urlsafe()
def check_token(self, token):
'''
Check token validity for an hour since was generated.
'''
tz = self.token_date.tzinfo
t_now = datetime.datetime.now(tz=tz)
# Check the token time less than hour
dt = t_now - self.token_date
if dt.total_seconds() > PASSWORD_RESET_TIMEOUT:
self.token_valid = False
# Return True if the token is correct and is_valid
res = (token == self.token) and self.token_valid
# Set the token invalid
self.token_valid = False
return res
def save(self, *args, **kwargs):
'''
Until save generate a new token and set valid.
'''
# Generate a token and set valid
self.token = self.generate_token()
self.token_valid = True
super(User, self).save(*args, **kwargs)
class Employee(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
dni = models.CharField(
max_length=MAX_DNI_LENGTH,
unique=True,
validators=[
MinLengthValidator(MIN_DNI_LENGTH),
MaxLengthValidator(MAX_DNI_LENGTH),
RegexValidator(regex=r'^\d+$')
])
cuil = models.CharField(
max_length=MAX_CUIL_LENGTH,
unique=True,
validators=[
MinLengthValidator(MIN_CUIL_LENGTH),
MaxLengthValidator(MAX_CUIL_LENGTH),
RegexValidator(regex=r'^\d+$')
])
phone = models.CharField(
max_length=MAX_PHONE_LENGTH,
null=True,
blank=True,
validators=[
MinLengthValidator(MIN_DNI_LENGTH),
MaxLengthValidator(MAX_DNI_LENGTH),
RegexValidator(regex=r'^\d+$')
])
address = models.ManyToManyField(Address)
def __str__(self):
return self.user.get_full_name()
def save(self, *args, **kwargs):
# Check user is employee
if not self.user.is_employee:
raise TypeError('The User must be an Employee')
# Check validation fields
self.full_clean()
# Save instance
super(Employee, self).save(*args, **kwargs)
class Client(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField()
phone = models.CharField(
max_length=MAX_PHONE_LENGTH,
validators=[
MinLengthValidator(MIN_DNI_LENGTH),
MaxLengthValidator(MAX_DNI_LENGTH),
RegexValidator(regex=r'^\d+$')
])
address = models.ManyToManyField(Address)
|
3,466 | 6b7ff00eb9a5d0837def5b245ba2d4a0acec972e | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 15:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenges', '0019_auto_20170310_1114'),
]
operations = [
migrations.AddField(
model_name='challenge',
name='supported_languages',
field=models.ManyToManyField(to='challenges.Language'),
),
]
|
3,467 | 72cda573bf9c744213a2957d51171f437f211353 | import serial
import time
from Files_management import get_mov_parameters,change_mov_parameters
#-------------------------------------------------------------------------------
def create_port():
port = get_mov_parameters()[1]
try:
ser = serial.Serial(port=port,baudrate=9600,timeout=1)
return ser
except:
print('Open port failded')
change_mov_parameters('0',port,'0','0')
return False
#-------------------------------------------------------------------------------
def port_status(ser):
if(ser.isOpen()):
if(get_mov_parameters()[0] == "1" or get_mov_parameters()[0] == "True"):
return True
else:
try:
create_port()
return True
except:
print("error opening")
change_mov_parameters('0',get_mov_parameters()[1],'0','0')
return False
#-------------------------------------------------------------------------------
def close_port(ser):
ser.close()
#-------------------------------------------------------------------------------
def send_value(value):
port = create_port()
status = get_mov_parameters()[0]
if(port_status(port)):
if(status == '1' or status == 'True'):
string = "".join([str(value),' \n'])
port.write(string.encode())
print('True')
else :
print('False')
|
3,468 | c773b273ad6953bf9c74b11c44aff16e9fd0860e | # coding=utf-8
# Copyright 2021-Present The THUCTC Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thuctc.utils as utils
from thuctc.modules.module import Module
from thuctc.modules.layer_norm import LayerNorm
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError("The rank of input must be 3.")
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype,
device=inputs.device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype,
device=inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name="embedding"):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(
torch.empty(self.embed_nums, self.embed_dims))
self.add_name(self.weight, "weight")
if bias:
self.bias = nn.Parameter(
torch.zeros(self.embed_dims))
self.add_name(self.bias, "bias")
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0,
std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False,
layer_norm=False, dropout=0.0, scale=False, name="embedding"):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary["source"])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size,
self.embedding_size,
name="word_embedding")
if self.pos_embed is not None:
if self.pos_embed == "learnable":
self.pos_embeddings = Embedding(params.max_pos,
self.embedding_size,
name="pos_embedding")
elif self.pos_embed == "functional":
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError("Unsupported position "
"embedding: %s" % pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size,
name="type_embedding")
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size,
eps=params.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size,
old_embedding_dim,
name="word_embedding").to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * (self.embedding_size ** 0.5)
if self.pos_embed is not None:
if self.pos_embed == "learnable":
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == "functional":
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
|
3,469 | 2cfc1bea6dd1571eff67c3f49b2a1899560c7ba7 | def koodrinate(kraj, kraji):
for ime, x, y in kraji:
if ime == kraj:
return x, y
kraji = {
'Brežice': (68.66, 7.04),
'Lenart': (85.20, 78.75),
'Rateče': (-65.04, 70.04),
'Ljutomer': (111.26, 71.82),
'Rogaška Slatina': (71.00, 42.00),
'Ribnica': (7.10, -10.50),
'Dutovlje': (-56.80, -6.93),
'Lokve': (-57.94, 19.32),
'Vinica': (43.81, -38.43),
'Brtonigla': (-71.00, -47.25),
'Kanal': (-71.00, 26.25),
'Črnomelj': (39.05, -27.93),
'Trbovlje': (29.61, 35.07),
'Beltinci': (114.81, 80.54),
'Domžale': (-2.34, 31.50)
}
def koodrinate(kraj, kraji):
return kraji.get(kraj)
napis = "KRNEKI"
vrednost = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
vsota = 0
for c in napis:
vsota += vrednost.get(c, 0)
print(sum(vrednost.get(c, 0) for c in napis))
for c in napis:
if c == "I":
vsota += 1
elif c == "V":
vsota += 5
elif c == "X":
vsota += 10
elif c == "L":
vsota += 50
elif c == "C":
vsota += 100
elif c == "D":
vsota += 500
elif c == "M":
vsota += 1000
|
3,470 | 9be6940fc6f405db652d478f9a74fcf56d8a0ad7 | from django.urls import path
from . import views # . current directory
urlpatterns = [
path("", views.index, name="index"),
path("login", views.login_view, name="login"),
path("logout", views.logout_view, name="logout"),
path("menu", views.menu, name="menu"),
path("add_item", views.add_item, name="add_item"),
path("confirm_order", views.confirm_order, name="confirm_order")
] |
3,471 | b66142e0b674d3920b8e3ad74e0d0b753f0a78c3 | from .embedpeek import EmbedPeek
__red_end_user_data_statement__ = "This cog does not persistently store data or metadata about users."
def setup(bot):
bot.add_cog(EmbedPeek(bot))
|
3,472 | 09417014963172fc71b4268aafdec1405c04f34d | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from kneed import KneeLocator
#Create a panda data frame from the csv file
df = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])
# Convert the pandas data frame to a NumPy array
arr = df.to_numpy()
# Debug visualisation of the raw points (kept for reference):
# plt.scatter(arr[:, 0], arr[:, 1], label='True Position')
# plt.show()
# Sum of Squared Errors (cluster inertia) for k = 1..10; used below to locate
# the "elbow" that suggests the natural number of clusters.
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='random',
                n_init=10, max_iter=300,
                tol=1e-04, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)
# Locate the elbow/knee of the distortion curve; kn.knee is the chosen k.
# NOTE(review): kn.knee can be None when no knee is detected — confirm the
# input data always produces one before running unattended.
kn = KneeLocator(range(1, 11), distortions, curve='convex', direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))
# Plot number-of-clusters vs distortion and mark the elbow point.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
# Final K-means run with the detected number of clusters.
km = KMeans(
    n_clusters=kn.knee, init='random',
    n_init=10, max_iter=300,
    tol=1e-04, random_state=0
)
# Cluster label for each point.
y_km = km.fit_predict(arr)
# Styling tables sized for up to 10 clusters (the elbow search range).
colors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson', 'lightpink', 'black', 'gold', 'coral', 'navy']
markers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']
# BUG FIX: the original hard-coded range(0, 3) and therefore only plotted the
# first three clusters regardless of how many were detected; iterate over the
# detected cluster count instead.
for i in range(kn.knee):
    plt.scatter(
        arr[y_km == i, 0], arr[y_km == i, 1],
        s=50, c=colors[i],
        marker=markers[i], edgecolor='black',
        label='cluster ' + str(i + 1)
    )
# Plot the centroid of every cluster.
plt.scatter(
    km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
    s=250, marker='*',
    c='red', edgecolor='black',
    label='centroids'
)
plt.legend(scatterpoints=1)
plt.grid()
plt.show()
|
3,473 | 41889456fbb56d263e0039716519e8959316b67e | #! /usr/bin/env python
#
# Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
(86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600)
else:
return "+%02d%02d" % (
offset.seconds / 3600, offset.seconds % 3600)
def render_timestamp(sec, usec):
    """Format an epoch second/microsecond pair as a local-time ISO-8601
    string with a numeric timezone suffix (EVE style)."""
    local = time.localtime(sec)
    date_part = "%04d-%02d-%02d" % (local.tm_year, local.tm_mon, local.tm_mday)
    time_part = "%02d:%02d:%02d.%06d" % (
        local.tm_hour, local.tm_min, local.tm_sec, usec)
    return date_part + "T" + time_part + get_tzoffset(sec)
def calculate_flow_id(event):
    """Derive a deterministic pseudo flow id for a unified2 event.

    XORs the protocol number (shifted into the top byte) with every 32-bit
    word of the raw source/destination addresses (IPv4: one word each,
    IPv6: four words each) and, when present, the two port numbers.
    """
    raw_src = event["source-ip.raw"]
    raw_dst = event["destination-ip.raw"]
    fmt = ">L" if len(raw_src) == 4 else ">LLLL"
    fid = event["protocol"] << 24
    for raw_addr in (raw_src, raw_dst):
        for word in struct.unpack(fmt, raw_addr):
            fid ^= word
    if "src_port" in event and "dest_port" in event:
        fid ^= event["src_port"] ^ event["dest_port"]
    return fid
class EveFilter(object):
    """Convert a decoded unified2 event dict into a Suricata EVE-style
    OrderedDict ready for JSON serialisation."""
    def __init__(
            self, msgmap=None, classmap=None):
        # msgmap/classmap resolve signature and classification ids to text;
        # either may be None, in which case resolution returns defaults.
        self.msgmap = msgmap
        self.classmap = classmap
    def filter(self, event):
        """Return the EVE "alert" representation of *event*."""
        output = OrderedDict()
        output["timestamp"] = render_timestamp(
            event["event-second"], event["event-microsecond"])
        output["sensor_id"] = event["sensor-id"]
        output["event_type"] = "alert"
        output["src_ip"] = event["source-ip"]
        # sport-itype / dport-icode are overloaded in unified2: ports for
        # TCP/UDP, type/code for ICMP.
        if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
            output["src_port"] = event["sport-itype"]
        output["dest_ip"] = event["destination-ip"]
        if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
            output["dest_port"] = event["dport-icode"]
        output["proto"] = self.getprotobynumber(event["protocol"])
        if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
            output["icmp_type"] = event["sport-itype"]
            output["icmp_code"] = event["dport-icode"]
        output["flow_id"] = calculate_flow_id(event)
        alert = OrderedDict()
        alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
        alert["gid"] = event["generator-id"]
        alert["signature_id"] = event["signature-id"]
        alert["rev"] = event["signature-revision"]
        alert["signature"] = self.resolve_msg(event)
        alert["category"] = self.resolve_classification(event)
        alert["severity"] = event["priority"]
        output["alert"] = alert
        # EVE only includes one packet.
        # NOTE(review): b64encode returns bytes on Python 3; this script
        # targets Python 2 (json.dumps(..., encoding=...) is used elsewhere
        # in this file) — confirm before running under Python 3.
        if event["packets"]:
            output["packet"] = base64.b64encode(event["packets"][0]["data"])
        return output
    def resolve_classification(self, event, default=None):
        """Return the classification description for the event, or *default*."""
        if self.classmap:
            classinfo = self.classmap.get(event["classification-id"])
            if classinfo:
                return classinfo["description"]
        return default
    def resolve_msg(self, event, default=None):
        """Return the rule message for (gid, sid), or *default* when unknown."""
        if self.msgmap:
            signature = self.msgmap.get(
                event["generator-id"], event["signature-id"])
            if signature:
                return signature["msg"]
        return default
    def getprotobynumber(self, protocol):
        # Friendly protocol name, falling back to the numeric string.
        return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
    """Append-mode file (or pass-through stream) sink for encoded events.

    When constructed with only a filename the file is opened in append mode
    and transparently reopened if it disappears (log rotation). When an
    existing file object is supplied (e.g. sys.stdout) it is used as-is.
    """
    def __init__(self, filename, fileobj=None):
        self.filename = filename
        self.fileobj = fileobj
        if self.fileobj is None:
            self.reopen()
            # isfile tracks whether we own the handle and may reopen it.
            self.isfile = True
        else:
            self.isfile = False
    def reopen(self):
        # Close any current handle, then (re)open for append; used at
        # startup and again after external log rotation.
        if self.fileobj:
            self.fileobj.close()
        self.fileobj = open(self.filename, "ab")
    def write(self, buf):
        if self.isfile:
            # If the file vanished (rotated/deleted), recreate it first.
            if not os.path.exists(self.filename):
                self.reopen()
        # NOTE(review): the file is opened binary ("ab") but str is written;
        # fine on Python 2 (this script's target), TypeError on Python 3.
        self.fileobj.write(buf)
        self.fileobj.write("\n")
        self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
    """Populate *classmap*/*msgmap* from the map files conventionally found
    next to the given snort.conf; files that do not exist are skipped."""
    conf_dir = os.path.dirname(os.path.expanduser(snort_conf))
    # (basename, target object, loader method name) in load order.
    loaders = (
        ("classification.config", classmap, "load_from_file"),
        ("gen-msg.map", msgmap, "load_generator_map"),
        ("sid-msg.map", msgmap, "load_signature_map"),
    )
    for basename, target, method_name in loaders:
        path = os.path.join(conf_dir, basename)
        if os.path.exists(path):
            LOG.debug("Loading %s.", path)
            getattr(target, method_name)(open(path))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
    """Entry point: read unified2 records and emit them as EVE JSON.

    Loads signature/classification maps (from explicit paths and/or the
    snort.conf location), builds the output sinks, then streams events from
    either a spool directory or the files named on the command line.
    """
    msgmap = maps.SignatureMap()
    classmap = maps.ClassificationMap()
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@', epilog=epilog)
    parser.add_argument(
        "-C", dest="classification_path", metavar="<classification.config>",
        help="path to classification config")
    parser.add_argument(
        "-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
        help="path to sid-msg.map")
    parser.add_argument(
        "-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
        help="path to gen-msg.map")
    parser.add_argument(
        "--snort-conf", dest="snort_conf", metavar="<snort.conf>",
        help="attempt to load classifications and map files based on the "
        "location of the snort.conf")
    parser.add_argument(
        "--directory", metavar="<spool directory>",
        help="spool directory (eg: /var/log/snort)")
    parser.add_argument(
        "--prefix", metavar="<spool file prefix>",
        help="spool filename prefix (eg: unified2.log)")
    parser.add_argument(
        "--bookmark", action="store_true", default=False,
        help="enable bookmarking")
    parser.add_argument(
        "--follow", action="store_true", default=False,
        help="follow files/continuous mode (spool mode only)")
    parser.add_argument(
        "--delete", action="store_true", default=False,
        help="delete spool files")
    parser.add_argument(
        "--output", metavar="<filename>",
        help="output filename (eg: /var/log/snort/alerts.json")
    parser.add_argument(
        "--stdout", action="store_true", default=False,
        help="also log to stdout if --output is a file")
    parser.add_argument(
        "filenames", nargs="*")
    args = parser.parse_args()
    # Map files named explicitly on the command line extend/override the
    # ones discovered next to snort.conf.
    if args.snort_conf:
        load_from_snort_conf(args.snort_conf, classmap, msgmap)
    if args.classification_path:
        classmap.load_from_file(
            open(os.path.expanduser(args.classification_path)))
    if args.genmsgmap_path:
        msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
    if args.sidmsgmap_path:
        msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
    if msgmap.size() == 0:
        LOG.warn("WARNING: No alert message map entries loaded.")
    else:
        LOG.info("Loaded %s rule message map entries.", msgmap.size())
    if classmap.size() == 0:
        LOG.warn("WARNING: No classifications loaded.")
    else:
        LOG.info("Loaded %s classifications.", classmap.size())
    eve_filter = EveFilter(msgmap, classmap)
    # Always log somewhere: --output file (optionally duplicated to stdout
    # with --stdout), otherwise stdout alone.
    outputs = []
    if args.output:
        outputs.append(OutputWrapper(args.output))
        if args.stdout:
            outputs.append(OutputWrapper("-", sys.stdout))
    else:
        outputs.append(OutputWrapper("-", sys.stdout))
    # Choose the event source: spool mode (directory+prefix) or a file list.
    if args.directory and args.prefix:
        reader = unified2.SpoolEventReader(
            directory=args.directory,
            prefix=args.prefix,
            follow=args.follow,
            delete=args.delete,
            bookmark=args.bookmark)
    elif args.filenames:
        reader = unified2.FileEventReader(*args.filenames)
    else:
        print("nothing to do.")
        return
    for event in reader:
        try:
            # NOTE(review): json.dumps(..., encoding=...) exists only on
            # Python 2; this whole script targets Python 2.
            encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
            for out in outputs:
                out.write(encoded)
        except Exception as err:
            LOG.error("Failed to encode record as JSON: %s: %s" % (
                str(err), str(event)))
if __name__ == "__main__":
sys.exit(main())
|
3,474 | 40b1fac14aaa81039aec8e80ce1c91bb881cfe78 | debt = 100
equity = 50
# Debt-to-equity ratio: an emoji "health check" of leverage.
ratio = debt / equity
# Leverage at or below 2x is treated as acceptable.
if ratio <= 2:
    print('😊')
else:
    print('⚠️')
print('Ratio is', ratio)
|
3,475 | 303e1b95c2ca60041a34b8c09e013849112a108d | import matplotlib.image as mpimg
import cv2
import rasterio
from ode_data_access.image_utils import view_as_blocks, is_black, align_and_crop
import os
import numpy as np
from tqdm import tqdm
class ChunkProcessor:
    """Split large raster images (e.g. JP2 planetary products) into
    fixed-size grayscale JPEG chunks on disk, optionally collecting each
    chunk as a uint8 array."""
    def write_result_blocks(self, result_blocks, window, product_name, chunk_size, save_dir='test', skip_black_images=False,
                            align_and_crop_thresholds=None, vectorized_chunks=None):
        """Persist a 2-D grid of image blocks as JPEGs named after their
        raster window and grid position.

        result_blocks: array shaped (rows, cols, chunk_h, chunk_w).
        skip_black_images: drop blocks that are entirely black.
        align_and_crop_thresholds: when given, re-align/crop each block.
        vectorized_chunks: optional list collecting chunks as uint8 arrays.
        """
        for i in range(result_blocks.shape[0]):
            for j in range(result_blocks.shape[1]):
                img = result_blocks[i][j]
                if not skip_black_images or not is_black(img):
                    filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{window.width}_h_{window.height}_x_{i}_y_{j}.jpg'
                    filepath = './' + save_dir + '/' + filename
                    mpimg.imsave(filepath, img, cmap="gray")
                    # Round-trip through disk so later steps operate on the
                    # JPEG-decoded pixels.
                    img = mpimg.imread(filepath)
                    if align_and_crop_thresholds is not None:
                        img = align_and_crop(img, *align_and_crop_thresholds)
                    # Normalise every chunk back to chunk_size x chunk_size.
                    img = cv2.resize(img, (chunk_size, chunk_size), cv2.INTER_AREA)
                    mpimg.imsave(filepath, img, cmap='gray')
                    # Rename so the stored w/h reflect the written image.
                    new_filename = f'{product_name}_img_row_{window.row_off}_col_{window.col_off}_w_{img.shape[1]}_h_{img.shape[0]}_x_{i}_y_{j}.jpg'
                    new_filepath = './' + save_dir + '/' + new_filename
                    os.rename(filepath, new_filepath)
                    if vectorized_chunks is not None:
                        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                        vectorized_chunks.append(img.astype(np.uint8))
    # Based on the idea provided here - https://gis.stackexchange.com/questions/158527/reading-raster-files-by-block-with-rasterio
    def chunkify(self, img_file, product_name, chunk_size=256, save_dir='test', skip_black_images=True, align_and_crop_thresholds=None,
                 vectorized_chunks=None):
        """Stream *img_file* block-by-block with rasterio, convert each block
        to grayscale and write chunk_size-square tiles via write_result_blocks.

        Blocks whose dimensions are not exact multiples of chunk_size are
        skipped (typically the right/bottom edge blocks).
        """
        with rasterio.open(img_file) as src:
            print('Resolution =', src.width, 'x', src.height)
            print('Estimated number of iterations =', ((src.width * src.height) / (1024 * 1024)) * 1.085)
            for block_index, window in tqdm(src.block_windows(1)):
                block_array = src.read(window=window)
                # print('Block array', block_array.shape)
                # rasterio returns (bands, h, w); move bands last for OpenCV.
                block_array = np.moveaxis(block_array, 0, -1)
                # print('Move axis', block_array.shape)
                if block_array.shape[2] != 1:
                    block_array = cv2.cvtColor(block_array, cv2.COLOR_RGB2GRAY)
                else:
                    block_array = np.squeeze(block_array)
                block_array_shape = block_array.shape
                # plt.imshow(block_array, cmap='gray')
                # print('Grayscale Block Shape', block_array_shape)
                if block_array_shape[0] % chunk_size == 0 and block_array_shape[1] % chunk_size == 0:
                    result_blocks = view_as_blocks(block_array, block_shape=(chunk_size, chunk_size))
                    self.write_result_blocks(result_blocks, window, product_name, chunk_size, save_dir, skip_black_images,
                                             align_and_crop_thresholds, vectorized_chunks)
    def chunkify_all(self, save_dir_prefix, chunk_size, product_image_urls, skip_black_images=True, align_and_crop_thresholds=None,
                     vectorized_chunks=None):
        """Chunkify every (url, product_name) pair whose filename looks like
        a JP2/JPG image, writing each product into its own chunk directory.

        NOTE(review): only the basename of the URL is opened, so the file is
        assumed to have been downloaded into the CWD already — confirm.
        """
        for product_image_url, product_name in product_image_urls:
            filename = product_image_url.split('/')[-1]
            if filename.endswith('JP2') or filename.lower().endswith('jpg'):
                print('Chunkifying', product_name)
                jp2_filename = filename
                chunk_dir = save_dir_prefix + '_' + product_name
                if not os.path.exists(chunk_dir):
                    os.makedirs(chunk_dir)
                self.chunkify(jp2_filename, product_name, chunk_size, chunk_dir, skip_black_images, align_and_crop_thresholds,
                              vectorized_chunks)
                print("Number of chunks found:",
                      len([name for name in os.listdir(chunk_dir) if os.path.isfile(chunk_dir + '/' + name)]))
print('-----') |
3,476 | 883cb1e3ea227bb5ac5aa3b4348336ab1a7fba70 | import pygame
import numpy as np
import glob
from entities.base import AnimatedSprite
images_path = sorted(glob.glob('./resources/trophy_sparkle_*.png'))
trophy_im_dict = {'sparkle':[pygame.transform.scale(pygame.image.load(img_path),(400,400)) for img_path in images_path]}
class Trophy(AnimatedSprite):
    """Animated trophy sprite that cycles through its sparkle frames."""

    def __init__(self, position, image_dict, hold_for_n_frames=3):
        """Initialise the animation and remember where the trophy spawned."""
        super().__init__(position, image_dict, hold_for_n_frames)
        self.frames_alive = 0
        self.initial_position = position

    def update(self):
        """Advance the sprite to its next animation frame."""
        super().next_frame()
|
3,477 | ca91052072d7b2da5729cf55f7f4ba4b54608017 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 11:43:58 2020
@author: Dr. Tang
"""
import tensorflow as tf
# Exercise: translate the plain-Python computation below into TensorFlow ops.
#x = 10
#y = 2
#u=x/y
#z = u- 1
# Graph inputs (TensorFlow 1.x placeholder API).
x=tf.placeholder(tf.int32)
y=tf.placeholder(tf.int32)
# tf.divide on int32 inputs yields float64, hence the float64 constant below.
u=tf.divide(x,y)
z=tf.subtract(u,tf.constant(1.0,dtype=tf.float64))
# Exercise: evaluate and print z inside a session.
with tf.Session() as sess:
    output=sess.run(z,feed_dict={x:10,y:2})
    print(output)
|
3,478 | ee3718dee869a58089e897489af2eec3ff72be56 | from django.shortcuts import render
from post.models import *
from .models import *
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from account.models import Profile
from django.contrib.auth.models import User
from django.db.models import Q
# Create your views here.
def index(request):
    """Landing page: newest published posts, paginated 9 per page."""
    posts = Post.objects.order_by('-created_at').filter(status='Published')
    # about_us = AboutSite.objects.get(id=1)
    paginator = Paginator(posts, 9)
    # ?page=N selects the page; get_page tolerates missing/invalid values.
    page = request.GET.get('page')
    post_listings = paginator.get_page(page)
    context = {
        'posts': post_listings,
        # 'about': about_us
    }
    return render(request, 'hub/index.html', context)
def about(request):
    """Render the "about" page backed by the singleton AboutSite row (pk=1).

    Returns HTTP 404 when the AboutSite record does not exist.
    """
    # BUG FIX: get_object_or_404 was referenced without being imported at
    # module level (only ``render`` is imported from django.shortcuts), so
    # this view raised NameError at request time; import it locally.
    from django.shortcuts import get_object_or_404
    about_us = get_object_or_404(AboutSite, id=1)
    context = {
        'about': about_us
    }
    return render(request, 'hub/about.html', context)
def authors(request):
    """List every author profile on the site."""
    context = {'profiles': Profile.objects.all()}
    return render(request, 'hub/authors.html', context)
def authorDetail(request, pk):
    """Author profile page: bio plus that author's published posts (6/page).

    ``pk`` is the author's username, not a numeric primary key.
    NOTE(review): User.objects.get raises DoesNotExist (HTTP 500) for an
    unknown username — consider get_object_or_404 here.
    """
    author = User.objects.get(username=pk)
    profile = Profile.objects.get(user=author)
    posts = Post.objects.order_by('-created_at').filter(status='Published', author=author)
    paginator = Paginator(posts, 6)
    page = request.GET.get('page')
    posts_paginated = paginator.get_page(page)
    context = {
        'author': profile,
        'posts': posts_paginated
    }
    return render(request, 'hub/authorDetail.html', context)
# def search(request):
# queryset_list = Post.objects.order_by('-created_at')
# if 'q' in request.GET:
# query = request.GET['q']
# if query:
# queryset_list = queryset_list.filter(Q(title__icontains=query) | Q(description__icontains=query) | Q(content__icontains=query))
# paginator = Paginator(queryset_list, 1)
# page = request.GET.get('page')
# paginated_result = paginator.get_page(page)
# context = {
# 'posts': paginated_result
# }
# return render(request, 'hub/search.html', context) |
3,479 | 66f8fa5fc12dc80b8f46684c39781c2e4634de4a | # -*- coding:utf-8 -*-
import requests
import json
def fun1():
    """Query the AMap (Gaode) place-text API for jewellery store chains in
    Shanghai and return the decoded JSON of the LAST request made.

    NOTE(review): every (key, store, city) combination issues a request but
    only the final response is kept, and ``num`` (the page number) never
    advances past 1 — presumably results should be accumulated and paged;
    verify intent before relying on this.
    """
    # Each tuple pairs an AMap API key with the store brands queried under
    # it (spreading brands across keys, presumably for per-key rate limits).
    s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']),
             ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']),  # yh
             ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生']),  # zyy
             ]
    num = 1
    city_code = ['上海']
    for s_key, store_names in s_cut:
        for store in store_names:
            for code in city_code:
                params = {'keywords': store,
                          'types': '购物服务',
                          'city': code,
                          'citylimit': 'True',
                          'output': 'json',
                          'key': s_key,
                          'offset': 20,
                          'page': num}
                response = requests.get('https://restapi.amap.com/v3/place/text', params=params)
                map_results = json.loads(response.text)
                print(map_results)
    return map_results
# Ad-hoc smoke test: run one crawl and show the POI payload of the last page.
json_text = fun1()
print(json_text['pois'])
print(len(json_text['pois']))
|
3,480 | 25d210144ef209fd5e4ff7e4e4c2e77fd7eb79ac | # Copyright 2017 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
DEPS = [
'step',
]
def RunSteps(api):
    # A non-string/non-Placeholder item (here a dict) in a step command must
    # be rejected by the step module; assert the exact error message.
    # NOTE(review): the message text matches the Python 2 repr
    # ("<type 'dict'>"); this check would need updating under Python 3.
    try:
        api.step('test step', [{}])
    except AssertionError as e:
        assert str(e) == 'Type <type \'dict\'> is not permitted. cmd is [{}]'
def GenTests(api):
yield api.test('basic') |
3,481 | de3e952ad43fe7e323e8f975a45bbd4eec7192db | class Node(object):
    def __init__(self, d, n=None):
        # d: payload stored in this node; n: next Node in the chain, or None.
        self.data = d
        self.next_node = n
    def get_data(self):
        # Return the payload held by this node.
        return self.data
    def set_data(self, d):
        # Replace this node's payload.
        self.data = d
    def get_next(self):
        # Return the following Node, or None at the tail.
        return self.next_node
    def set_next(self, n):
        # Re-link this node to a new successor.
        self.next_node=n
class LinkedList(object):
    """Singly linked list of Node objects.

    ``root`` is the head of the chain (or None when empty); ``size`` caches
    the element count so ``get_size`` is O(1).
    """
    def __init__(self, r=None):
        self.root = r
        self.size = 0
    def get_size(self):
        """Return the number of elements currently in the list."""
        return self.size
    def add(self, d):
        """Prepend a node holding ``d`` and return ``d``.

        BUG FIX: the new head is now linked to the previous head; the
        original created an unlinked node, silently dropping the rest of
        the list on every prepend.
        """
        self.root = Node(d, self.root)
        self.size += 1
        return d
    def append(self, d):
        """Append ``d`` at the tail (O(n)) and return it."""
        if self.root is None:
            # BUG FIX (robustness): appending to an empty list previously
            # fell through the loop and did nothing; create the head node.
            self.root = Node(d)
            self.size += 1
            return d
        this_node = self.root
        while this_node.get_next() is not None:
            this_node = this_node.get_next()
        this_node.set_next(Node(d))
        self.size += 1
        return d
    def remove(self, d):
        """Remove the first node whose data equals ``d``.

        Returns True on success, False when ``d`` is not present.
        """
        this_node = self.root
        prev_node = None
        while this_node is not None:
            if this_node.get_data() == d:
                if prev_node:
                    prev_node.set_next(this_node.get_next())
                else:
                    # BUG FIX (robustness): removing the head now relinks
                    # root; previously the head node was left in place.
                    self.root = this_node.get_next()
                self.size -= 1
                return True
            else:
                prev_node = this_node
                this_node = this_node.get_next()
        # BUG FIX: was ``return false`` (lowercase), a NameError at runtime.
        return False
    def find(self, d):
        """Return True if some node's data equals ``d``."""
        this_node = self.root
        while this_node is not None:
            if this_node.get_data() == d:
                return True
            else:
                this_node = this_node.get_next()
        return False
# Demo driver (Python 2 print statements): exercise add/append/find/remove.
myList=LinkedList()
myList.add(1)
myList.append(2)
print myList.find(2)
print myList.get_size()
myList.remove(1)
print myList.find(2)
print myList.get_size()
|
3,482 | 15fea8a84accdfc2dac87c111cbe8bfca61fe801 | import sys
from pprint import pprint
# Redirect stdin so the interactive input() calls read the bundled sample
# file (competitive-programming style local testing).
sys.stdin = open("sample_input.txt", "r")
test_case = int(input())
"""
for test in range(test_case):
nxn_array, palin_len = map(int, input().split())
## 2차 배열 만들기 => 행스트링 리스트
order_2nd_array = []
for i in range(nxn_array):
order_2nd_array.append(input())
## 열 나열을 갖는 2차 배열 만들기 => 열스트링 리스트
order_2nd_array_col = []
for i in range(nxn_array):
tmp_string = ''
for row in order_2nd_array:
tmp_string += row[i]
order_2nd_array_col.append(tmp_string)
## '행 palin 확인 법' 부분에서 쓰일 변수
if palin_len % 2 == 0:
even_comp = 1
else:
even_comp = 0
## palindrome 검사
palin_check = 0
while palin_check != 1:
## 행 검사
for row in order_2nd_array:
if palin_check == 1:
break
else:
## 행 palin 확인 법 => palin string을 반으로 자른 후 뒷 부분은 역방향 정렬 후 앞부분과 비교
for i in range(nxn_array - palin_len + 1):
if (row[i] == row[i + palin_len - 1]) and (row[i:i + palin_len // 2] == row[i + palin_len:i + palin_len // 2 - even_comp:-1]):
palin_check = 1
palin_str = row[i:i + palin_len]
break
## 중간 검사: 행 검사에서 이미 찾았으면 빠져나와
if palin_check == 1:
break
## 열스트링 리스트 검사
for row in order_2nd_array_col:
if palin_check == 1:
break
else:
## 열스트링 리스트의 행 palin 확인 법
for i in range(nxn_array - palin_len + 1):
if (row[i] == row[i + palin_len - 1]) and (row[i:i + palin_len // 2] == row[i + palin_len:i + palin_len // 2 - even_comp:-1]):
palin_check = 1
palin_str = row[i:i + palin_len]
break
print('#{} {}'.format(test+1, palin_str))
## 너무 복잡하다. 다른 방법 찾아보자
"""
"""
for test in range(test_case):
# 가능한 모든 경우를 조사하는 방법
nxn_array, palin_len = map(int, input().split())
## 2차 배열 만들기 => 행스트링 리스트
order_2nd_array = []
for i in range(nxn_array):
order_2nd_array.append(input())
## 열 나열을 갖는 2차 배열 만들기 => 열스트링 리스트
order_2nd_array_col = []
for i in range(nxn_array):
tmp_string = ''
for row in order_2nd_array:
tmp_string += row[i]
order_2nd_array_col.append(tmp_string)
palin_check = 0
while palin_check != 1:
# 시작위치 0 ~ N - M
for row in order_2nd_array:
for start in range(nxn_array - palin_len + 1):
end = start + palin_len - 1
for i in range(palin_len//2):
if row[start + i] != row[end - i]:
break
else:
palin_result = row[start:end]
palin_check = 1
if palin_check != 1:
for row in order_2nd_array_col:
for start in range(nxn_array - palin_len + 1):
end = start + palin_len - 1
for i in range(palin_len // 2):
if row[start + i] != row[end - i]:
break
else:
palin_result = row[start:end]
palin_check = 1
print(palin_result)
"""
|
3,483 | ca0bca24509df2bf0bd07fb2f31d3e7909957405 | # coding: utf-8
from datetime import datetime
#from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from persol_users.models import PersolUser
from django.db.models import Q, Count
# アンケート
from questions.models import Question
@python_2_unicode_compatible
class Person(models.Model):
    """Minimal named person record."""
    # Display name; also used as the model's string representation.
    name = models.CharField(max_length=50)

    # NOTE: a stray dead ``pass`` statement after the field was removed.
    def __str__(self):
        return self.name
class Event(models.Model):
    """A recruitable event (meet-up) created by a PersolUser, with optional
    date/location surveys, likes, watchers and joined members."""
    # Recruitment status codes (stored as a single character).
    STATUS_CHOICES = (
        ("N","募集中"),
        ("E","募集終了")
    )
    author = models.ForeignKey(PersolUser, verbose_name='作成者', related_name='author')
    event_name = models.CharField('イベントタイトル', max_length=200)
    event_image = models.ImageField('イメージ画像', upload_to='event_image', blank=True, null=True)
    event_datetime = models.DateTimeField('開催日時', null=True)
    event_location = models.CharField('開催場所', max_length=200, blank=True)
    num_of_members = models.IntegerField('募集人数')
    dead_line = models.DateField('募集締切日', blank=True,null=True)
    overview = models.TextField('イベント概要')
    # comment = models.ManyToManyField(Comment)
    like = models.ManyToManyField(PersolUser,verbose_name='いいね', related_name='like')
    watch = models.ManyToManyField(PersolUser,verbose_name='ウォッチ', related_name='Watch')
    members = models.ManyToManyField(PersolUser)
    search_tag = models.TextField('検索用タグ', blank=True, null=True)
    event_status = models.CharField('イベントステータス', max_length=1, choices=STATUS_CHOICES, blank=False, null=False, default='N')
    # Survey relations: optional polls used to decide the date and location.
    question_date = models.OneToOneField(Question, related_name='event_date', blank=True, null=True)
    question_location = models.OneToOneField(Question, related_name='event_location', blank=True, null=True)
    def __str__(self):
        return self.event_name
    def nokori(self):
        # Remaining capacity = recruitment target minus current member count.
        now_member = self.members.count()
        return self.num_of_members - now_member
    def like_list(self):
        # All users who "liked" this event.
        return self.like.all()
    def event_date(self):
        # Formatted event date, or "" while the date is still undecided.
        try:
            return self.event_datetime.strftime('%Y.%m.%d')
        except AttributeError:
            return ""
    def event_starttime(self):
        # Formatted start time, or "" while the date is still undecided.
        try:
            return self.event_datetime.strftime('%H:%M~')
        except AttributeError:
            return ""
    def nobreak_overview(self):
        # Overview with newlines stripped, for one-line listings.
        return self.overview.replace("\n", "")
    # Delete the date ('d') or location ('l') survey attached to the event.
    # NOTE(review): any other ``type`` value leaves ``q`` unbound and raises
    # NameError at ``if q:``; parameter name also shadows the builtin.
    def question_delete(self, type):
        if type == 'd':
            q = self.question_date
            self.question_date = None
        elif type == 'l':
            q = self.question_location
            self.question_location = None
        if q:
            q.delete()
    # Return the survey, or a default dummy survey when none is attached.
    def question_date_or_dummy(self):
        qd = self.question_date
        if not qd:
            qd = Question.get_default_question('d')
        return qd
    def question_location_or_dummy(self):
        ql = self.question_location
        if not ql:
            ql = Question.get_default_question('l')
        return ql
    def mailing_list(self):
        # Mail addresses of members plus watchers (may contain duplicates).
        member_addr=[member.mail_address for member in self.members.all()]
        watcher_addr=[watcher.mail_address for watcher in self.watch.all()]
        ml=member_addr+watcher_addr
        return ml
    def status(self):
        # Human-readable recruitment status label.
        if self.event_status == "N": return "募集中"
        if self.event_status == "E": return "イベント終了"
        else:return ""
    def datetimeForIndex(self):
        # Listing-page value: the datetime, "未定" (undecided) when no
        # survey exists, or "アンケート中" (survey in progress).
        if self.event_datetime:
            return self.event_datetime
        if not self.question_date:
            return "未定"
        else:
            return "アンケート中"
    def locationForIndex(self):
        # Same convention as datetimeForIndex, for the location.
        if self.event_location:
            return self.event_location
        if not self.question_location:
            return "未定"
        else:
            return "アンケート中"
    def oldstatus(self):
        # CSS class 'old' for events whose start time has passed.
        # NOTE(review): comparing a possibly timezone-aware DateTimeField
        # with naive datetime.now() raises on aware values, and TypeErrors
        # when event_datetime is None — confirm callers guard this.
        if self.event_datetime < datetime.now():
            return 'old'
        else:
            return ''
"""
python manage.py makemigrations
python manage.py migrate
""" |
3,484 | ab3609c27fa002d79735c5d5c09ec7a52fedd040 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 23:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.2, 2016-02-07).

    Adds a ``skins_type`` choice field to Event/EventTemplate and their
    historical (django-simple-history) twins, and refreshes the choices of
    ``event_type``/``scoring``. Generated migrations should not be
    hand-edited beyond documentation.
    """
    dependencies = [
        ('events', '0005_auto_20160207_1529'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='eventtemplate',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='historicalevent',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='historicaleventtemplate',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AlterField(
            model_name='event',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='event',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='eventtemplate',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='eventtemplate',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='historicalevent',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='historicalevent',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='historicaleventtemplate',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='historicaleventtemplate',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
    ]
|
3,485 | df3dcbf3c8d621f5db2a07765a0a28e7626387d9 | #!/usr/bin/env python3
# Rhino Motor Driver (RMCS 2303) - Basic Modbus Communication
# -----------------------------------------------------------
"""
BSD 3-Clause License
Copyright (c) 2021, Rajesh Subramanian
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import time
import traceback
import minimalmodbus as modbus
import rhino_params as rhino
class Controller:
    def __init__(self, port_name, slave_address):
        """Open a Modbus-ASCII connection to one RMCS-2303 drive and move it
        to a known state (brake, home, default acceleration/speed).

        port_name: serial device path (e.g. /dev/ttyUSB0).
        slave_address: Modbus slave id of the drive.
        """
        # Parameters
        self.__instrument = modbus.Instrument(port_name, slave_address, modbus.MODE_ASCII)
        self.__instrument.serial.baudrate = 9600
        self.__instrument.serial.parity = modbus.serial.PARITY_NONE
        self.__instrument.bytesize = 8
        self.__instrument.stopbits = 1
        self.__instrument.timeout = 5  # seconds
        self.__instrument.write_timeout = 5  # seconds
        self.__instrument.clear_buffers_before_each_transaction = True
        # self.__instrument.close_port_after_each_call = True
        self.__time_delay = 0.001 #0.001 # default: 1 ms
        self.__lock_resource = False  # To prevent issuing simultaneous commands to RMCS2303 motor controller. Eg.
        # trying to read encoder value while writing motor enable command
        self.name = self.extract_name_from_port_name(port_name)
        # Last commanded rotation direction, one of the three codes below.
        self.__status_rotation_direction = 0
        self.__CW = 1  # clockwise rotation status
        self.__CCW = -1  # counter clockwise rotation status
        self.__IDLE = 0  # no rotation status
        # Functions
        self.__set_lines_per_rotation(rhino.LINES_PER_ROTATION_DEFAULT)
        self.brake()
        self.__go_home()
        self.set_acceleration(rhino.ACCELERATION_DEFAULT)
        self.set_speed(rhino.SPEED_DEFAULT)
# Private Functions
# -----------------
    @staticmethod
    def __convert_unsigned32_to_signed32(unsigned32_data):
        """Shift a uint32 register value into signed int32 space
        (value - 2**31); implicitly returns None for None input."""
        # UInt32 range: 0 to 4294967295
        # Int32 range: -2147483648 to 2147483647
        mid_uint32 = 2147483648
        if unsigned32_data is not None:
            signed32_data = int(unsigned32_data - mid_uint32)
            return signed32_data
    @staticmethod
    def __convert_signed32_to_signed16(signed32_data):
        """Return the high 16 bits of a 32-bit value (arithmetic right
        shift), i.e. its most-significant word."""
        # Int16 range: -32768 to 32767
        signed16_data = signed32_data >> 16
        return signed16_data
def __read_from_register(self, message_list):
while True: # Attempt sending message until the controller is free
try:
if not self.__lock_resource: # Check if controller is in use
self.__lock_resource = True
data = self.__instrument.read_register(message_list[0], message_list[1], message_list[2])
time.sleep(self.__time_delay)
self.__lock_resource = False
return data
except KeyboardInterrupt:
print("Keyboard Interrupt: " + self.name)
except modbus.ModbusException as e:
print("ModbusException at " + self.name + ": " + str(e))
except modbus.serial.SerialException as e:
print("Modbus Serial Exception at " + self.name + ": " + str(e))
except modbus.InvalidResponseError as e:
print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
except Exception as e:
print("Motor Driver Exception at " + self.name + ": " + str(e))
print(traceback.format_exc())
time.sleep(self.__time_delay)
def __read_from_registers(self, message_list):
    """Read two consecutive 16-bit registers and combine them into one 32-bit value.

    Retries forever until the shared bus lock is free and the read succeeds.
    """
    while True:  # Attempt sending message until the controller is free
        try:
            if not self.__lock_resource:  # Check if controller is in use
                self.__lock_resource = True
                try:
                    register_size = 16
                    data = self.__instrument.read_registers(message_list[0], message_list[1], message_list[2])
                    lsb = data[0]
                    msb = data[1]
                    # Combine the two 16-bit registers into one 32-bit value.
                    combined_data = (msb << register_size) + lsb
                    time.sleep(self.__time_delay)
                    return combined_data
                finally:
                    # Always release the lock, even when the read raises.
                    # The original left it held on error, deadlocking every
                    # later command on this controller.
                    self.__lock_resource = False
        except KeyboardInterrupt:
            print("Keyboard Interrupt: " + self.name)
        except modbus.ModbusException as e:
            print("ModbusException at " + self.name + ": " + str(e))
        except modbus.serial.SerialException as e:
            print("Modbus Serial Exception at " + self.name + ": " + str(e))
        except modbus.InvalidResponseError as e:
            print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
        except Exception as e:
            print("Motor Driver Exception at " + self.name + ": " + str(e))
            print(traceback.format_exc())
        time.sleep(self.__time_delay)
def __write_to_register(self, message_list):
    """Write message_list = [address, value, decimals, functioncode] to the controller.

    Retries forever until the shared bus lock is free and the write succeeds.
    """
    while True:  # Attempt sending message until the controller is free
        try:
            if not self.__lock_resource:  # Check if controller is in use
                self.__lock_resource = True
                try:
                    self.__instrument.write_register(message_list[0], message_list[1], message_list[2], message_list[3])
                    time.sleep(self.__time_delay)
                    return
                finally:
                    # Always release the lock, even when the write raises.
                    # The original left it held on error, deadlocking every
                    # later command on this controller.
                    self.__lock_resource = False
        except KeyboardInterrupt:
            print("Keyboard Interrupt: " + self.name)
        except modbus.ModbusException as e:
            print("ModbusException at " + self.name + ": " + str(e))
        except modbus.serial.SerialException as e:
            print("Modbus Serial Exception at " + self.name + ": " + str(e))
        except modbus.InvalidResponseError as e:
            print("Modbus Invalid Response Exception at " + self.name + ": " + str(e))
        except Exception as e:
            print("Motor Driver Exception at " + self.name + ": " + str(e))
            print(traceback.format_exc())
        time.sleep(self.__time_delay)
def __go_home(self):
    """Send the home-position command to the controller."""
    self.__write_to_register(rhino.HOME_POSITION_MESSAGE)

def __set_lines_per_rotation(self, lines_per_rotation):
    """Configure the encoder lines-per-rotation used for position feedback."""
    # Work on a copy: the original assigned into the module-level
    # rhino.LINES_PER_ROTATION_MESSAGE list, permanently mutating the shared
    # template for every instance.
    message = list(rhino.LINES_PER_ROTATION_MESSAGE)
    message[rhino.DATA_INDEX] = lines_per_rotation
    self.__write_to_register(message)
# Public Functions
# ----------------
@staticmethod
def extract_name_from_port_name(port_name):
chars = port_name.split("/")
name = chars[len(chars) - 1]
return name
@staticmethod
def convert_rad_per_sec_to_rpm(radians_per_sec):
    """Convert an angular speed in rad/s to gearbox-scaled motor rpm."""
    # 1 rad/s = 60 / (2*pi) rpm ≈ 9.549297 rpm; the gear ratio scales the
    # shaft speed up to the motor-side speed.
    return radians_per_sec * 9.549297 * rhino.GEAR_RATIO

@staticmethod
def convert_rpm_to_rad_per_sec(rpm):
    """Convert a motor rpm to a gearbox-scaled angular speed in rad/s."""
    # 1 rpm ≈ 0.10472 rad/s; divide by the gear ratio to get shaft speed.
    return rpm * 0.10472 / rhino.GEAR_RATIO
def set_speed(self, speed):
    """Set the target speed from rad/s, clamped to [SPEED_MIN, SPEED_MAX] rpm.

    The sign is discarded: rotation direction is controlled by the
    turn_motor_* commands, not by the speed setting.
    """
    speed_rpm = abs(int(self.convert_rad_per_sec_to_rpm(speed)))
    speed_rpm = max(rhino.SPEED_MIN, min(speed_rpm, rhino.SPEED_MAX))
    # Copy the message template; the original mutated the module-level
    # rhino.SPEED_MESSAGE list in place, leaking the last value into every
    # later use of the shared constant.
    message = list(rhino.SPEED_MESSAGE)
    message[rhino.DATA_INDEX] = speed_rpm
    self.__write_to_register(message)

def set_acceleration(self, acceleration):
    """Set the acceleration, clamped to [ACCELERATION_MIN, ACCELERATION_MAX]."""
    acceleration = max(rhino.ACCELERATION_MIN, min(acceleration, rhino.ACCELERATION_MAX))
    # Copy — do not mutate the shared rhino.ACCELERATION_MESSAGE template.
    message = list(rhino.ACCELERATION_MESSAGE)
    message[rhino.DATA_INDEX] = acceleration
    self.__write_to_register(message)
def turn_motor_cw(self):
    """Start clockwise rotation and remember the direction for brake()."""
    message = rhino.TURN_MOTOR_CW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__CW

def turn_motor_ccw(self):
    """Start counter-clockwise rotation and remember the direction for brake()."""
    message = rhino.TURN_MOTOR_CCW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__CCW

def stop_rotation_cw(self):
    """Send the stop command for a clockwise rotation and mark the motor idle."""
    message = rhino.STOP_MOTOR_CW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE

def stop_rotation_ccw(self):
    """Send the stop command for a counter-clockwise rotation and mark the motor idle."""
    message = rhino.STOP_MOTOR_CCW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE

def stop_rotation(self):
    """Send the direction-independent stop command and mark the motor idle."""
    message = rhino.STOP_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE

def emergency_stop(self):
    """Send the emergency-stop command and mark the motor idle."""
    message = rhino.EMERGENCY_STOP_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE

def get_position_32bit(self):
    """Read the raw 32-bit encoder position (signed conversion is disabled below).

    NOTE(review): identical to get_position_raw() — one of the two is redundant.
    """
    message = rhino.POSITION_FEEDBACK_MESSAGE
    position = self.__read_from_registers(message)
    # position = self.__convert_unsigned32_to_signed32(position)
    return position

def get_position_16bit(self):
    """Read the encoder position reduced to its upper 16 bits (coarse position)."""
    message = rhino.POSITION_FEEDBACK_MESSAGE
    position = self.__read_from_registers(message)
    position_32bit = self.__convert_unsigned32_to_signed32(position)
    position_16bit = self.__convert_signed32_to_signed16(position_32bit)
    return position_16bit

def get_position_raw(self):
    """Read the raw 32-bit encoder position without any conversion."""
    message = rhino.POSITION_FEEDBACK_MESSAGE
    position = self.__read_from_registers(message)
    return position

def get_speed(self):
    """Read the speed feedback register and shift it into signed range."""
    message = rhino.SPEED_FEEDBACK_MESSAGE
    speed = self.__read_from_register(message)
    speed = self.__convert_unsigned32_to_signed32(speed)
    return speed

def brake_cw(self):
    """Send the clockwise brake command and mark the motor idle."""
    message = rhino.BRAKE_CW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE

def brake_ccw(self):
    """Send the counter-clockwise brake command and mark the motor idle."""
    message = rhino.BRAKE_CCW_MESSAGE
    self.__write_to_register(message)
    self.__status_rotation_direction = self.__IDLE
def brake(self):
    """Apply the brake matching the current rotation direction, then mark idle."""
    direction = self.__status_rotation_direction
    if direction == self.__CW:
        self.brake_cw()
        print(self.name + ": Brake CW")
        self.__status_rotation_direction = self.__IDLE
    elif direction == self.__CCW:
        self.brake_ccw()
        print(self.name + ": Brake CCW")
        self.__status_rotation_direction = self.__IDLE
    elif direction == self.__IDLE:
        # Nothing to brake: the motor was not commanded to rotate.
        print(self.name + ": Motor idle")
    else:
        print(self.name + ": Motor Unknown Rotation Status")
|
3,486 | 53dd753356d8a8d60975c8f4cdaf20de66c2db46 | import times_series_learning as tsl
import numpy as np
import time
import datetime as dt
import sortedcontainers
import pandas as pd
from collections import defaultdict
class ServerProfileLearning(object):
    """Learn and monitor a per-server time-series activity profile.

    Wraps one TimesSeriesLearning model per server (the per-cluster variant is
    kept as commented-out code) and maintains the distance distribution and
    per-period measures used for anomaly detection.
    """

    def __init__(self, data, parameters, distribution, distribution_period, level_threshold,
                 processus=True, moving_window=60, train_mode=True, verbose=False):
        # Clustering by label is currently disabled: a single profile is learned.
        self.label_number = 1  # len(np.unique(data['label'].values))
        self.label = 1  # np.unique(data['label'].values)
        self.data = data
        self.parameters = np.ones((self.label_number + 1, 4)) * parameters  # see parameters in times_series_learning
        self.data_prep = None
        # Hostname read from the second column of the first row — assumes that
        # column holds the host name; TODO confirm input schema.
        self.hostname = self.data.iloc[0, 1]
        self.server_profile = dict()
        self.distribution = distribution  # distribution of distance list, same for all servers/all clusters; be careful: sorted containers
        self.distribution_period = distribution_period  # distribution period where we compute metrics
        self.level_threshold = level_threshold  # level we consider for outliers
        self.verbose = verbose
        self.processus = processus
        self.moving_window = moving_window
        self.train_mode = train_mode
        self.measures = self.initdict()
        self.timestamp_anomaly = pd.DataFrame(columns=['Timestamp','Area_Difference'])

    def initdict(self):
        """Build one empty measures bucket per distribution period of a day.

        NOTE(review): 24*6*60 presumably counts 10-second slots over 24 h —
        confirm the time unit of distribution_period.
        """
        d = defaultdict(dict)
        for i in range(int((24*6*60)/self.distribution_period)+1):
            d[i] = {}
            d[i]['Area_Difference'] = []
            d[i]['Max_Spread'] = []
        return d

    # sortedcontainers.SortedDict(sortedcontainers.SortedList())

    def preprocess_data(self, data):
        """Drop all columns except the first and last, then group rows by label."""
        data_prep = data.drop(self.data.columns[1:len(self.data.columns) - 1], axis=1)
        data_prep = data_prep.groupby(['label'])
        return data_prep

    def set_profile(self):
        """Fit a single 'general' TimesSeriesLearning profile on this server's data."""
        t0 = time.time()
        t = tsl.TimesSeriesLearning(self.parameters[0, :],
                                    self.distribution_period,
                                    self.level_threshold, self.timestamp_anomaly, self.processus)
        t.set_profile(self.data)
        self.server_profile[self.hostname + "_general"] = t
        # Per-cluster training, kept for reference:
        #self.data_prep = self.preprocess_data(self.data)
        # i = 0
        # for k, v in self.data_prep:
        #     t = tsl.TimesSeriesLearning(self.parameters[i, :],
        #                                 self.distribution_period, self.level_threshold, self.processus)
        #     t.set_profile(v)
        #     self.server_profile[self.hostname + "_" + str(k)] = t
        #     print('cluster number ' + str(k) + ' of hostname: ' + self.hostname)
        #     i += 1
        print("Learning Server" + self.hostname + " Done in " + str(time.time() - t0))

    # Process distance and update distribution
    def process_distance(self, streaming_data):
        """Score a streaming window against the learned profile.

        Returns the tuple produced by compute_distance_profile:
        (anomaly, max_spread, min_spread, d, date, threshold, quant).
        """
        t0 = time.time()
        cluster_name = self.hostname + "_general"
        t = self.server_profile[cluster_name]
        anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(streaming_data,
                                                                                               self.distribution,
                                                                                               self.measures,
                                                                                               self.train_mode,
                                                                                               self.verbose)
        # Per-cluster scoring, kept for reference:
        #streaming_data_prep = self.preprocess_data(streaming_data)
        # for k, v in streaming_data_prep:
        #     cluster_name = self.hostname + "_" + str(k)
        #     if cluster_name in self.server_profile.keys():
        #         t = self.server_profile[cluster_name]
        #         anomaly, max_spread, min_spread, d, date, threshold, quant = t.compute_distance_profile(v,
        #                                                                                                 self.distribution,
        #                                                                                                 self.train_mode,
        #                                                                                                 self.verbose)
        #         #if anomaly:
        #         #    break
        #     else:
        #         print('cluster: ', k)
        #         print("Logs does not belong to any cluster")
        #         break
        #print("stream proccessed in :", time.time()-t0)
        return anomaly, max_spread, min_spread, d, date, threshold, quant

    # def simulate_streaming(self, streaming_data, date_start):
    #     streaming_data.index = pd.to_datetime(streaming_data.timestamp, format='%Y-%m-%d %H:%M:%S')
    #     streaming_data = streaming_data.sort_index()
    #     data_list = []
    #     date = streaming_data.index[0]
    #     while date < streaming_data.index[-1]:
    #         data_to_add = streaming_data.loc[date.isoformat():
    #                                          (date + dt.timedelta(minutes=self.parameters[2, 0]))].reset_index(drop=True)
    #         if data_to_add.shape[0] > 0:
    #             data_list.append(data_to_add)
    #         date += dt.timedelta(minutes=self.parameters[0, 2])
    #
    #     return data
|
3,487 | 0d3cc85cd18ee197b24c8b01b71afe82110bfad2 | """
Stirng - Liste - Dosya
- Fonksiyon yazıyoruz.
- Bu fonksiyon iki parametre alacak. (dosya, string)
1. sorun : Dosyanın içinde string var ise True döndürecek yok ise False
2. sorun : Dosyanın içinde string bulunursa ilk bulunduğu konumu return edecek
3. sorun : Dosyanın içerisinde yazdığımız strinng kaç kere var onu liste halinde return eden fonksiyon
"""
def fonkString(text, string):
    """Report whether *string* occurs in *text*: first index, count, and every position."""
    if string in text:
        print("TRUE")
        print(text.index(string), ". sirada ilk", string, "bulundu")
        print(text.count(string), "tane", string, "var")
        # Collect every index where the (single-character) needle occurs.
        positions = [i for i in range(len(text)) if text[i] == string]
        for pos in positions:
            print(pos)
    else:
        print("FALSE")


fonkString("Programlama laboratuvari calisma sorulari dosya string liste kullanma ", "m")
|
3,488 | f047afeb6462ab01a8fea1f3c8693608335eb960 | import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
import random
import requests
import os
#Discord Tech Stuff
BOT_PREFIX = ("!")
# A commands.Bot is itself a discord.Client, so only one object is needed;
# the extra discord.Client() instance was dead (immediately overwritten).
client = Bot(command_prefix=BOT_PREFIX)
#Functions of the Funny Coin
@client.command()
async def wasitfunny():
    """Reply with a random verdict on whether the last joke was funny."""
    possible_responses = [
        "Per the judgement from the committee of comedy, we have decided that the joke was indeed funny",
        "Per the judgement from the committee of comedy, we have decided that the joke was not fucking funny you cretin",
    ]
    # NOTE(review): client.say is the discord.py 0.x API — confirm library version.
    await client.say(random.choice(possible_responses))

@client.command()
async def isitfunny(funny_subject):
    """Reply with a random funniness verdict about *funny_subject*."""
    responses = [
        "Nah that wasn't really funny",
        "There is no funny present",
        "YOU FORGOT THE FUNNY",
        "There is no comedy present here",
        "hahaaaaa",
        "Funnt",
        "Hey man that's pretty funny thanks for sharing",
        "jajajajajajajajajaja",
    ]
    await client.say("regarding " + str(funny_subject) + ", " + random.choice(responses))

@client.command()
async def isitironic(irony_subjects):
    """Reply with a random irony verdict (the subject argument is not used in the reply)."""
    irony_responses = [
        "one irony point",
        "that's pretty ironic man",
        "ironic",
        "no irony present",
        "minus irony point",
        "where is the irony? I was told there would be irony?",
    ]
    await client.say(random.choice(irony_responses))

# Funny-coin tallies for Alex, Muhammad, Chris and Anthony. Kept as module
# globals, so counts reset whenever the bot restarts.
# Want to add system that has coins for everyone and you can make a profile for coins.
afc = 0
mfc = 0
cfc = 0
anfc = 0

@client.command()
async def alexfc(anum):
    """Add *anum* funny coins to Alex's tally and announce the new total."""
    global afc
    afc += int(anum)
    await client.say("Alex has " + str(afc) + " funny coins")

@client.command()
async def muhfc(mnum):
    """Add *mnum* funny coins to Muhammad's tally and announce the new total."""
    global mfc
    mfc += int(mnum)
    await client.say("Muhammad has " + str(mfc) + " funny coins")

@client.command()
async def chrisfc(cnum):
    """Add *cnum* funny coins to Chris's tally and announce the new total."""
    global cfc
    cfc += int(cnum)
    await client.say("Chris has " + str(cfc) + " funny coins")

@client.command()
async def antfc(anthnum):
    """Add *anthnum* funny coins to Anthony's tally and announce the new total."""
    global anfc
    anfc += int(anthnum)
    await client.say("Anthony has " + str(anfc) + " funny coins")
# The environment variable name must be a string; the original passed the bare
# name TOKEN, which raised NameError before the bot could start.
client.run(str(os.environ.get("TOKEN")))
|
3,489 | acd5cf675522c90fc9fbc96bdeb52f66835626b4 | permissions = ('restart', )
commands = ('restart', )
def get_command(session, parsed_message):
    """Map any restart request to the command pair ('stop', 'restart').

    Both arguments are accepted for interface compatibility but not inspected:
    a restart always stops the server first, then starts it again.
    """
    return ('stop', 'restart')
def parse_response(permission, response):
    """Pass the backend response through unchanged (no per-permission formatting)."""
    return response
|
3,490 | 57d1fb805fce2ba75ea2962598e809ba35fd7eb6 | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes, functions and modules which contain static data."""
from __future__ import print_function
from __future__ import unicode_literals
import collections
from . import ast
from . import metrics
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, lines, ast_list, static_is_optional):
    """Print a warning for every non-const static data declaration in ast_list.

    filename/lines are used only to format warning locations. When
    static_is_optional is true (module scope), declarations without 'static'
    are reported too, so plain globals are also caught.
    """
    def print_warning(node, name):
        # One warning line in the conventional file:line: message format.
        print("{}:{}: static data '{}'".format(
            filename,
            lines.get_line_number(node.start),
            name))

    def find_static(function_node):
        # Scan a function body token-by-token: accumulate each 'static ... ;'
        # run, re-parse it as a declaration list, and recurse over the result.
        tokens = []
        static_found = False
        for node in function_node.body:
            if node.name == 'static':
                static_found = True
            if static_found:
                tokens.append(node)
            if node.name == ';':
                # NOTE(review): reached for every ';' — with no preceding
                # 'static' the token list is empty and the re-parse is a no-op.
                body = list(
                    ast.ASTBuilder(iter(tokens), filename).generate())
                _find_warnings(filename, lines, body, False)
                tokens = []
                static_found = False

    for node in ast_list:
        if isinstance(node, ast.VariableDeclaration):
            # Ignore 'static' at module scope so we can find globals too.
            is_static = 'static' in node.type.modifiers
            is_not_const = 'const' not in node.type.modifiers
            if is_not_const and (static_is_optional or is_static):
                print_warning(node, node.name)
        elif isinstance(node, ast.Function) and node.body:
            find_static(node)
        elif isinstance(node, ast.Class) and node.body:
            # Class members: 'static' is required here, so pass False.
            _find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
    """Warn about unused static variables.

    A static declaration counts as used when its name appears among the
    direct body nodes of any top-level function or class method.
    """
    static_declarations = {
        node.name: node
        for node in ast_list
        if (isinstance(node, ast.VariableDeclaration) and
            'static' in node.type.modifiers)
    }

    def find_variables_use(body):
        # Count direct references to known static names; nested scopes are
        # not descended into — NOTE(review): deeply nested uses may be missed.
        for child in body:
            if child.name in static_declarations:
                static_use_counts[child.name] += 1

    static_use_counts = collections.Counter()
    for node in ast_list:
        if isinstance(node, ast.Function) and node.body:
            find_variables_use(node.body)
        elif isinstance(node, ast.Class) and node.body:
            for child in node.body:
                if isinstance(child, ast.Function) and child.body:
                    find_variables_use(child.body)

    for name in sorted(static_declarations):
        if not static_use_counts[name]:
            print("{}:{}: unused variable '{}'".format(
                filename,
                lines.get_line_number(static_declarations[name].start),
                name))

def run(filename, source, entire_ast, include_paths, quiet):
    """Plugin entry point: report static-data and unused-static warnings.

    include_paths and quiet belong to the plugin interface but are unused here.
    """
    lines = metrics.Metrics(source)
    _find_warnings(filename, lines, entire_ast, True)
    _find_unused_static_warnings(filename, lines, entire_ast)
|
3,491 | 31d87b11f6a1f6304a2fef6dd1cd1c0ca292dfe8 | import os
import random
import cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.nets as nets
from skimage.transform import resize
import PIL
import numpy as np
import json
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
sess = tf.InteractiveSession()
image = tf.Variable(tf.zeros((299, 299, 3)))
# 加载inceptionV
# Load Inception v3.
def inception(image, reuse):
    """Build Inception-v3 over a [0,1] image (rescaled to [-1,1]).

    Returns (logits, probs, end_point) with the background class stripped.
    """
    preprocessed = tf.multiply(tf.subtract(tf.expand_dims(image, 0), 0.5), 2.0)
    arg_scope = nets.inception.inception_v3_arg_scope(weight_decay=0.0)
    with slim.arg_scope(arg_scope):
        logits, end_point = nets.inception.inception_v3(preprocessed, 1001, is_training=False, reuse=reuse)
        logits = logits[:, 1:]  # ignore background class
        probs = tf.nn.softmax(logits)  # probabilities
    return logits, probs, end_point

logits, probs, end_point = inception(image, reuse=False)
# Restore only the Inception weights from the checkpoint.
restore_vars = [
    var for var in tf.global_variables()
    if var.name.startswith('InceptionV3/')
]
saver = tf.train.Saver(restore_vars)
saver.restore(sess, "inception_v3.ckpt")
imagenet_json = 'imagenet.json'
with open(imagenet_json) as f:
    imagenet_labels = json.load(f)

# Classify the image before the attack.
def classify(img, correct_class=None, target_class=None):
    """Return the argmax class index for *img*; the class arguments are unused."""
    p = sess.run(probs, feed_dict={image: img})[0]
    return np.argmax(p)

# TODO
# Key graph nodes: obtain the 8x8 activation map for Grad-CAM.
layer_name = 'Mixed_7c'
num_class = 1000
conv_layer = end_point[layer_name]
pre_calss = tf.placeholder(tf.int32)
# One-hot over the 1000 classes, multiplied into the logits to isolate one class.
one_hot = tf.sparse_to_dense(pre_calss, [num_class], 1.0)
signal = tf.multiply(end_point['Logits'][:, 1:], one_hot)
loss = tf.reduce_mean(signal)
grads = tf.gradients(loss, conv_layer)[0]
# L2-normalized gradients (small epsilon avoids division by zero).
norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))
def grad_cam(x, class_num):
    """Return a 299x299 Grad-CAM heatmap (values in [0,1]) for *x* w.r.t. class_num."""
    output, grads_val = sess.run([conv_layer, norm_grads], feed_dict={image: x, pre_calss: class_num})
    output = output[0]
    grads_val = grads_val[0]
    # Channel importances: gradients averaged over the spatial dimensions.
    weights = np.mean(grads_val, axis=(0, 1))  # [512]
    cam = np.ones(output.shape[0: 2], dtype=np.float32)  # [7,7]
    # Taking a weighted average of the feature maps.
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]
    # Passing through ReLU, then normalize to [0,1].
    """"""
    # cam=np.exp(cam) / np.sum(np.exp(cam), axis=0)
    # cam=cam/np.max(cam)
    # cam3 = np.expand_dims(cam, axis=2)
    # cam3 = np.tile(cam3, [1, 1, 3])
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    cam3 = cv2.resize(cam, (299, 299))
    # cam3=np.expand_dims(cam,axis=2)
    # cam=np.tile(cam3,[1,1,3])
    # cam = resize(cam, (299, 299,3))
    return cam3
def get_gard_cam(img_path, img_class):
    """Load an image, resize/crop it to 299x299, and return (image, Grad-CAM map).

    The image is returned as a float32 HWC array scaled to [0,1].
    (Unused attack parameters demo_epsilon/demo_lr/demo_steps were removed.)
    """
    img = PIL.Image.open(img_path).convert('RGB')
    # Resize so the short side becomes 299 px, then crop the top-left 299x299.
    wide = img.width > img.height
    new_w = 299 if not wide else int(img.width * 299 / img.height)
    new_h = 299 if wide else int(img.height * 299 / img.width)
    img = img.resize((new_w, new_h)).crop((0, 0, 299, 299))
    img = (np.asarray(img) / 255.0).astype(np.float32)
    # Grad-CAM activation map of the original (pre-attack) image.
    rar_gard_cam = grad_cam(img, img_class)
    return img, rar_gard_cam
def show_img(file_name, img, rar, adv):
    """Save a 3-panel figure: original image, original CAM overlay, adversarial CAM overlay."""
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.imshow(img)
    plt.subplot(1, 3, 2)
    img = cv2.resize(img, (299, 299))
    img = img.astype(float)
    img /= img.max()
    # Colorize the heatmap (JET) and convert BGR->RGB for matplotlib.
    rar = cv2.applyColorMap(np.uint8(255 * rar), cv2.COLORMAP_JET)
    rar = cv2.cvtColor(rar, cv2.COLOR_BGR2RGB)
    # Blend heatmap into the image; alpha controls the overlay strength.
    alpha = 0.0072
    rar = img + alpha * rar
    rar /= rar.max()
    # Display and save
    plt.imshow(rar)
    plt.subplot(1, 3, 3)
    adv = cv2.applyColorMap(np.uint8(255 * adv), cv2.COLORMAP_JET)
    adv = cv2.cvtColor(adv, cv2.COLOR_BGR2RGB)
    alpha = 0.0072
    adv = img + alpha * adv
    adv /= adv.max()
    plt.imshow(adv)
    plt.savefig(file_name)
    plt.close()
sess.graph.finalize()
def get_label_name(index):
    """Return the label on line *index* (0-based) of imagenet_labels.txt.

    The original used f.read(index + 1), which returned the first index+1
    characters of the file instead of the requested label line.
    """
    with open('imagenet_labels.txt', 'r', encoding='utf8') as f:
        labels = f.read().splitlines()
    return labels[index]
if __name__ == '__main__':
    # Smoke test: print the first ImageNet label. The batch Grad-CAM loop
    # over the validation folder is kept below for reference.
    print(get_label_name(0))
    # for r,d,f in os.walk('img_val/n01440764'):
    #     for file in f:
    #         imgs=[]
    #         labels_file = 'imagenet_labels.txt'
    #         results_file = 'result.txt'
    #         print('img_val/n01440764/'+file)
    #         img, cam3 = get_gard_cam('img_val/n01440764/'+file, 0)
    #         show_img(img,cam3,cam3)
3,492 | 237724db5130926123a3a31be7070947ec7b01f3 | import sys
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate
from ... import config
def create_message(from_addr, to_addr, subject, message, encoding):
    """Build a plain-text MIME message with Subject/From/To/Date headers set."""
    body = MIMEText(message, 'plain', encoding)
    headers = {
        'Subject': subject,
        'From': from_addr,
        'To': to_addr,
        'Date': formatdate(),
    }
    for name, value in headers.items():
        body[name] = value
    return body
def send_via_gmail(from_addr, to_addr, body):
    """Send a prepared MIME message through Gmail's SMTP server using STARTTLS."""
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.ehlo()
    s.starttls()
    s.ehlo()
    # Credentials come from the package-level config mapping.
    s.login(config['gmail']['user'], config['gmail']['password'])
    s.sendmail(from_addr, [to_addr], body.as_string())
    s.close()

def gmail(message, to_addr):
    """Send *message* to *to_addr* from the configured Gmail account."""
    body = create_message(
        config['gmail']['user'], to_addr, '[Notification]', message, 'utf8')
    send_via_gmail(config['gmail']['user'], to_addr, body)
    return

if __name__ == '__main__':
    # Command-line usage: python gmail.py <address> <message>
    argvs = sys.argv
    argc = len(argvs)
    if (argc < 3):
        print('USAGE: python gmail.py address message')
        # NOTE(review): exits with status 0 on a usage error — confirm whether
        # a non-zero status is wanted.
        raise SystemExit(0)
    else:
        to_addr = argvs[1]
        message = argvs[2]
        gmail(message, to_addr)
|
3,493 | 7b4f46f6c286a7d0ef45079b2fd238b81d5f89eb | import cv2 #imports cv2 package
import numpy as np #imports numpy package
import matplotlib.pyplot as plt #imports matplotlib.pyplot package
# Detect road lane markings: grayscale -> Gaussian blur -> horizontal Sobel
# edges -> probabilistic Hough transform -> draw the detected line segments.
img_noblur = cv2.imread('road8.jpg')  # read the input image (BGR)
imgnew = img_noblur.copy()  # working copy for drawing the detected lines
img_noblur_grey = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2GRAY)  # convert BGR to grayscale
img = cv2.GaussianBlur(img_noblur_grey, (5, 5), 0)  # Gaussian blur to suppress noise before edge detection
sobelx = cv2.Sobel(img, -1, 1, 0, ksize=3)  # horizontal Sobel kernel (vertical edges), size 3
sobelx[sobelx < 100] = 0  # discard low-intensity edge responses (threshold 100)
lines = cv2.HoughLinesP(sobelx, 1, np.pi/180, 100)  # probabilistic Hough: 1 px / 1 degree resolution, 100-vote threshold
for x in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[x]:
        cv2.line(imgnew, (x1, y1), (x2, y2), (0, 255, 0), 5)  # draw each detected segment in green
imgnew = cv2.cvtColor(imgnew, cv2.COLOR_BGR2RGB)  # convert to RGB for matplotlib display
img_noblur = cv2.cvtColor(img_noblur, cv2.COLOR_BGR2RGB)  # convert the original too
plt.subplot(131), plt.imshow(img_noblur, cmap='gray')  # panel 1: original image
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(132), plt.imshow(sobelx, cmap='gray')  # panel 2: thresholded Sobel edge map
plt.title('Sobel'), plt.xticks([]), plt.yticks([])
plt.subplot(133), plt.imshow(imgnew, cmap='gray')  # panel 3: image with detected road markers
plt.title('Output'), plt.xticks([]), plt.yticks([])
plt.show()  # display the three-panel figure
3,494 | 1bb151171bbbb899456324056be3634e87b5c8fb | from floppy.node import Node, Input, Output, Tag, abstractNode
@abstractNode
class StringNode(Node):
    """Abstract base for all string-operation nodes; tags them 'StringOperations'."""
    Tag('StringOperations')

class StringAppend(StringNode):
    """Join two input strings, optionally separated by a delimiter.

    Inputs: First (str), Second (str), Delimiter (str, default '').
    Output: Joined (str) = First + Delimiter + Second.
    """
    Input('First', str)
    Input('Second', str)
    Input('Delimiter', str, optional=True, default='')
    Output('Joined', str)

    def run(self):
        super(StringAppend, self).run()
        self._Joined(self._Delimiter.join([self._First, self._Second]))

class ListToString(StringNode):
    """Convert every list element to str and join them with a delimiter.

    Inputs: List (any elements), Delimiter (str, default '').
    Output: Joined (str).
    """
    Input('List', object, list=True)
    Input('Delimiter', str, optional=True, default='')
    Output('Joined', str)

    def run(self):
        super(ListToString, self).run()
        string = []
        for element in self._List:
            string.append(str(element))
        self._Joined(self._Delimiter.join(string))
3,495 | 315996a783d7b95fd87374a8fe2602a572de071e | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import time
import json
import os
class OitYitikuscrapyDataPipeline(object):
    """Append each scraped item as one JSON line to a per-spider, per-day file."""

    def open_spider(self, spider):
        # Output directory keyed by the current date; one .json file per spider.
        path = 'D:\\xiti10001\\data\\{}\\'.format(time.strftime("%Y%m%d", time.localtime()))
        # path = 'd:\\OITData\\zujuan\\{0}\\{1}\\'.format(time.strftime("%Y%m%d", time.localtime()), spider.name)
        # exist_ok replaces the original exists()/pass/else dance and is
        # race-free when several spiders start at once.
        os.makedirs(path, exist_ok=True)
        self.file = codecs.open(path + spider.name + '.json', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        print('进程打印信息:', spider.name)
        # ensure_ascii=False keeps non-ASCII (CJK) text readable in the file.
        lines = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(lines)
        return item

    def close_spider(self, spider):
        self.file.close()
|
3,496 | b03960999fa30a55932ada7fbf731a3861b840ae | n = int(input())
# Read the remaining n integers from stdin, then report the two largest.
values = []
for _ in range(n):
    values.append(int(input()))
values.sort(reverse=True)
print(values[0])
print(values[1])
3,497 | 5229002103379ff10969e64289d5a0f36641c0a3 | auto_duration_sec = 15
# Tele-operated period length in seconds (135 s — presumably FRC match timing; confirm).
teleop_duration_sec = 135
3,498 | c45ffe8cba8d152e346182252dbc43e22eaf83e2 | from rest_framework.views import APIView
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
from theme.models import UserProfile
from hs_core.views import serializers
class UserInfo(APIView):
    @swagger_auto_schema(operation_description="Get information about the logged in user",
                         responses={200: serializers.UserInfoSerializer})
    def get(self, request):
        '''
        Get information about the logged in user
        :param request:
        :return: HttpResponse response containing **user_info**
        '''
        if not request.user.is_authenticated:
            # Anonymous users get placeholder values rather than a 401.
            return Response({"title": "None", "organization": "None", "state": "None", "country": "None",
                             "user_type": "None"})
        user_info = {"username": request.user.username}
        # Only include User fields that are actually populated.
        if request.user.email:
            user_info['email'] = request.user.email
        if request.user.first_name:
            user_info['first_name'] = request.user.first_name
        if request.user.id:
            user_info['id'] = request.user.id
        if request.user.last_name:
            user_info['last_name'] = request.user.last_name
        user_profile = UserProfile.objects.filter(user=request.user).first()
        # .first() returns None when the user has no profile row; the original
        # dereferenced it unconditionally and raised AttributeError.
        if user_profile is not None:
            if user_profile.title:
                user_info['title'] = user_profile.title
            if user_profile.organization:
                user_info['organization'] = user_profile.organization
            if user_profile.state and user_profile.state.strip() and user_profile.state != 'Unspecified':
                user_info['state'] = user_profile.state.strip()
            if user_profile.country and user_profile.country != 'Unspecified':
                user_info['country'] = user_profile.country
            if user_profile.user_type and user_profile.user_type.strip() and user_profile.user_type != 'Unspecified':
                user_info['user_type'] = user_profile.user_type.strip()
        return Response(user_info)
|
3,499 | a2344f405aa681daff12166b7aad1230652373de | from . import match
from . import mimetype
from .mimetype import MIMEType
def sniff_unknown(resource: bytes, sniff_scriptable: bool = False):  # might need more arguments
    """Sniff a resource whose supplied type is unknown (WHATWG §7.1). Not implemented."""
    raise NotImplementedError

def sniff_mislabeled_binary(resource: bytes) -> MIMEType:
    """Distinguish text from binary for the Apache text/plain bug (WHATWG §7.2). Not implemented."""
    raise NotImplementedError

def sniff_mislabeled_feed(resource: bytes) -> MIMEType:
    """Check whether an HTML-labelled resource is actually a feed (WHATWG §7.3). Not implemented."""
    raise NotImplementedError

def sniff(resource: bytes, mime_type_string: str = "unknown/unknown", no_sniff: bool = False, check_for_apache_bug: bool = False) -> str:
    """MIME sniffing algorithm (structured after the WHATWG MIME Sniffing spec).

    NOTE(review): annotated -> str but every branch returns a MIMEType.
    """
    mime_type = mimetype.parse_mime_type(mime_type_string)
    if mime_type.is_unknown():
        return sniff_unknown(resource, sniff_scriptable=not no_sniff)
    if no_sniff:
        # nosniff set: trust the supplied type verbatim.
        return mime_type
    if check_for_apache_bug:
        return sniff_mislabeled_binary(resource)
    if mime_type.is_xml():
        return mime_type
    if mime_type.essence() == "text/html":
        # NOTE(review): the result is discarded — the spec returns the
        # feed-sniffing result here; confirm a `return` is missing.
        sniff_mislabeled_feed(resource)
    if mime_type.is_image():  # TODO: implement checking supported image by user agent
        match_type = match.match_image_type_pattern(resource)
        if not match_type is None:
            return match_type
    if mime_type.is_video_audio():  # TODO: implement checking supported type by user agent
        # NOTE(review): calls the *image* matcher — likely a copy-paste slip;
        # confirm whether `match` exposes an audio/video pattern matcher.
        match_type = match.match_image_type_pattern(resource)
        if not match_type is None:
            return match_type
    return mime_type
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.