index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,600 | 8692346c9d7d0d0853b0fd68ace790b42072e7fd | from django.shortcuts import render
from rest_framework import viewsets
from .serializers import EmployeeInfoSerializer
from .models import EmployeeInfo
# Create your views here.
class EmployeeInfoViewSet(viewsets.ModelViewSet):
queryset = EmployeeInfo.objects.all().order_by('employeeID')
serializer_class = EmployeeInfoSerializer
|
994,601 | 18e8e5f2c33f846d1b951522bc845a052e4484e9 | '''2.5. Escreva um algoritmo que leia 2 valores, insira os em duas
variáveis e permute os valores entre elas. Ao fim, imprima o valor das variáveis
antes e depois da permutação.'''
a = int(input("Número 1: "))
b = int(input("Número 2: "))
print(a, b)
aux = a
a = b
b = aux
print(a, b)
|
994,602 | 2ac76baf745e4ddb6d71d8ebdb01beb300a24fef | import os
from os.path import join
from PIL import Image
import numpy as np
SIZE_FACE = 48
EMOTIONS = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral']
additional_images_dir = join(".", "images")
with open("./finetuning.csv", 'w') as output_file:
for image_filename in os.listdir(additional_images_dir):
mood_name = image_filename.split("_")[0]
mood = EMOTIONS.index(mood_name)
image_path = join(additional_images_dir, image_filename)
image = Image.open(image_path).resize((SIZE_FACE, SIZE_FACE)).convert("L")
image_as_array = np.array(image, dtype=np.uint8)
array_as_list = list(image_as_array.tostring())
data_string = " ".join(str(num) for num in array_as_list)
print(mood, data_string, "Training", sep=",", file=output_file)
|
994,603 | 28baad9e78f3bb71ac547026f5a786fe341874a9 | # Create your views here.
from django.shortcuts import render
def sign ( request ) :
"""comment here"""
return render( request, 'consultations/index.html', {} )
|
994,604 | 60748347d39b66edee12a44184fc4ca50b0cdc92 | import sqlite3
def read_from_db():
cases_dict=dict()
conn=sqlite3.connect('agriculture.db')
c=conn.cursor()
c.execute('SELECT survey_no,name,area,district,phone FROM land_records')
data = c.fetchall()
return data
print(read_from_db())
|
994,605 | 3c64c0fd0237bb995916fcdf70428c5c4246119d | from typing import List, Tuple
from constants import UTF_8
INPUT_FILE_NAME = "ferry_directions.txt"
class Boat:
def __init__(self):
self.x = 0
self.y = 0
self.waypoint_x = 10
self.waypoint_y = 1
def north(self, delta: int):
self.waypoint_y += delta
def south(self, delta: int):
self.waypoint_y -= delta
def east(self, delta: int):
self.waypoint_x += delta
def west(self, delta: int):
self.waypoint_x -= delta
def left(self, theta_delta: int):
if theta_delta == 180:
self.waypoint_x *= -1
self.waypoint_y *= -1
elif theta_delta == 90:
self.waypoint_x, self.waypoint_y = -self.waypoint_y, self.waypoint_x
elif theta_delta == 270:
self.waypoint_x, self.waypoint_y = self.waypoint_y, -self.waypoint_x
def right(self, theta_delta: int):
self.left(360 - theta_delta)
def forward(self, steps: int):
self.x += steps * self.waypoint_x
self.y += steps * self.waypoint_y
def read_input_file() -> List[Tuple[str, int]]:
instructions = list()
with open(INPUT_FILE_NAME, "r", encoding=UTF_8) as in_file:
for line_ in in_file:
line = line_.strip()
action_str = line[0]
action_number = int(line[1:])
instructions.append((action_str, action_number))
return instructions
def follow_instruction(action: str, value: int, boat: Boat):
if action == "N":
boat.north(value)
elif action == "S":
boat.south(value)
elif action == "E":
boat.east(value)
elif action == "W":
boat.west(value)
elif action == "L":
boat.left(value)
elif action == "R":
boat.right(value)
elif action == "F":
boat.forward(value)
else:
raise ValueError(f"Unknown action type: {action}")
return
if __name__ == "__main__":
all_instructions = read_input_file()
b = Boat()
for a_str, a_num in all_instructions:
follow_instruction(a_str, a_num, b)
print(f"boat ending location: ({b.x},{b.y})")
print(f"manhattan distance from origin:", abs(b.x) + abs(b.y))
|
994,606 | 47e424c3c11bc88f65c883d9127203492b5eb29e | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sortedListToBST(self, head: ListNode) -> TreeNode:
temp = head
cnt = 0
while temp:
temp = temp.next
cnt +=1
self.head = head
def helper(l,r):
if l>=r:
return
mid = (l+r)//2
left = helper(l,mid)
node = TreeNode(self.head.val)
self.head = self.head.next
right = helper(mid+1,r)
node.left = left
node.right = right
return node
return helper(0,cnt)
|
994,607 | fc5d112fe7b718b469b8e67b011c007ca0960e55 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 将一个正整数分解质因数。例如:输入90,打印出90=2*3*3*5。
n = int(input('请输入一个正整数:'))
temp = []
while n!=1:
for i in range(2,n+1):
if n%i == 0:
temp.append(i)
n = int(n/i)
break
print(temp) |
994,608 | 8a55d4aab9678915b89477c58df7dc0371f29565 | import os.path
import pandas as pd
def ERCC():
ercc = pd.read_table(os.path.dirname(__file__) + '/ERCC.tsv', index_col=1)
return ercc
def reference_templates():
''' Get XML templates to query Biomart with.
'''
with open(os.path.dirname(__file__) + '/template_transcriptome.xml') as fh:
tx_template = fh.read()
with open(os.path.dirname(__file__) + '/template_gene_annotation.xml') as fh:
ga_template = fh.read()
with open(os.path.dirname(__file__) + '/template_genemap.xml') as fh:
gm_template = fh.read()
return tx_template, ga_template, gm_template
|
994,609 | 42b582db035d367e4b9c6451a7bc9510d8de5967 | for i in range(5):
for j in range(5):
print("({}, {})".format(i, j), end ="\t")
print()
# R U L D
dx = [0, -1, 0, 1]
dy = [1, 0, -1, 0]
x, y = 1, 1
#data = list(map(str, input().split()))
data = 'R R R U D'
print(data)
for val in data:
if(val == 'R'):
y += dy[0]
elif (val == 'U'):
x += dx[1]
elif (val == 'L'):
y += dy[2]
elif (val == 'D'):
x += dx[3]
if x < 1:
x = 1
elif y < 1:
y = 1
#N = int(input())
N = 5
time = [0, 0, 0]
cnt = 0
for i in range((N + 1) * 3600 - 1):
time[2] += 1
if time[2] == 60:
time[1] += 1
time[2] = 0
if time[1] ==60:
time[0] += 1
time[1] = 0
if time[0] % 10 == 3 or time[0] // 10 == 3 or time[1] % 10 == 3 or time[1] // 10 == 3 or time[2] % 10 == 3 or time[2] // 10 == 3:
cnt +=1
print(cnt)
#data = input()
data = 'a1'
data = [val for val in data]
x = ord(data[0]) - 97
y = int(data[1]) - 1
possible_pos = []
for x_a in [2, -2]:
for y_a in [1, -1]:
possible_pos.append([x + x_a, y + y_a])
for x_a in [1, -1]:
for y_a in [2, -2]:
possible_pos.append([x + x_a, y + y_a])
cnt = 0
for pos in possible_pos:
if pos[0] > 0 and pos[1] > 0 and pos[0] < 8 and pos[1] < 8:
cnt += 1
print(cnt)
data = "K1KA5CB7"
data = "AJKDLSI412K4JSJ9D"
char_list = []
int_list = []
for char in data:
if ord(char) > 64 and ord(char) < 93:
char_list.append(char)
else:
int_list.append(int(char))
char_list.sort()
for char in char_list:
print(char, end='')
print(sum(int_list)) |
994,610 | 545e1b3aa4f590593d8a800f097a2462a7d1f1e6 | # -*- coding: utf-8 -*-
import rpy2.robjects as robjects
from rpy2.robjects import r
pi = robjects.r['pi']
pi[0]
pi.r_repr()
# pi é o objeto do R
# pi[0] é o valor
# >>> pi
# >>> <FloatVector - Python:0x101ae2710 / R:0x1039724e8>
# >>> [3.141593]
robjects.r.ls(globalenv)
robjects.globalenv["a"] = 123
print(robjects.r.ls(globalenv))
robjects.r.rm("a")
print(robjects.r.ls(globalenv))
from rpy2.robjects import IntVector
from rpy2.robjects import StrVector
from rpy2.robjects import FloatVector
a = IntVector([10])
b = IntVector([15])
print a[0] + b[0]
strings = robjects.StrVector(['abc', 'def'])
integers = robjects.IntVector([1, 2, 3])
floats = robjects.FloatVector([1.1, 2.2, 3.3])
valores = IntVector([6, 7, 4, 3, 2, 0, 0, 6])
valores[4] # importante notar que python começa de 0
valores[3] # e o R começa de 1
len(valores)
max(valores)
min(valores)
import ipdb; ipdb.set_trace()
print r.sum(valores)[0]
print r.prod(valores)[0]
print r.sort(valores)
print r.mean(valores)[0]
print r.median(valores)[0]
print r.sd(valores)[0]
print r.var(valores)[0]
valores_python = list(valores)
he = IntVector([10, 2, 23, 11, 14, 35, 46, 32, 13, 51, 27, 49])
ha = he
print r.var(he)[0]
print r.cov(ha, he)[0]
print r.cor(ha, he)[0]
# funções
sqr = robjects.r('function(x) x^2')
print(sqr)
print(sqr(2))
print(sqr(IntVector([4])))
print(sqr(IntVector([4,4])))
eleva3 = robjects.r('function(a){ return(a*a*a); }')
print(eleva3)
print(eleva3(2))
print(eleva3(IntVector([4])))
print(eleva3(IntVector([4,4])))
# utilitários
r.getwd()
r.setwd("c:/docs/mydir") # lançam exceções de python
r.dir() # Lista arquivos do cwd.
import ipdb; ipdb.set_trace()
|
994,611 | b7275199675360bd5566c9de15f4d1688b57a21c | #!/usr/bin/python
def reverse(a):
return ''.join(reversed(a))
s = "Hello"
print s, "reversed is", reverse(s)
|
994,612 | c4b1f35249a64ad8d7bb961ec7958e61942522a4 | # client.py
import socket
s = socket.socket()
host="192.168.20.25"
port = 8080
s.connect((host, port))
f=open("test1.bin", "w")
while True:
read_len=0
buf={}
buf2=bytes()
print("2222222222 ")
for i in range (1, 9):
buf[i]=s.recv(512)
read_len += len(buf[i])
if len(buf[i]) == 0:
break
print("read_len1 ", read_len)
s.send(buf[i])
print("read_len2 ", read_len)
for j in range (1, i+1):
buf2 += buf[j]
f.write(buf2)
if read_len < 4096:
print("111111111 ")
break
f.close()
s.close()
|
994,613 | bb8d3c8cda2185623cbbf1791f6cadc50a41efe1 | import cv2
import numpy as np
weights = r'/Users/upasanathakuria/Desktop/People-Counting-in-Real-Time/Detectx-Yolo-V3/yolov3.weights'
config1 = r'/Users/upasanathakuria/Desktop/People-Counting-in-Real-Time/Detectx-Yolo-V3/cfg/yolov3.cfg'
class_labels = r'/Users/upasanathakuria/Desktop/People-Counting-in-Real-Time/Detectx-Yolo-V3/data/coco.names'
iou_thresh = 0.4
with open(class_labels, 'r') as f:
class_labels = [line.strip() for line in f.readlines()]
def draw_boxes(img,boxes,classesIds,class_labels,confidences,idex):
bboxs = []
if idex is not None:
for i in idex.flatten():
x,y,w,h =boxes[i].astype("int")
bboxs.append((x, y, w, h))
label=class_labels[classesIds[i]]
confidence = confidences[i]
cv2.rectangle(img,(int(x-w/2),int(y-w/2)),(int(x+w/2),int(y+w/2)),(0,255,0),3)
cv2.putText(img, str(label) + ':' + "{0:.2f}".format(confidence) , (int(x-w/2), int(y-w/2)),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 4)
return img, bboxs
def out_transformation(out,width, height,class_labels,label="person"):
boxes=[]
confidences=[]
classesIds=[]
for i in out:
for k in i:
scores=k[5:]
classes=np.argmax(scores)
confidence=scores[classes]
if (confidence>0.4) and (class_labels[classes] == label) :
confidences.append(float(confidence))
box=k[0:4]* np.array([width,height,width,height],dtype=int)
boxes.append(box)
classesIds.append(classes)
return boxes,confidences,classesIds
def infer_image(net,layer_names,img,class_labels,width,height,iou_thresh):
blob = cv2.dnn.blobFromImage(img,1/255,(416,416),swapRB=True)
net.setInput(blob)
out=net.forward(layer_names)
boxes, confidences, classesIds = out_transformation(
out, width, height, class_labels)
idex=cv2.dnn.NMSBoxes(boxes,confidences,0.5,iou_thresh)
idex = np.array(idex)
img, bboxs=draw_boxes(img,boxes,classesIds,class_labels,confidences,idex)
return img, bboxs
net=cv2.dnn.readNet(weights, config1)
layer_names = net.getLayerNames()
layer_names=[layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
if __name__ == "__main__":
cam=cv2.VideoCapture(0)
fourcc=cv2.VideoWriter_fourcc(*"MJPG")
width=int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
height=int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
writter=cv2.VideoWriter('output.avi',fourcc,30,(width,height),True)
while cam.isOpened():
_,frame=cam.read()
frame , bboxs =infer_image(net,layer_names,frame,class_labels,width,height,iou_thresh)
writter.write(frame)
cv2.imshow('output',frame)
if cv2.waitKey(10) & 0xFF==27:
break
cam.release()
writter.release()
cv2.destroyAllWindows()
|
994,614 | b1ed56f695ad398d3e27bfd71d524be250751ab8 | from core.interfaces.phylogenymodel import PhylogenyModel
from core.phylogeny.graphs.ramresidentgraph import RAMResidentGraph
class DAGModel(PhylogenyModel):
def __init__(self,threshold):
self.threshold = threshold
def create(self,malwarecorpus,fingerprintfactory,distancemetric):
self.RRG = RAMResidentGraph()
self.RRG.set_corpus(malwarecorpus)
for i in range(1,malwarecorpus.get_size()):
m = malwarecorpus.getNthCronological(i)
mins = float('infinity')
for j in range(i):
m2 = malwarecorpus.getNthCronological(j)
s = distancemetric.distance(fingerprintfactory.create(m),
fingerprintfactory.create(m2))
if s < mins:
mins = s
x = m2
if mins < self.threshold:
self.RRG.add_edge(x,m,mins)
else:
self.RRG.add_node(x)
self.RRG.add_node(m)
return self.RRG
|
994,615 | 9ff74f4d3f1b0dc3ec4a4f9b9ada3864df22e465 | #!/usr/bin/env python3
#Session 2 Class excercise
#print grid like this
print("Please make a script generating grid like this.")
print("""
+ - - - - + - - - - +
| | |
| | |
| | |
| | |
+ - - - - + - - - - +
| | |
| | |
| | |
| | |
+ - - - - + - - - - +
""")
boxsize = int(input("Please input box size: "))
gridsize = int(input("Please input grid size: "))
n = int(input("Please input a positive integer: "))
def horizontal_line(boxsize):
return "- " * boxsize + "+ "
def vertical_line(boxsize):
return " " * boxsize + "| "
def grid_print_1(n):
if not n%2:
r = "- " * int(n/2) + "+ " + "- " * int(n/2)
c = " " * int(n/2) + "| " + " " * int(n/2)
else:
r = "- " * int((n-1)/2) + "+ " + "- " * int((n-1)/2)
c = " " * int((n-1)/2) + "| " + " " * int((n-1)/2)
for _ in range(n+1):
if not _%int((n+1)/2):
print ("+ " + r + "+")
else:
print ("| " + c + "|")
print ("+ " + r + "+")
def grid_print_2(boxsize, gridsize):
r = horizontal_line(boxsize)
c = vertical_line(boxsize)
for _ in range((boxsize+1)*gridsize+1):
if not _%(boxsize+1):
print ("+ " + r*gridsize)
else:
print ("| " + c*gridsize)
print("grid_print_1 output: ")
grid_print_1(n)
print("grid_print_2 output: ")
grid_print_2(boxsize, gridsize)
# if __name__ == "__main__":
# print("module is imported")
# grid_print(11) |
994,616 | afbda4fd872c22ee0b088cc96c8b7dedc15b590d | def hangaroo(secretWord):
print('Hangaroo')
print('Guess the word that is', len(secretWord),"letters long.")
mistakesmade = 0
lettersGuessed = []
while 8 - mistakesmade > 0:
if isWordGuessed(secretWord, lettersGuessed) == True:
print('============')
print('Congratulations, HANGAROOOOOOO')
break
else:
print('============')
print('You have', 8 - mistakesmade, 'guesses remaining.')
print('letters left:', getAvailableletters(lettersGuessed))
guess= str(input('Guess a letter:')).lower()
if guess in secretWord and guess not in lettersGuessed:
lettersGuessed.append(guess)
print('GOOD JOB!:', getGuessedWord(secretWord, lettersGuessed))
elif guess in lettersGuessed:
print("You already pick this one: ", getGuessedWord(secretWord, lettersGuessed))
elif guess not in secretWord:
print("Hmmmm not that one: ", getGuessedWord(secretWord, lettersGuessed))
lettersGuessed.append(guess)
mistakesmade += 1
if 8-mistakesmade == 0:
print('============')
print('The word is', secretWord)
break
else:
continue
|
994,617 | 42d6ff59128f8931202dd53cde0df8bf275e0472 | while True:
numb = input("Give me a number: ")
if numb.isdigit():
print (float(numb)**2)
break
else:
print("Invalid") |
994,618 | bdb738c4fff7530ae7bde52d55a8cdccf70e9216 | ##teste unitário de CSV externo
# 1 - imports
import json
import pytest
import csv
import requests
from requests import HTTPError
# Leitor do Arquivo CSV
def ler_dados_do_csv():
teste_dados_csv = []
nome_arquivo = 'usuarios.csv'
try:
with open(nome_arquivo,newline='') as csvfile:
dados = csv.reader(csvfile,delimiter=',')
next(dados)
for linha in dados:
teste_dados_csv.append(linha)
return teste_dados_csv
except FileNotFoundError:
print(f'Arquivo não encontrado: {nome_arquivo}')
except Exception as fail:
print(f'Falha imprevista: {fail}')
@pytest.mark.parametrize('id,nome,sobrenome,email', ler_dados_do_csv() )
def testar_dados_usuarios_csv(id,nome,sobrenome,email): # função que testa o algo
try:
response = requests.get(f'https://reqres.in/api/users/{id}')
jsonResponse = response.json()
id_obtido = jsonResponse['data']['id']
nome_obtido = jsonResponse['data']['first_name']
sobrenome_obtido = jsonResponse['data']['last_name']
email_obtido = jsonResponse['data']['email']
print(f'id: {id_obtido} \n nome: {nome_obtido} \n sobrenome: {sobrenome_obtido} \n email: {email_obtido}')
print(f'id: {id_obtido} - nome: {nome_obtido} - sobrenome: {sobrenome_obtido} - email: {email_obtido}')
print('id:{} \n nome:{} \n sobrenome:{} \n email:{}'.format(id_obtido, nome_obtido, sobrenome_obtido, email_obtido))
print(json.dumps(jsonResponse, indent=2, sort_keys=True))
assert id_obtido == int(id)
assert nome_obtido == nome
assert sobrenome_obtido == sobrenome
assert email_obtido == email
except HTTPError as http_fail : # Para o ISTQB, descobriu rodando é falha
print(f'Um erro de HTTP aconteceu: {http_fail}')
except Exception as fail: # Qualquer exceção será tratada a seguir
print(f'Falha inesperada: {fail}') |
994,619 | a290b2cb0e14a9d563bef668073dbd102f72c2b6 |
# coding: utf-8
# # Ámbitos y funciones decoradoras
# #### NOTA: Antes de realizar esta lección debes reiniciar Jupyter Notebook para vaciar la memoria.
# In[4]:
def Hola():
number = 89
def Bienvenido():
return ("Welcome")
print(locals())
return Bienvenido()
Hola()
print(globals())
# ## Funciones decoradoras
# In[21]:
def execute_message(function):
def decorate():
print("Message executed > {}".format(function()))
return decorate
def sayHello():
return "Hello"
def sayGoodbye():
return "Goodbye"
def omg():
return "OMG!"
def cloudySkies_lilSkies_lyrics():
return '''
[Intro]
Girl, never lie to me
Ayy, girl, never lie to me
Duck from the flashin' lights, watch out when the tide comin'
All these people judgin'
Take a sip out the double cup, can't tell me nothin'
I know it's all for the better and I'm never stuntin'
I just want a girl who gon' really tell me somethin', ayy
[Chorus]
Ayy, girl, never lie to me
Girl would you ride for me? Pull up on the side for me
Duck from the flashin' lights, and watch out when the tide comin'
I know it's hard to be yourself when all these people judgin'
Take a sip out the double cup, can't tell me nothin'
I know it's all for the better and I'm never stuntin'
I just want a girl who gon' really show me somethin'
Give you the time of your life if you would stop frontin'
'''
execute_message(sayHello)()
# In[23]:
@execute_message
def sayHello():
return "Hello"
@execute_message
def sayGoodbye():
return "Goodbye"
@execute_message
def omg():
return "OMG!"
@execute_message
def cloudySkies_lilSkies_lyrics():
return '''
[Intro]
Girl, never lie to me
Ayy, girl, never lie to me
Duck from the flashin' lights, watch out when the tide comin'
All these people judgin'
Take a sip out the double cup, can't tell me nothin'
I know it's all for the better and I'm never stuntin'
I just want a girl who gon' really tell me somethin', ayy
[Chorus]
Ayy, girl, never lie to me
Girl would you ride for me? Pull up on the side for me
Duck from the flashin' lights, and watch out when the tide comin'
I know it's hard to be yourself when all these people judgin'
Take a sip out the double cup, can't tell me nothin'
I know it's all for the better and I'm never stuntin'
I just want a girl who gon' really show me somethin'
Give you the time of your life if you would stop frontin'
'''
# In[19]:
sayGoodbye()
# In[24]:
cloudySkies_lilSkies_lyrics()
# ## Pasando argumentos al decorador
# In[25]:
def execute_message(function):
def decorate(*args, **kwargs):
print("Message executed > {}".format(function(*args, **kwargs)))
return decorate
# In[26]:
sayGoodbye()
# In[27]:
@execute_message
def sayGoodbye(name):
return "Goodbye {}".format(name)
# In[28]:
sayGoodbye("Ruben")
|
994,620 | f8b37d787f1d1bc25dd868f3200f5a3b301c27b7 | # -*- coding: utf-8 -*-
#
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from original_tools import plot
if __name__ == "__main__":
# データ読み込み部分
# データの形状は、以下の通り(" | "は実際はタブ)
# user_id | store_id | user_feature1 | user_feature2 | store_feature1 | store_feature2 | num_bought | spent_total
# 1234 | 1111 | 50 | 3.8 | 1300 | 4.8 | 1 | 1031
# 2345 | 2222 | 30 | 1.1 | 670 | 10.2 | 2 | 8820
# ...
df = pd.read_csv("foo_bar.tsv", sep="\t")
# 購入金額(spent_total)の分布を可視化
# 実装済みのplot関数で描画し、ファイルに出力
# plot(data_list, graph_title, file_name)
plot(df.spent_total.values, "購入金額の分布", "histgram_of_spent_total.html")
# 可視化した分布から決めた閾値100,000円以下に絞ったデータを利用
df_without_outliers = df[df.spent_total <= 1000000]
# 上記条件に絞って購入商品数(num_bought)のHistogramを可視化
plot(df_without_outliers.num_bought.values, "購入金額500,000円以下のときの購入商品数分布",
"num_bought_distribution_whose_total_spent_leq_100000.html")
# ユーザーの特徴量と店の特徴量から購入金額を学習したいので特徴量行列を作成
# 目的変数である購入金額のカラムを除去
raw_features = df_without_outliers.drop(["spent_total"], axis=1)
# 特徴量の標準化
ss = StandardScaler()
standardized = ss.fit_transform(raw_features.values)
# Training dataとTest dataに分離
X_train, X_test, y_train, y_test = train_test_split(standardized, df_without_outliers.spent_total.values)
# 購入金額(spent_total)の学習と予測
lr = LinearRegression()
lr.fit(X_train, y_train)
prediction = lr.predict(X_test)
# 精度評価
print(f"MSE: {mean_squared_error(y_test, prediction)}")
# 各特徴量の影響度を表示
for n, c in zip(raw_features.columns, lr.coef_):
print(f"{n} : {c}")
|
994,621 | eefc7dc432a8d8193f4bd71e710105b36a48517e | from setuptools import setup, find_packages
setup(
name="nate",
version="0.0.1",
install_requires=[
"pandas>=0.25.0",
"spacy",
#"python-igraph>=0.8.0",
"tok",
"numba",
"joblib",
"matplotlib",
"networkx",
"pillow",
"stop_words",
"gensim"
], # A bunch of things will need to go here; we'll have to do an audit of every package we use
packages = find_packages(),
include_package_data=True,
author = "John McLevey, Tyler Crick, Pierson Browne", # likely more later
description = "nate (Network Analysis with TExt).",
url="http://networkslab.org",
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
)
|
994,622 | 2682a6abd23c26479fda626e199cc673930a3db2 |
VERY SIMPLE Python solutions (iterative and recursive), both beat 90%
https://leetcode.com/problems/balanced-binary-tree/discuss/35708
* Lang: python3
* Author: agave
* Votes: 67
```
class Solution(object):
def isBalanced(self, root):
def check(root):
if root is None:
return 0
left = check(root.left)
right = check(root.right)
if left == -1 or right == -1 or abs(left - right) > 1:
return -1
return 1 + max(left, right)
return check(root) != -1
# 226 / 226 test cases passed.
# Status: Accepted
# Runtime: 80 ms
```
Iterative, based on postorder traversal:
```
class Solution(object):
def isBalanced(self, root):
stack, node, last, depths = [], root, None, {}
while stack or node:
if node:
stack.append(node)
node = node.left
else:
node = stack[-1]
if not node.right or last == node.right:
node = stack.pop()
left, right = depths.get(node.left, 0), depths.get(node.right, 0)
if abs(left - right) > 1: return False
depths[node] = 1 + max(left, right)
last = node
node = None
else:
node = node.right
return True
# 226 / 226 test cases passed.
# Status: Accepted
# Runtime: 84 ms
```
|
994,623 | 4d4d1c218a38190eebe0a78db0c421f35243f56b | class SizeNormalization:
def __init__(self, p=None):
self.id = None
self.effective_date = None
self.value = None # cost per unit of energy
self.note = None
self.account_id = None
if p is not None:
if "id" in p:
self.id = p["id"]
if "effective_date" in p:
self.effective_date = p["effective_date"]
if "value" in p:
self.value = p["value"]
if "note" in p:
self.note = p["note"]
if "account_id" in p:
self.account_id = p["account_id"] |
994,624 | 684f16f1284c281e4ee86fd7393ef84307853307 | """Perform beam search on a decoder rnn with head layer"""
import torch
from torch import nn
import numpy as np
import torch.nn.utils.rnn as p
pack = p.pack_sequence
def sample_beam(model, input_embedding, char2idx, idx2char, k=5, maxlen=30,
start='START', use_head=True):
"""Sample using beam search
model: model to be used. It must have a head layer (or a use_head option in
forward)
input_embedding: The input embedding
char2idx: dict which maps characters to one hot indices
Must have 'START' and 'END' as keys
idx2char: dict which maps one hot indices to characters
k: size of the beam
maxlen: maximum length of a sampled word
start: which character to start with
use_head: whether to pass the input_embedding through the head layer for the
first beam expansion
"""
with torch.no_grad():
device = input_embedding.device
softmax = nn.Softmax(dim=1)
if use_head:
input_embedding = input_embedding.view(1, -1)
inp = [torch.LongTensor([char2idx[start]]).to(device)]
inp = nn.utils.rnn.pack_sequence(inp)
out, hidden = model(input_embedding, inp, use_head=use_head)
out = softmax(out.data).view(-1).cpu().numpy()
max_k = np.argsort(out)[-k:][::-1]
oldprobs = out[max_k]
words = [[i] for i in max_k]
inp = pack([torch.LongTensor([j]).to(device) for j in max_k])
if model.mode == 'LSTM':
hidden0 = torch.cat([hidden[0] for i in range(k)], dim=1)
hidden1 = torch.cat([hidden[1] for i in range(k)], dim=1)
hidden = hidden0, hidden1
else:
hidden = torch.cat([hidden for i in range(k)], dim=1)
WORDS = []
for c in range(maxlen):
out, hidden = model(hidden, inp, use_head=False)
out = softmax(out.data).cpu().numpy()
#print(out.shape)
inpnp = inp.data.detach().cpu().numpy()
done = np.where(inpnp == char2idx['END'])
out[done] = 0
if len(out[done]) != 0:
#print(out[done].shape)
for d in done[0]:
out[d][char2idx['END']] = 1
#print(done)
#print(out)
#print(out[done])
out = (oldprobs.reshape(-1, 1)*out)
max_k = np.argsort(out)[:, -k:][:, ::-1]
#print(max_k)
probs = np.array([out[i][max_k[i]] for i in range(k)])
#print(probs)
flat = probs.reshape(-1)
max_k2 = np.argsort(flat)[::-1][:k]
word_inds = max_k2//k
next_chars_inds = max_k2%k
oldprobs = flat[max_k2]
#print(oldprobs)
new_words = []
new_inp = []
for i, word_ind in enumerate(word_inds):
next_char = max_k[word_ind][next_chars_inds[i]]
if next_char == char2idx['END']:
#print("HIT AN END at word {}".format(word_ind))
WORDS.append((words[word_ind], oldprobs[i]))
#the_word = words[word_ind]
#return ''.join([idx2char[i] for i in the_word])
new_inp.append(torch.LongTensor([next_char]).to(device))
word = words[word_ind][:]
word = word + [next_char]
new_words.append(word)
words = new_words[:]
if model.mode == 'LSTM':
h1, h2 = hidden
h1, h2 = h1[0][word_inds].view(1, k, -1), h2[0][word_inds].view(1, k, -1)
hidden = h1, h2
else:
hidden = hidden[0][word_inds].view(1, k, -1)
inp = pack(new_inp)
return [''.join([idx2char[i] for i in word if i != char2idx['END']]) for word in words], oldprobs
def pass_word(word, model, input_embedding, char2idx, device, use_head=True):
"""Pass a word through the given model using the input_embedding,
Returns the output and final hidden state"""
inp = torch.LongTensor([char2idx['START']] + [char2idx[c] for c in word]).to(device)
inp = pack([inp])
out, hidden = model(input_embedding.unsqueeze(0), inp, use_head=use_head)
return out, hidden
|
994,625 | 41bb6c3469c370a68f6e065f1c81bc0f8473ecf7 | import unittest
from botoflow.decisions import decision_list, decisions
class TestDecisionList(unittest.TestCase):
def test_delete_decision(self):
dlist = decision_list.DecisionList()
dlist.append(decisions.CancelTimer(123))
self.assertTrue(dlist)
dlist.delete_decision(decisions.CancelTimer, 999)
self.assertTrue(dlist)
dlist.delete_decision(decisions.CancelTimer, 123)
self.assertFalse(dlist)
def test_to_swf(self):
dlist = decision_list.DecisionList()
dlist.append(decisions.CancelTimer(123))
swf_list = dlist.to_swf()
self.assertTrue(swf_list)
self.assertEqual(swf_list, [{'cancelTimerDecisionAttributes':
{'timerId': 123},
'decisionType': 'CancelTimer'}])
if __name__ == '__main__':
unittest.main()
|
994,626 | 2244cfaf52eb15869292124caac7ae22a0e7519f | import os
def deleteBigFiles(max_size):
file_list = [f for f in os.listdir() if os.path.isfile(f)]
for f in file_list:
filesize = os.stat(f).st_size
if filesize > max_size:
os.remove(f)
# print(f'filename: {f} \n file size: {filesize} \n current working directory: {os.getcwd()} \n\n')
directory_list = [d for d in os.listdir() if os.path.isdir(d)]
for d in directory_list:
os.chdir(d)
deleteBigFiles(max_size)
os.chdir("..")
def deleteBigFilesFor1000experiment():
max_size = 10000000
os.chdir(os.getcwd())
os.chdir("results/logs")
for folder in [folder for folder in os.listdir()]:
os.chdir(folder)
deleteBigFiles(max_size)
os.chdir("..")
|
994,627 | a403c7b3cf20a2efb3c7846dea60ac5d26c693b7 | import os
import random
import json5
import numpy as np
import tensorflow as tf
from datetime import datetime
from pprint import pformat
from .utils.loader import load_data
from .utils.logger import Logger
from .utils.params import validate_params
from .model import Model
from .interface import Interface
class Trainer:
"""
__init__: Load args and define logger
train:
Split train and dev set;
Set up tf graph
build session
mode, interface, states = self.build_model(sess)
build_model:
states = {}
Define interface
model = Model(args, sess)
"""
def __init__(self, args):
self.args = args
self.log = Logger(self.args)
def train(self):
# Setup train set and dev set
start_time = datetime.now()
train = load_data(self.args.data_dir, 'train') # looks like 'data/snli/train.txt'
dev = load_data(self.args.data_dir, self.args.eval_file) # looks like 'data/snli/test.txt'
self.log(f'train ({len(train)}) | {self.args.eval_file} ({len(dev)})')
# Setup tf graph
tf.reset_default_graph()
with tf.Graph().as_default():
config = tf.ConfigProto() # build the session and set parameters
# config.gpu_options.allow_growth = True
# config.allow_soft_replacement = True
sess = tf.Session(config=config)
with sess.as_default():
model, interface, states = self.build_model(sess)
train_batches = interface.pre_process(train)
dev_batches = interface.pre_process(dev, training=False)
self.log('setup complete: {}s'.format(str(datetime.now() - start_time).split(".")[0]))
try:
for epoch in range(states['start_epoch'], self.args.epochs + 1):
states['epoch'] = epoch
self.log.set_epoch(epoch)
batches = interface.shuffle_batch(train_batches)
for batch_id, batch in enumerate(batches):
stats = model.update(sess, batch) # get new stats: updates, loss, lr, gnorm, summary
self.log.update(stats)
eval_per_updates = self.eval_per_updates \
if model.updates > self.args.eval_warmup_steps else self.args.eval_per_updates_warmup
if model.updates % eval_per_updates == 0 \
or (self.args.eval_epoch and batch_id + 1 == len(batches)):
score, dev_stats = model.evaluate(sess, dev_batches)
if score > states['best_eval']:
states['best_eval'], states['best_epoch'], states['best_step'] = \
score, epoch, model.updates
if self.args.save:
model.save(states, name=model.best_model_name)
self.log.log_eval(dev_stats)
if self.args.save_all:
model.save(states)
model.save(states, name='last')
if model.updates - states['best_step'] > self.args.early_stopping \
and model.updates > self.args.min_steps:
raise EarlyStop('[Tolerance reached. Training is stopped early.]')
if states['loss'] > self.args.max_loss:
raise EarlyStop('[Loss exceeds tolerance. Unstable training is stopped early.]')
if states['lr'] < self.args.min_lr - 1e-6:
raise EarlyStop('[Learning rate has decayed below min_lr. Training is stopped early.]')
self.log.newline()
self.log('Training complete.')
except KeyboardInterrupt:
self.log.newline()
self.log(f'Training interupted. Stopped early')
except EarlyStop as e:
self.log.newline()
self.log(str(e))
self.log(f'best dev score {states["best_eval"]} at step {states["best_step"]} '
f'(epoch {states["best_epoch"]}).')
self.log(f'best eval stats [{self.log.best_eval_str}]')
training_time = str(datetime.now() - start_time).split('.')[0]
self.log(f'Training time: {training_time}.')
states['start_time'] = str(start_time).split('.')[0]
states['training_time'] = training_time
return states
def build_model(self, sess):
states = {}
interface = Interface(self.args, self.log)
# import pdb; pdb.set_trace()
self.log(f'#classes: {self.args.num_classes}; #vocab: {self.args.num_vocab}')
if self.args.seed: # Set seed to random, np and tf
random.seed(self.args.seed)
np.random.seed(self.args.seed)
tf.set_random_seed(self.args.seed)
model = Model(self.args, sess)
import pdb; pdb.set_trace()
sess.run(tf.global_variables_initializer())
embeddings = interface.load_embeddings()
model.set_embeddings(sess, embeddings)
# set initial states
states['start_epoch'] = 1
states['best_eval'] = 0.
states['best_epoch'] = 0
states['best_step'] = 0
self.log(f'trainable params: {model.num_parameters():,d}')
self.log(f'trainable parameters (exclude embeddings): {model.num_parameters(exclude_embedding=True):,d}')
validate_params(self.args)
with open(os.path.join(self.args.summary_dir, 'args.json5'), 'w') as f:
args = {k: v for k, v in vars(self.args).items() if not k.startswith('_')}
json5.dump(args, f, indent=2) # indent: print across multiple lines
self.log(pformat(vars(self.args), indent=2, width=120))
return model, interface, states
class EarlyStop(Exception):
pass |
994,628 | dddfcce25745324551bd42c471489b2120ceaec2 | import pytest
from .. import get_translator
from ..translators import TranslationError
def test_translation_smoke():
"""Translating to morse and back to english should yield the same string
in upper case"""
english_to_morse = get_translator("english", "morse")
morse_to_english = get_translator("morse", "english")
morse = english_to_morse.translate("hello world")
english = morse_to_english.translate(morse)
assert english == "HELLO WORLD"
def test_translation_unknown_char():
"""Translating unknown character should raise an error"""
english_to_morse = get_translator("english", "morse")
with pytest.raises(TranslationError):
english_to_morse.translate("ä")
|
994,629 | 9f3a6154eae1d7a02071b32a54f12f2115f4f1da | import argparse
import pandas as pd
from mlp import KerasDenseMLP
from data import DataProcessor
parser = argparse.ArgumentParser()
"""
Define the necessary parameters for running the neural network
The number of epochs is not required
"""
parser.add_argument(
"-n", "--neurons",
nargs="+", help="Number of Neurons for each Dense hidden layer", type=int
)
parser.add_argument("-lr", "--learning", help="Learning Constant", type=float)
parser.add_argument("-hours", "--hours", help="Number of hours to consider for prediction", type=int)
parser.add_argument("-e", "--epochs", help="Number of epochs", type=int)
parser.add_argument("-b", "--batch", help="Batch size", type=int)
def main(args):
try:
hours = args.hours if args.hours else 1
filename = "dataset.csv"
dataset = pd.read_csv(filename, header=0, index_col=0)
processor = DataProcessor(dataset)
processor.shift(hours)
x_data, y_data = processor.to_numpy_arrays(hours)
processor.build_dataset(x_data, y_data)
mld = KerasDenseMLP(processor=processor,args=args)
try:
mld.model.load_weights(mld.checkpoint)
mld.evaluate()
mld.predict()
except Exception as error:
print("Error trying to load checkpoint.")
print(error)
mld.train()
mld.evaluate()
mld.predict()
except ValueError as e:
print ('Error: ' + e.message)
if __name__ == "__main__":
main(parser.parse_args()) |
994,630 | 6b63c8bea00523e4f5897088cdf512d889912fce | import bson
import click
import itertools
import logging
import pendulum
import requests
from concurrent.futures import ThreadPoolExecutor
from pprint import pprint
def iter_date(
start_date: pendulum.datetime, end_date: pendulum.datetime, chunk_size=59
):
if end_date < start_date:
raise ValueError(
"start_date:%s should not large than end_date:%s", start_date, end_date
)
while start_date <= end_date:
new_end_date = min(start_date.add(days=chunk_size), end_date)
yield start_date, new_end_date
start_date = new_end_date.add(days=1)
class ShanbayClient:
def __init__(self, token):
self.token = token
self._session = requests.Session()
if token:
self._session.cookies.set("auth_token", token)
self._session.headers.update(
{
"X-Device": "android.ticktick, MI 8, 5003, 5cab671122d4db0dde5a941c, android_xiaomi_dd,",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 9; MI 8 MIUI/9.3.28)",
}
)
def checkin_calendar(self, user_id, start_date, end_date):
"""
区间两边闭合,最多返回 60 条,按打卡天数降序
"""
return self._session.get(
"https://apiv3.shanbay.com/uc/checkin/calendar/dates",
params={
"user_id": user_id,
"start_date": start_date.to_date_string() or "",
"end_date": end_date.to_date_string() or "",
},
).json()
class DiDaClient:
def __init__(self, token):
self.token = token
self._session = requests.Session()
self._session.headers.update(
{
"Authorization": "OAuth " + self.token,
"X-Device": "android.ticktick, MI 8, 5003, 5cab671122d4db0dde5a941c, android_xiaomi_dd,",
"User-Agent": "Dalvik/2.1.0 (Linux; U; Android 9; MI 8 MIUI/9.3.28)",
}
)
def get_habits(self):
return self._session.get("https://api.dida365.com/api/v2/habits").json()
def batch_habits(self, data):
# 无法修改 createdTime
return self._session.post(
"https://api.dida365.com/api/v2/habits/batch", json=data
).json()
def batch_checkin(self, data):
return self._session.post(
"https://api.dida365.com/api/v2/habitCheckins/batch", json=data
).json()
def calendar(user_id, start_date, end_date):
"""
返回 API 一样的结果,但没有长度限制,日志按打卡日期递增排序
"""
client = ShanbayClient(None)
start_date = pendulum.parse(start_date)
end_date = pendulum.parse(end_date)
checkin_days_num = -1
def fn(args):
result = client.checkin_calendar(user_id, *args)
nonlocal checkin_days_num
checkin_days_num = result["checkin_days_num"]
print("fetched:", args, len(result["logs"]))
return result["logs"]
with ThreadPoolExecutor() as executor:
logs = sorted(
itertools.chain(*executor.map(fn, iter_date(start_date, end_date))),
key=lambda l: l["date"],
)
return {"checkin_days_num": checkin_days_num, "logs": logs}
def log_to_checkin(habit_id, logs):
for log in logs:
date = pendulum.parse(log["date"], tz="Asia/Shanghai")
checkin = {
"id": str(bson.ObjectId()),
"habitId": habit_id,
"checkinStamp": date.year * 10000 + date.month * 100 + date.day,
"checkinTime": date.isoformat(),
}
yield checkin
@click.command()
@click.pass_context
@click.option(
"-t",
"--token",
type=click.STRING,
required=True,
help="滴答清单认证token, 名称为 t 的 cookie",
)
@click.option("-u", "--user_id", type=click.STRING, required=True, help="扇贝用户 ID, 为数字")
@click.option(
"-s", "--start_date", type=click.STRING, default="2016-01-01", help="起始日期"
)
@click.option(
"-e",
"--end_date",
type=click.STRING,
default=pendulum.now().to_date_string(),
help="结束日期",
)
@click.option("-d", "--delete", type=click.BOOL, help="是否删除相同名字的习惯")
def cli(ctx, token, user_id, start_date, end_date, delete):
logger: logging.Logger = ctx.obj.logger
result = calendar(user_id, start_date, end_date)
checkin_days_num, logs = result["checkin_days_num"], result["logs"]
assert checkin_days_num == len(logs)
if checkin_days_num == 0:
logger.warning("No checkin logs, exit")
return
client = DiDaClient(token)
name = "扇贝打卡"
if delete:
habits = client.get_habits()
delete_ids = []
for h in habits:
if h["name"] == name:
print("delete matched habit")
pprint(h)
delete_ids.append(h["id"])
result = client.batch_habits({"add": [], "delete": delete_ids, "update": []})
pprint(result)
habit_id = str(bson.ObjectId())
tz = "Asia/Shanghai"
habit = {
"name": name,
"id": habit_id,
"createdTime": pendulum.parse(logs[0]["date"], tz=tz).isoformat(),
"modifiedTime": pendulum.parse(logs[1]["date"], tz=tz).isoformat(),
"totalCheckIns": checkin_days_num,
"color": "#209E85",
"encouragement": "Shanbay, feel the change",
"iconRes": "habit_learn_words",
"sortOrder": -1374389534720,
"status": 0,
}
result = client.batch_habits({"add": [habit], "delete": [], "update": []})
pprint(result)
result = client.batch_checkin(
{"add": list(log_to_checkin(habit_id, logs)), "delete": [], "update": []}
)
pprint(result)
|
994,631 | 1688cdb0c379f73902a19e6b18e1703bf9530407 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 05 23:08:50 2016
@author: Nandini Bhosale
"""
from scipy import linspace,exp
from scipy.optimize import fsolve
from scipy.integrate import odeint,trapz
import random
from pylab import plot,show,subplot,figure
"""PI Controller for cstr"""
Fis=2.0
Cais=2.0
k0=0.2
E=10.0
R=8.314
Tis=100.0
J=2.0
p=1000.0
Cp=4.0
Qc=1.0
A=0.1
Re=0.5
Kc1=50.0
Kc2=20000.0
TTi1=0.1
TTi2=0.001
"""Steady state calculation"""
def steady(x):
h=x[0]
Ca=x[1]
F=h/Re
T=x[2]
e1=(Fi-F)/A
e2=Fi/A/h*(Cai-Ca)-k0*exp(-E/R/T)*Ca
e3=Fi/A/h*(Ti-T)+J*k0*exp(-E/R/T)*Ca-Qc/p/Cp/A/h
return [e1,e2,e3]
y=[]
z=[]
v=[]
"""Integration using Trapz"""
def control(x,t):
h=x[0]
Ca=x[1]
y.append(h-hs)
v.append(t)
Fc=F+ Kc1*(h-hs)+ Kc1/TTi1*trapz(y,v)
T=x[2]
z.append(T-Ts)
Q=Qc+ Kc2*(T-Ts)+ Kc2/TTi2*trapz(z,v)
e1=(Fi-Fc)/A
e2=Fi/A/h*(Cai-Ca)-k0*exp(-E/R/T)*Ca
e3=Fi/A/h*(Ti-T)+J*k0*exp(-E/R/T)*Ca-Q/p/Cp/A/h
return [e1,e2,e3]
tt=1.0
Fi=Fis
Cai=Cais
Ti=Tis
ini=fsolve(steady,[1,1,100])
[hs,Cas,Ts]=ini
figure()
while tt<10.0:
F=hs/Re
#Fi=random.randint(80,120)/100.0*Fi
Ti=random.randint(80,120)/100.0*Ti
t=linspace(tt-1,tt,11)
x=odeint(control,ini,t)
plot(t,x[:,2])
ini=x[-1]
tt=tt+1
show()
import scipy
y=scipy.array(y)
z=scipy.array(z)
err=trapz(abs(y),v)
err2=trapz(abs(z),v)
print err,err2
|
994,632 | 22a2cfed5dacae73f6088e868acc73aa02a57429 | from django.urls import path
from . import views
urlpatterns = [
path('<int:dijete_pk>/novo/', views.NapredakCreateView.as_view(), name="stvori-napredak"),
]
|
994,633 | 70be6b382a022645c50a3095a1e39d0e20b32c89 | #! /usr/bin/python3
from io import StringIO
import os, sys
import unittest
from unittest.mock import patch, MagicMock
from .services import Services
from .models import Code
path = os.path.dirname(__file__)
if path not in sys.path:
sys.path.insert(0, path)
class TestServices(unittest.TestCase):
def setUp(self):
self.code = Code(
code="123",
size=3)
self.service = Services()
def test_search_existing_code_true(self):
code = "123"
response = self.service.search_existing_code(code)
self.assertTrue(response)
def test_search_existing_code_false(self):
code = "128"
response = self.service.search_existing_code(code)
self.assertFalse(response) |
994,634 | bbcea5db2d94ac442074591d186270cb6e2a877b | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from ..core import variables
from ..core.kernel import Kernel
class KernelMix(Kernel):
def __init__(
self,
kernel_list,
normalization = tf.nn.softmax,
name=""):
self._kernel_list = kernel_list
self._normalization = normalization
for k in kernel_list:
assert isinstance(k, Kernel), \
" every element must be an instance of Kernel class. "
super(KernelMix, self).__init__(
'kernelmix' + str(len(kernel_list)) + name,
False)
def apply(self, a, b):
## a,b :: { batch, output_atoms, new_w, new_h, depth * np.prod(ksizes) } + repdim
bias = variables.weight_variable(
[len(self._kernel_list)],
name="mixing_coeficients",
initializer=tf.compat.v1.initializers.ones()
)
#c = self._normalization(bias)
s = tf.zeros(a.shape.as_list()[:-2]+ [1,1],dtype=tf.float32)
for i in range(len(self._kernel_list)):
with tf.compat.v1.variable_scope('component' + str(i), reuse=tf.compat.v1.AUTO_REUSE):
s = s + bias[i] * self._kernel_list[i].take(a,b)
return s
class MonoKernelMix(KernelMix):
def __init__(
self,
kernel,
degree,
normalization=tf.nn.softmax,
name=""):
assert isinstance(kernel, Kernel), \
" kernel must be an instance of Kernel class. "
super(MonoKernelMix, self).__init__(
kernel_list=[kernel] * degree,
normalization=normalization,
name="monokernel" + str(degree) + name)
|
994,635 | 8b75cd2d47ae1b19766390b99157a645070afcaa | from collections import namedtuple, defaultdict
import pandas as pd
import numpy as np
import pdb
import re
day_re = re.compile('.*Day\s(\d+).*')
assign_re = re.compile('.*Assign.*\s(\d+).*')
mid_re = re.compile('.*Mid.*Combi*')
def make_tuple(in_dict,tupname='values'):
"""
make a named tuple from a dictionary
Parameters
==========
in_dict: dictionary
Any python object with key/value pairs
tupname: string
optional name for the new namedtuple type
Returns
=======
the_tup: namedtuple
named tuple with keys as attributes
"""
the_tup = namedtuple(tupname, in_dict.keys())
the_tup = the_tup(**in_dict)
return the_tup
def stringify_column(df,id_col=None):
"""
turn a column of floating point numbers into characters
Parameters
----------
df: dataframe
input dataframe from quiz or gradebook
id_col: str
name of student id column to turn into strings
either 'SIS User ID' or 'ID' for gradebook or
'sis_id' or 'id' for quiz results
Returns
-------
modified dataframe with ids turned from floats into strings
"""
float_ids=df[id_col].values
#
# if points_possible is present it will be NaN, set to zero
#
try:
float_ids[np.isnan(float_ids)]=0.
the_ids = df[id_col].values.astype(np.int)
index_vals = [f'{item:d}' for item in the_ids]
except TypeError:
index_vals = float_ids
df[id_col]=index_vals
return pd.DataFrame(df)
def clean_id(df,id_col=None):
"""
give student numbers as floating point, turn
into 8 character strings, dropping duplicate rows
in the case of multiple attempts
Parameters
----------
df: dataframe
input dataframe from quiz or gradebook
id_col: str
name of student id column to turn into strings
either 'SIS User ID' for gradebook or
'sis_id' quiz results
Returns
-------
modified dataframe with duplicates removed and index set to 8 character
student number
"""
stringify_column(df,id_col)
df=df.set_index(id_col,drop=False)
df.drop_duplicates(id_col,keep='first',inplace=True)
return pd.DataFrame(df)
|
994,636 | 31cc385049f28eb5868bbf196b467e5da7ea4ceb | class Solution(object):
def removeInvalidParentheses(self, s):
"""
:type s: str
:rtype: List[str]
"""
# if not s:
# return [""]
rm_l, rm_r = self.min_rm_paren(s)
res = set()
self.dfs(res, [], 0, s, rm_l, rm_r, 0)
return list(res)
# diff: l_paren - r_paren in the curr path
# rm_l: min of removal of l_paren
def dfs(self, res, path, idx, s, rm_l, rm_r, diff):
if idx == len(s) and diff == 0 and rm_l == 0 and rm_r == 0:
res.add(''.join(path))
return
if idx >= len(s) or rm_l < 0 or rm_r < 0 or diff < 0:
return
ch = s[idx]
if ch == '(':
# remove
self.dfs(res, path, idx + 1, s, rm_l - 1, rm_r, diff)
# select
path.append(ch)
self.dfs(res, path, idx + 1, s, rm_l, rm_r, diff + 1)
path.pop()
elif ch == ')':
# remove
self.dfs(res, path, idx + 1, s, rm_l, rm_r - 1, diff)
# select
path.append(ch)
self.dfs(res, path, idx + 1, s, rm_l, rm_r, diff - 1)
path.pop()
else:
# select
path.append(ch)
self.dfs(res, path, idx + 1, s, rm_l, rm_r, diff)
path.pop()
def min_rm_paren(self, s):
rm_l, rm_r = 0, 0
for ch in s:
if ch == '(':
rm_l += 1
elif ch == ')':
if rm_l > 0:
rm_l -= 1
else:
rm_r += 1
return rm_l, rm_r |
994,637 | 3251cabc21583e701d9dbd4c5681411a79cec486 | import gym
import numpy as np
import minerl
import torch
import warnings
import os
import traceback
from stable_baselines3.common.utils import get_device
from minerl.data import BufferedBatchIter
class DummyEnv(gym.Env):
"""
A simplistic class that lets us mock up a gym Environment that is sufficient for our purposes
without actually going through the whole convoluted registration process.
"""
def __init__(self, action_space, observation_space):
self.action_space = action_space
self.observation_space = observation_space
def step(self, action):
if isinstance(self.action_space, gym.spaces.Dict):
assert isinstance(action, dict)
return self.observation_space.sample(), 0, True, {}
def reset(self):
return self.observation_space.sample()
class NestableObservationWrapper(gym.ObservationWrapper):
def observation(self, observation):
if hasattr(self.env, 'observation'):
return self._observation(self.env.observation(observation))
else:
return self._observation(observation)
def _observation(self, observation):
raise NotImplementedError
class NormalizeObservations(NestableObservationWrapper):
def __init__(self, env, high_val=255):
super().__init__(env)
self.high_val = high_val
def _observation(self, observation):
assert observation.max() <= self.high_val, f"Observation greater than high val {self.high_val} found"
return observation/self.high_val
class ExtractPOVAndTranspose(NestableObservationWrapper):
"""
Basically what it says on the tin. Extracts only the POV observation out of the `obs` dict,
and transposes those observations to be in the (C, H, W) format used by stable_baselines and imitation
"""
def __init__(self, env):
super().__init__(env)
non_transposed_shape = self.env.observation_space['pov'].shape
self.high = np.max(self.env.observation_space['pov'].high)
transposed_shape = (non_transposed_shape[2],
non_transposed_shape[0],
non_transposed_shape[1])
# Note: this assumes the Box is of the form where low/high values are vector but need to be scalar
transposed_obs_space = gym.spaces.Box(low=np.min(self.env.observation_space['pov'].low),
high=np.max(self.env.observation_space['pov'].high),
shape=transposed_shape,
dtype=np.uint8)
self.observation_space = transposed_obs_space
def _observation(self, observation):
# Minecraft returns shapes in NHWC by default
return np.swapaxes(observation['pov'], -1, -3)
class Testing10000StepLimitWrapper(gym.wrappers.TimeLimit):
"""
A simple wrapper to impose a 10,000 step limit, for environments that don't have one built in
"""
def __init__(self, env):
super().__init__(env, 10000)
def wrap_env(env, wrappers):
"""
Wrap `env` in all gym wrappers specified by `wrappers`
"""
for wrapper, args in wrappers:
env = wrapper(env, **args)
return env
def optional_observation_map(env, inner_obs):
"""
If the env implements the `observation` function (i.e. if one of the
wrappers is an ObservationWrapper), call that `observation` transformation
on the observation produced by the inner environment
"""
if hasattr(env, 'observation'):
return env.observation(inner_obs)
else:
return inner_obs
def optional_action_map(env, inner_action):
"""
This is doing something slightly tricky that is explained in the documentation for
RecursiveActionWrapper (which TODO should eventually be in MineRL)
Basically, it needs to apply `reverse_action` transformations from the inside out
when converting the actions stored and used in a dataset
"""
if hasattr(env, 'wrap_action'):
return env.wrap_action(inner_action)
else:
return inner_action
def recursive_squeeze(dictlike):
"""
Take a possibly-nested dictionary-like object of which all leaf elements are numpy ar
"""
out = {}
for k, v in dictlike.items():
if isinstance(v, dict):
out[k] = recursive_squeeze(v)
else:
out[k] = np.squeeze(v)
return out
def warn_on_non_image_tensor(x):
"""Do some basic checks to make sure the input image tensor looks like a
batch of stacked square frames. Good sanity check to make sure that
preprocessing is not being messed up somehow."""
stack_str = None
def do_warning(message):
# issue a warning, but annotate it with some information about the
# stack (specifically, basenames of code files and line number at the
# time of exception for each stack frame except this one)
nonlocal stack_str
if stack_str is None:
frames = traceback.extract_stack()
stack_str = '/'.join(
f'{os.path.basename(frame.filename)}:{frame.lineno}'
# [:-1] skips the current frame
for frame in frames[:-1])
warnings.warn(message + f" (stack: {stack_str})")
# check that image has rank 4
if x.ndim != 4:
do_warning(f"Image tensor has rank {x.ndim}, not rank 4")
# check that H=W
if x.shape[2] != x.shape[3]:
do_warning(
f"Image tensor shape {x.shape} doesn't have square images")
# check that image is in [0,1] (approximately)
# this is the range that SB uses
v_min = torch.min(x).item()
v_max = torch.max(x).item()
if v_min < -0.01 or v_max > 1.01:
do_warning(
f"Input image tensor has values in range [{v_min}, {v_max}], "
"not expected range [0, 1]")
std = torch.std(x).item()
if std < 0.05:
do_warning(
f"Input image tensor values have low stddev {std} (range "
f"[{v_min}, {v_max}])")
def get_data_pipeline_and_env(task_name, data_root, wrappers, dummy=True):
"""
This code loads a data pipeline object and creates an (optionally dummy) environment with the
same observation and action space as the (wrapped) environment you want to train on
:param task_name: The name of the MineRL task you want to get data for
:param data_root: For manually specifying a MineRL data root
:param wrappers: The wrappers you want to apply to both the loaded data and live environment
"""
data_pipeline = minerl.data.make(environment=task_name,
data_dir=data_root)
if dummy:
env = DummyEnv(action_space=data_pipeline.action_space,
observation_space=data_pipeline.observation_space)
else:
env = gym.make(task_name)
wrapped_env = wrap_env(env, wrappers)
return data_pipeline, wrapped_env
def create_data_iterator(
wrapped_dummy_env: gym.Env,
data_pipeline: minerl.data.DataPipeline,
batch_size: int,
buffer_size: int = 15000,
num_epochs: int = None,
num_batches: int = None,
remove_no_ops: bool = False,
) -> dict:
"""
Construct a data iterator that (1) loads data from disk, and (2) wraps it in the set of
wrappers that have been applied to `wrapped_dummy_env`.
:param wrapped_dummy_env: An environment that mimics the base environment and wrappers we'll be using for training,
but doesn't actually call Minecraft
:param data_pipeline: A MineRL DataPipeline object that can handle loading data from disk
:param batch_size: The batch size we want the iterator to produce
:param num_epochs: The number of epochs we want the underlying iterator to run for
:param num_batches: The number of batches we want the underlying iterator to run for
:param remove_no_ops: Whether to remove transitions with no-op demonstrator actions from batches
as they are generated. For now, this corresponds to all-zeros.
:yield: Wrapped observations and actions in a dict with the keys "obs", "acts", "rews",
"next_obs", "dones".
"""
buffered_iterator = BufferedBatchIter(data_pipeline, buffer_target_size=buffer_size)
for current_obs, action, reward, next_obs, done in buffered_iterator.buffered_batch_iter(batch_size=batch_size,
num_epochs=num_epochs,
num_batches=num_batches):
wrapped_obs = optional_observation_map(wrapped_dummy_env,
recursive_squeeze(current_obs))
wrapped_next_obs = optional_observation_map(wrapped_dummy_env,
recursive_squeeze(next_obs))
wrapped_action = optional_action_map(wrapped_dummy_env,
recursive_squeeze(action))
if remove_no_ops:
# This definitely makes assumptions about the action space, namely that all-zeros corresponds to a no-op
not_no_op_indices = wrapped_action.sum(axis=1) != 0
wrapped_obs = wrapped_obs[not_no_op_indices]
wrapped_next_obs = wrapped_next_obs[not_no_op_indices]
wrapped_action = wrapped_action[not_no_op_indices]
return_dict = dict(obs=wrapped_obs,
acts=wrapped_action,
rews=reward,
next_obs=wrapped_next_obs,
dones=done)
yield return_dict
|
994,638 | 7b5ed4cb102e512fff8f8b6b27112c9b50fa4f8b | '''DNA object classes.'''
import collections
import os
import re
import shutil
import subprocess
import tempfile
import coral.analysis
import coral.reaction
import coral.seqio
from ._sequence import process_seq, reverse_complement
from ._sequence import NucleotideSequence
class DNA(NucleotideSequence):
'''DNA sequence.'''
def __init__(self, dna, bottom=None, topology='linear', stranded='ds',
features=None, run_checks=True, id=None, name=''):
'''
:param dna: Input sequence (DNA).
:type dna: str
:param bottom: Manual input of bottom-strand sequence. Enables both
mismatches and initializing ssDNA.
:type bottom: str
:param topology: Topology of DNA - 'linear' or 'circular'.
:type topology: str
:param stranded: Strandedness of DNA - 'ss' for single-stranded or
'ds' for double-stranded.
:type stranded: str
:param features: List of annotated features.
:type features: list
:param run_checks: Check inputs / formats (disabling increases speed):
alphabet check
case
:type run_checks: bool
:param id: An optional (unique) id field for your DNA sequence.
:type id: str
:param name: Optional name field for your DNA sequence.
:type name: str
:returns: coral.DNA instance.
:rtype: coral.DNA
:raises: ValueError if an element of `features` isn't of type
coral.Feature.
ValueError if top and bottom strands have different lengths.
ValueError if top and bottom strands are not complementary.
'''
# Convert to uppercase, run alphabet check
super(DNA, self).__init__(dna, 'dna', features=features,
run_checks=run_checks)
# Set topology
self.topology = topology
# Set strandedness
self.stranded = stranded
# If bottom was specified, check it + add it
if bottom:
self._bottom = bottom
if run_checks:
self._bottom = process_seq(bottom, 'dna')
if len(self._bottom) != len(self._sequence):
msg = 'Top and bottom strands are difference lengths.'
raise ValueError(msg)
else:
self._bottom = ''.join(['-' for x in self._sequence])
# NOTE: inefficient to assign blanks the rev comp, but cleaner code
if stranded == 'ds':
self._bottom = str(reverse_complement(self._sequence, 'dna'))
# Set id
self.id = id
# Set name
self.name = name
def ape(self, ape_path=None):
'''Open in ApE.'''
cmd = 'ApE'
if ape_path is None:
# Check for ApE in PATH
ape_executables = []
for path in os.environ['PATH'].split(os.pathsep):
exepath = os.path.join(path, cmd)
ape_executables.append(os.access(exepath, os.X_OK))
if not any(ape_executables):
raise Exception('Ape not in PATH. Use ape_path kwarg.')
else:
cmd = ape_path
# Check whether ApE exists in PATH
tmp = tempfile.mkdtemp()
if self.name is not None and self.name:
filename = os.path.join(tmp, '{}.ape'.format(self.name))
else:
filename = os.path.join(tmp, 'tmp.ape')
coral.seqio.write_dna(self, filename)
process = subprocess.Popen([cmd, filename])
# Block until window is closed
try:
process.wait()
shutil.rmtree(tmp)
except KeyboardInterrupt:
shutil.rmtree(tmp)
def bottom(self):
'''Return the raw string of the Crick (bottom) strand.
:returns: The Crick strand.
:rtype: str
'''
return self._bottom
def copy(self):
'''Create a copy of the current instance.
:returns: A safely-editable copy of the current sequence.
:rtype: coral.DNA
'''
# Significant performance improvements by skipping alphabet check
features_copy = [feature.copy() for feature in self.features]
return type(self)(self._sequence, bottom=self._bottom,
topology=self.topology, stranded=self.stranded,
features=features_copy, id=self.id, name=self.name,
run_checks=False)
def circularize(self):
'''Circularize linear DNA.
:returns: A circularized version of the current sequence.
:rtype: coral.DNA
'''
if self.top()[-1] == '-' and self.bottom()[0] == '-':
raise ValueError('Cannot circularize - termini disconnected.')
if self.bottom()[-1] == '-' and self.top()[0] == '-':
raise ValueError('Cannot circularize - termini disconnected.')
copy = self.copy()
copy.topology = 'circular'
return copy
def extract(self, name, remove_subfeatures=False):
return super(DNA, self).extract(name, 'N',
remove_subfeatures=remove_subfeatures)
def flip(self):
'''Flip the DNA - swap the top and bottom strands.
:returns: Flipped DNA (bottom strand is now top strand, etc.).
:rtype: coral.DNA
'''
copy = self.copy()
copy._sequence, copy._bottom = copy._bottom, copy._sequence
return copy
def gc(self):
'''Find the frequency of G and C in the current sequence.'''
gc_n = len([base for base in self if str(base) == 'C' or
str(base) == 'G'])
return float(gc_n) / len(self)
def insert(self, sequence, index):
inserted = super(DNA, self).insert(sequence, index)
inserted.topology = self.topology
return inserted
def is_rotation(self, other):
if len(self) != len(other):
return False
for i in range(len(self)):
if self.rotate(i) == other:
return True
# If all else fails, check reverse complement
rc = self.reverse_complement()
for i in range(len(self)):
if rc.rotate(i) == other:
return True
return False
def linearize(self, index=0):
'''Linearize circular DNA at an index.
:param index: index at which to linearize.
:type index: int
:returns: A linearized version of the current sequence.
:rtype: coral.DNA
:raises: ValueError if the input is linear DNA.
'''
if self.topology == 'linear':
raise ValueError('Cannot relinearize linear DNA.')
copy = self.copy()
copy.topology = 'linear'
copy = copy[index:] + copy[:index]
return copy
def locate(self, pattern):
'''Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: A list of top and bottom strand indices of matches.
:rtype: list of lists of indices (ints)
:raises: ValueError if the pattern is longer than either the input
sequence (for linear DNA) or twice as long as the input
sequence (for circular DNA).
'''
# TODO: If linear, should use the methods in BaseSequence
if self.topology == 'circular':
if len(pattern) > 2 * len(self):
raise ValueError('Pattern too long.')
else:
if len(pattern) > len(self):
raise ValueError('Pattern too long.')
pattern = str(pattern).upper()
regex = '(?=' + pattern + ')'
if self.topology == 'circular':
r = len(pattern) - 1
l = len(self) - r + 1
top = self._sequence[l:] + self._sequence + self._sequence[:r]
bottom = self._bottom[l:] + self._bottom + self._bottom[:r]
else:
top = self._sequence
bottom = self._bottom
top_starts = [index.start() for index in re.finditer(regex, top)]
bottom_starts = [index.start() for index in re.finditer(regex, bottom)]
# Adjust indices if doing circular search
if self.topology == 'circular' and len(pattern) > 1:
top_starts = [start - r + 1 for start in top_starts]
bottom_starts = [start - r + 1 for start in bottom_starts]
return [top_starts, bottom_starts]
def mw(self):
'''Calculate the molecular weight.
:returns: The molecular weight of the current sequence.
:rtype: float
'''
counter = collections.Counter((self._sequence + self._bottom).lower())
mw_a = counter['a'] * 313.2
mw_t = counter['t'] * 304.2
mw_g = counter['g'] * 289.2
mw_c = counter['c'] * 329.2
return mw_a + mw_t + mw_g + mw_c
def rotate(self, index):
'''Orient DNA to index (only applies to circular DNA).
:param index: DNA position at which to re-zero the DNA.
:type index: int
:returns: The current sequence reoriented at `index`.
:rtype: coral.DNA
:raises: ValueError if applied to linear sequence or `index` is
negative.
'''
if self.topology == 'linear' and index != 0:
raise ValueError('Cannot rotate linear DNA')
if index < 0:
raise ValueError('Rotation index must be positive')
else:
return (self[index:] + self[0:index]).circularize()
def rotate_by_feature(self, featurename):
'''Reorient the DNA based on a feature it contains (circular DNA only).
:param featurename: A uniquely-named feature.
:type featurename: str
:returns: The current sequence reoriented at the start index of a
unique feature matching `featurename`.
:rtype: coral.DNA
:raises: ValueError if there is no feature of `featurename` or
more than one feature matches `featurename`.
'''
# REFACTOR: Parts are redundant with .extract()
matched = []
for feature in self.features:
if feature.name == featurename:
matched.append(feature.copy())
count = len(matched)
if count == 1:
return self.rotate(matched[0].start)
elif count > 1:
raise ValueError('More than one feature has that name.')
else:
raise ValueError('No such feature in the sequence.')
def reverse_complement(self):
'''Reverse complement the DNA.
:returns: A reverse-complemented instance of the current sequence.
:rtype: coral.DNA
'''
# TODO: put into NucleotideSequence class
copy = self.copy()
# Note: if sequence is double-stranded, swapping strand is basically
# (but not entirely) the same thing - gaps affect accuracy.
copy._sequence = reverse_complement(copy._sequence, 'dna')
copy._bottom = reverse_complement(copy._bottom, 'dna')
# Fix features (invert)
for feature in copy.features:
# Swap strand
if feature.strand == 1:
feature.strand = 0
else:
feature.strand = 1
# Swap start and stop
feature.start, feature.stop = (feature.stop, feature.start)
# Adjust start/stop to feature len
feature.start = len(copy) - feature.start
feature.stop = len(copy) - feature.stop
return copy
def tm(self, parameters='cloning'):
'''Find the melting temperature.
:param parameters: The tm method to use (cloning, santalucia98,
breslauer86)
:type parameters: str
'''
return coral.analysis.tm(self, parameters=parameters)
def to_ss(self):
'''Produce single stranded version of the current sequence.
:returns: The current sequence, converted to ssDNA.
:rtype: coral.DNA
'''
copy = self.copy()
# Do nothing if already single-stranded
if self.stranded == 'ss':
return copy
copy._bottom = '-' * len(copy)
for top, bottom in zip(copy.top(), reversed(copy.bottom())):
if top == bottom == '-':
raise ValueError('Coercing to single-stranded would ' +
'introduce a double stranded break.')
copy.stranded = 'ss'
return copy
def to_ds(self):
'''Produce double stranded version of the current sequence.
:returns: The current sequence, converted to dsDNA.
:rtype: coral.DNA
'''
# TODO: protect .stranded attribute if requiring setter method
copy = self.copy()
# Do nothing if already set
if self.stranded == 'ds':
return copy
# Find strand that's all gaps (if ss this should be the case)
reverse_seq = self.reverse_complement()
if all([char == '-' for char in self._sequence]):
copy._sequence = reverse_seq._bottom
elif all([char == '-' for char in self._bottom]):
copy._bottom = reverse_seq._sequence
copy.stranded = 'ds'
return copy
def top(self):
'''Return the raw string of the Watson (top) strand.
:returns: The Watson strand.
:rtype: str
'''
return self._sequence
def transcribe(self):
'''Transcribe into RNA.
:returns: An RNA sequence transcribed from the current DNA sequence.
:rtype: coral.RNA
'''
return coral.reaction.transcribe(self)
def __add__(self, other):
'''Add DNA together.
:param other: instance to be added to.
:type other: compatible sequence object (currently only DNA).
:returns: Concatenated DNA sequence.
:rtype: coral.DNA
:raises: Exception if either sequence is circular.
Exception if concatenating a sequence with overhangs would
create a discontinuity.
'''
if type(self) != type(other):
try:
other = type(self)(other)
except AttributeError:
raise TypeError('Cannot add {} to {}'.format(self, other))
if self.topology == 'circular' or other.topology == 'circular':
raise Exception('Can only add linear DNA.')
discontinuity = [False, False]
if len(self) != 0 and len(other) != 0:
# If either is empty, let things proceed anyways
discontinuity[0] = (self._sequence[-1] == '-' and
other._bottom[-1] == '-')
discontinuity[1] = (self._bottom[0] == '-' and
other._sequence[0] == '-')
for_discontinuity = discontinuity[0]
rev_discontinuity = discontinuity[1]
if for_discontinuity or rev_discontinuity:
msg = 'Concatenated DNA would be discontinuous.'
raise Exception(msg)
if self.stranded == 'ds' or other.stranded == 'ds':
stranded = 'ds'
else:
stranded = 'ss'
tops = self._sequence + other._sequence
bottoms = other._bottom + self._bottom
self_features = [feature.copy() for feature in self.features]
other_features = [feature.copy() for feature in other.features]
for feature in other_features:
feature.move(len(self))
features = self_features + other_features
new_instance = DNA(tops, bottom=bottoms, topology='linear',
stranded=stranded, run_checks=False,
features=features)
return new_instance
def __contains__(self, query):
'''Defines `query in sequence` operator.
:param query: query string or DNA sequence
:type query: str or coral.DNA
'''
# query in forward sequence
if super(DNA, self).__contains__(query, 'N'):
return True
# query in reverse complement
elif super(DNA, self.reverse_complement()).__contains__(query, 'N'):
return True
# query in neither
else:
return False
def __delitem__(self, index):
'''Delete sequence at an index.
:param index: index to delete
:type index: int
:returns: The current sequence with the base at `index` removed.
:rtype: coral.DNA
'''
super(DNA, self).__delitem__(index)
bottom_list = list(self._bottom[::-1])
del bottom_list[index]
self._bottom = ''.join(bottom_list)[::-1]
def __getitem__(self, key):
'''Index and slice sequences.
:param key: int or slice object for subsetting.
:type key: int or slice object
:returns: A subsequence matching the slice (`key`).
:rtype: coral.DNA
'''
# Use BaseSequence method to assign top strand and figure out features
if isinstance(key, slice):
if all([k is None for k in [key.start, key.stop, key.step]]):
# It's the copy slice operator ([:])
return self.copy()
else:
# The key is a normal slice
copy = super(DNA, self).__getitem__(key)
# bottom_key = slice(-key.stop if key.stop is not None
# else None,
# -key.start if key.start is not None
# else None,
# key.step)
# copy._bottom = copy._bottom[bottom_key]
copy._bottom = copy._bottom[::-1][key][::-1]
else:
# The key is an integer
copy = super(DNA, self).__getitem__(key)
copy._bottom = copy._bottom[-key]
copy.topology = 'linear'
return copy
def __eq__(self, other):
'''Define equality - sequences, topology, and strandedness are the
same.
:returns: Whether current sequence's (Watson and Crick), topology,
and strandedness are equivalent to those of another sequence.
:rtype: bool
'''
tops_equal = self._sequence == other._sequence
bottoms_equal = self._bottom == other._bottom
topology_equal = self.topology == other.topology
stranded_equal = self.stranded == other.stranded
if tops_equal and bottoms_equal and topology_equal and stranded_equal:
return True
else:
return False
def __repr__(self):
'''String to print when object is called directly.'''
parent = super(DNA, self).__repr__()
display_bases = 40
if len(self._sequence) < 90:
bottom = self._bottom[::-1]
else:
rev_bottom = self._bottom[::-1]
bottom = ''.join([rev_bottom[0:display_bases], ' ... ',
rev_bottom[-display_bases:]])
first_line = '{} {}DNA:'.format(self.topology, self.stranded)
to_print = '\n'.join([first_line, parent, bottom])
return to_print
def __setitem__(self, index, new_value):
'''Sets value at index to new value.
:param index: The index at which the sequence will be modified.
:type index: int
:param new_value: The new value at that index
:type new_value: str or coral.DNA
:returns: The current sequence with the sequence at `index` replaced
with `new_value`.
:rtype: coral.DNA
:raises: ValueError if `new_value` is '-'.
'''
new_value = str(new_value)
if new_value == '-':
raise ValueError('Cannot insert gap - split sequence instead.')
# setitem on top strand
super(DNA, self).__setitem__(index, new_value)
# setitem on bottom strand
if self.stranded == 'ds':
sequence_list = list(self._bottom)[::-1]
sequence_list[index] = str(DNA(new_value).reverse_complement())
self._bottom = ''.join(sequence_list[::-1])
else:
self._bottom = '-' * len(self)
class RestrictionSite(object):
'''Recognition site and properties of a restriction endonuclease.'''
def __init__(self, recognition_site, cut_site, name=None):
'''
:param recognition_site: Input sequence.
:type recognition_site: coral.DNA
:param cut_site: 0-indexed indices where DNA is nicked (top, then
bottom strand). For an n-sized recognition site, there
are n + 1 positions at which to cut.
:type cut_site: 2-tuple.
:param name: Identifier of this restriction site
:type name: str
:returns: instance of coral.RestrictionSite
'''
self.recognition_site = recognition_site # require DNA object
# cutsite is indexed to leftmost base of restriction site
self.cut_site = cut_site # tuple of where top/bottom strands are cut
# optional name
self.name = name
def is_palindrome(self):
'''Report whether sequence is palindromic.
:returns: Whether the restriction site is a palindrome.
:rtype: bool
'''
return self.recognition_site.is_palindrome()
def cuts_outside(self):
'''Report whether the enzyme cuts outside its recognition site.
Cutting at the very end of the site returns True.
:returns: Whether the enzyme will cut outside its recognition site.
:rtype: bool
'''
for index in self.cut_site:
if index < 0 or index > len(self.recognition_site) + 1:
return True
return False
def copy(self):
'''Return copy of the restriction site.
:returns: A safely editable copy of the current restriction site.
:rtype: coral.RestrictionSite
'''
return RestrictionSite(self.recognition_site, self.cut_site,
self.name)
def __repr__(self):
'''Represent a restriction site.'''
site = self.recognition_site
cut_symbols = ('|', '|')
if not self.cuts_outside():
top_left = str(site[0:self.cut_site[0]])
top_right = str(site[self.cut_site[0]:])
top_w_cut = top_left + cut_symbols[0] + top_right
bottom_left = site[0:self.cut_site[1]].reverse_complement()
bottom_left = str(bottom_left)[::-1]
bottom_right = site[self.cut_site[1]:].reverse_complement()
bottom_right = str(bottom_right)[::-1]
bottom_w_cut = bottom_left + cut_symbols[1] + bottom_right
else:
return '\n'.join([site.top() + ' {}'.format(self.cut_site),
site.bottom()])
return '\n'.join([top_w_cut, bottom_w_cut])
def __len__(self):
'''Defines len operator.
:returns: Length of the recognition site.
:rtype: int
'''
return len(self.recognition_site)
class Primer(object):
'''A DNA primer - ssDNA with tm, anneal, and optional overhang.'''
def __init__(self, anneal, tm, overhang=None, name='', note=''):
'''
:param anneal: Annealing sequence
:type anneal: coral.DNA
:param overhang: Overhang sequence
:type overhang: coral.DNA
:param tm: melting temperature
:type tm: float
:param name: Optional name of the primer. Used when writing to csv with
seqio.write_primers.
:type name: str
:param note: Optional description to associate with the primer. Used
when writing to csv with seqio.write_primers.
:type note: str
:returns: coral.Primer instance.
'''
self.tm = tm
self.anneal = anneal.to_ss()
if overhang is not None:
self.overhang = overhang.to_ss()
else:
self.overhang = DNA('', stranded='ss')
self.name = name
self.note = note
def copy(self):
'''Generate a Primer copy.
:returns: A safely-editable copy of the current primer.
:rtype: coral.DNA
'''
return type(self)(self.anneal, self.tm, overhang=self.overhang,
name=self.name, note=self.note)
def primer(self):
'''Produce full (overhang + annealing sequence) primer sequence.
:returns: The DNA sequence of the primer.
:rtype: coral.DNA
'''
return self.overhang + self.anneal
def __repr__(self):
'''Representation of a primer.'''
if self.overhang:
return 'Primer: {} Tm: {:.2f}'.format(self.overhang.top().lower() +
self.anneal.top(), self.tm)
else:
return 'Primer: {} Tm: {:.2f}'.format(self.anneal.top(), self.tm)
def __str__(self):
'''Coerce DNA object to string.
:returns: A string of the full primer sequence.
:rtype: str
'''
return str(self.primer())
def __eq__(self, other):
'''Define equality - sequences, topology, and strandedness are the
same.
:returns: Whether two primers have the same overhang and annealing
sequence.
:rtype: bool
'''
anneal_equal = self.anneal == other.anneal
overhang_equal = self.overhang == other.overhang
if anneal_equal and overhang_equal:
return True
else:
return False
def __len__(self):
'''Define len operator.
:returns: The length of the full primer sequence.
:rtype: int
'''
return len(self.primer())
|
994,639 | 50845c1864c535ee49987719313cc05f6fc96d92 | # =============================================================================
# protocol
#
# Copyright (c) 2014, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
import pexpect
#INVALID_INPUT = "Invalid input detected"
PASS = "[P|p]assword:\s*"
XR_PROMPT = re.compile('(\w+/\w+/\w+/\w+:.*?)(\([^()]*\))?#')
USERNAME = "[U|u]sername:\s?|\nlogin:\s?"
PERMISSION_DENIED = "Permission denied"
AUTH_FAILED = "Authentication failed|not authorized|Login incorrect"
SHELL_PROMPT = "\$\s?|>\s?|#\s?|AU_PROMPT"
CONNECTION_REFUSED = "Connection refused"
RESET_BY_PEER = "reset by peer|closed by foreign host"
# Error when the hostname can't be resolved or there is
# network reachability timeout
UNABLE_TO_CONNECT = "nodename nor servname provided, or not known" \
"|Unknown host|[Operation|Connection] timed out"
class Protocol(object):
def __init__(
self,
controller,
node_info,
account_manager=None,
logfile=None,
debug=5
):
self.protocol = node_info.protocol
self.hostname = node_info.hostname
self.port = node_info.port
self.password = node_info.password
self.ctrl = controller
self.logfile = logfile
self.account_manager = account_manager
username = node_info.username
if not username and self.account_manager:
username = self.account_manager.get_username(self.hostname)
self.username = username
self.debug = debug
def _spawn_session(self, command):
self.ctrl._dbg(10, "Starting session: '{}'".format(command))
if self.ctrl._session and self.ctrl.isalive():
self.ctrl.sendline(command)
else:
self.ctrl._session = pexpect.spawn(
command,
maxread=50000,
searchwindowsize=None,
echo=False
)
self.ctrl._session.logfile_read = self.logfile
def connect(self):
raise NotImplementedError("Connection method not implemented")
def _dbg(self, level, msg):
self.ctrl._dbg(level, "{}: {}".format(self.protocol, msg))
def _acquire_password(self):
password = self.password
if not password:
if self.account_manager:
self.ctrl._dbg(
20,
"{}: {}: Acquiring password for {} "
"from system KeyRing".format(
self.protocol, self.hostname, self.username)
)
password = self.account_manager.get_password(
self.hostname,
self.username,
interact=True
)
if not password:
self.ctrl._dbg(
30,
"{}: {}: Password for {} does not exists "
"in KeyRing".format(
self.protocol, self.hostname, self.username)
)
return password
|
994,640 | a8436cfc5e85bec088b3547c810c801178d717f8 | is_has_name = True
name = 'Nax' if is_has_name else 'Empty'
print(name)
IS_ONE = False
number = 1 if IS_ONE else 2
print(number)
word = 'слово'
result = []
for i in range(len(word)):
# if i % 2 != 0:
# letter = word[i].lower()
# else:
# letter = word[i].upper()
#
letter = word[i].lower() if i % 2 != 0 else word[i].upper()
result.append(letter)
result = ''.join(result)
print(result)
password = input('Введите пароль')
print('Войти' if password == 'secret' else 'Вход запрещен!') |
994,641 | 0970ccdc0ea116a50b973a526e5bacfc0879141c | import numpy as np
from root_regula_falsi import *
osf1 = 14.621
T1 = 0.0
osf2 = 6.413
T2 = 40.0
def calc_osf(T):
Ta = T + 273.15
arg = -139.34411 + 1.575701e5/Ta - 6.642308e7/Ta**2 + 1.2438e10/Ta**3 - 8.621949e11/Ta**4
return np.exp(arg)
def solve_for_T(osf):
def f(T):
Ta = T + 273.15
return -np.log(osf) - 139.34411 + 1.575701e5/Ta - 6.642308e7/Ta**2 + \
1.2438e10/Ta**3 - 8.621949e11/Ta**4
# Initial guess
T1 = 0.0
T2 = 40.0
return root_regula_falsi(f, T1, T2)
Npoints = 10
osf_vals = np.linspace(7.0, 14.0, Npoints)
T = np.zeros(Npoints)
for i,o in enumerate(osf_vals):
T[i] = solve_for_T(o)
import matplotlib.pyplot as plt
plt.plot(osf_vals, T, marker="o")
plt.grid(True)
plt.xlabel("osf")
plt.ylabel("T")
plt.savefig("IMG_exe_5_18_funcplot.pdf") |
994,642 | 27145aaf63510bf85766b68af3fcab5d5c9249a0 | # coding = <utf-8>
""" 버튼 위젯
(ref) https://youtu.be/bKPIcoou9N8?t=526
"""
import os
import os.path as osp
from tkinter import Tk, Button, PhotoImage
root = Tk()
root.title("My GUI")
btn1 = Button(root, text="버튼1") # 버튼 객체 초기화
btn1.pack() # mainloop() 에 버튼이 표출되도록 함
btn2 = Button(root, padx=5, pady=10, text='버튼2') # 디폴트 버튼 박스 크기에서 패딩(padding)
btn2.pack()
btn3 = Button(root, padx=10, pady=5, text='버튼3')
btn3.pack()
btn4 = Button(root, width=10, height=3, text='버튼4') # 고정 박스 크기를 직접 설정
btn4.pack()
btn5 = Button(root, fg='red', bg="yellow", text='버튼5') # foreground, background 색상
btn5.pack()
img_path = osp.join(os.getcwd(), 'steps', 'images', 'button.png') # 이미지로 버튼 만들기
photo = PhotoImage(file=img_path)
btn6 = Button(root, image=photo)
btn6.pack()
""" 버튼 클릭 후 기능 실행
"""
def btncmd():
print("버튼이 클릭되었습니다.")
btn7 = Button(root, text="동작하는 버튼", command=btncmd)
btn7.pack()
root.mainloop() |
994,643 | c4eeb5319ba47e9d4fef2211d1ed64d5285491fb | # Generated by Django 3.2.5 on 2021-09-02 17:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('LeaveApp', '0014_auto_20210902_1249'),
]
operations = [
migrations.RenameField(
model_name='leaverequest',
old_name='cancel_leave',
new_name='cancel_reason',
),
]
|
994,644 | 513309d5680748632747529766a69a6c659b7b57 | import aioredis
from aioredis import Redis
from excars import config
async def setup():
return await aioredis.create_redis_pool(
config.REDIS_HOST, db=config.REDIS_DB, minsize=config.REDIS_POOL_MIN, maxsize=config.REDIS_POOL_MAX
)
async def stop(redis_cli: Redis):
redis_cli.close()
await redis_cli.wait_closed()
|
994,645 | 373ee3dab8ce61b9034522442bdbd6565cf9c424 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Exibe varios padroes de contadores.
@author: Prof. Diogo SM
"""
contador = 0
print("10 iteracoes com passo 1")
while contador < 10:
print(contador, end=" ")
contador += 1
print()
contador = 0
print("11 iteracoes com passo 1")
while contador <= 10:
print(contador, end=" ")
contador += 1
print()
contador = 1
print("30 iteracoes com passo 1")
while contador < 30:
print(contador, end=" ")
contador += 1
print()
contador = 10
print("10 iteracoes com passo -1")
while contador > 0:
print(contador, end=" ")
contador -= 1
print()
contador = 10
print("5 iteracoes com passo -2")
while contador > 0:
print(contador, end=" ")
contador -= 2
|
994,646 | e64b53e1bbbd9f769e9da049bab02bca115b4321 | import fnmatch
thing = cmds.ls(sl=True)
getProject = cmds.workspace(expandName = 'relativePathName')
getAssetPath = getProject.split('build/')
getAssetName = getAssetPath[1].split('/m')
fullPath = (getAssetPath[0] + 'build/'+ getAssetName[0] + '/m_model/textures/wip/hi/')
refs = cmds.ls(type='reference')
refs.remove('sharedReferenceNode')
for i in refs:
rFile = cmds.referenceQuery(i, f=True)
if cmds.referenceQuery(rFile, il=True):
cmds.file(rFile, importReference=True)
for tex in thing:
getSG = cmds.listConnections(tex, p=True, s=True, d=False)
getSourceName = tex
connectionDict = {}
getTexDirectory = cmds.getAttr('%s.texDirectory' % tex)
#print getTexDirectory
getTexDirectory = getTexDirectory.replace('call_of_duty_cod_eclipse_J83580', 'marvel_strike_force_J405577')
getTexRes = cmds.getAttr('%s.texResolution' % tex)
#print getTexRes
compilePath = (getTexDirectory + '/wip/' + getTexRes + '/')
getTexName = tex.split(':')[1]
combineFullTexturePath = (compilePath + getTexName + '_u<U>_v<V>.exr')
fileTexture = cmds.shadingNode('file', asTexture=True, name=tex)
cmds.select(tex)
for attr in getSG:
getDest = cmds.listConnections(attr, d=True, s=False, p=True)
getSourceNameWild = '*' + getSourceName + '*'
getBadConnect = fnmatch.filter(getDest,getSourceNameWild)
for x in getBadConnect:
getDest.remove(x)
connectionDict[attr]=getDest
for x,y in connectionDict.items():
if y != []:
for obj in y:
isoPlug = x.split('.')[1]
newTexture = (fileTexture + '.' + isoPlug)
cmds.connectAttr(newTexture,obj, force=True)
twoD = cmds.shadingNode('place2dTexture', asUtility=True, name='%sPlace2d' % tex)
cmds.connectAttr('%s.outUV' % (twoD), '%s.uv' % (fileTexture))
cmds.setAttr('%s.fileTextureName' % (fileTexture), combineFullTexturePath, type='string')
|
994,647 | db93a7dd105430524758263e560d12ea86177356 | FollowedUserNames={'UserName': 'UserName', 'Date': [0, 0, 0]},{'UserName': 'lilarshiaw', 'Date': [2020, 7, 5]} , {'UserName': 'nafc102030', 'Date': [2020, 7, 5]} , {'UserName': 'amir71304', 'Date': [2020, 7, 5]} , {'UserName': 'reza._.zomorodiii', 'Date': [2020, 7, 5]} , {'UserName': 'amir_h_keshavarziyan', 'Date': [2020, 7, 5]} , {'UserName': 'gamer.htm061', 'Date': [2020, 7, 5]} , {'UserName': 'alib13.81', 'Date': [2020, 7, 5]} , {'UserName': 'mr.aly82', 'Date': [2020, 7, 5]} , {'UserName': 'fortnite_clipstr', 'Date': [2020, 7, 5]} , {'UserName': 'saleh._1386', 'Date': [2020, 7, 5]} , {'UserName': 'ayda._.sami', 'Date': [2020, 7, 5]} , {'UserName': 'aryant.k', 'Date': [2020, 7, 5]} , {'UserName': 'ownerh225', 'Date': [2020, 7, 5]} , {'UserName': 'most.afa71', 'Date': [2020, 7, 5]} , {'UserName': 'hsen5283', 'Date': [2020, 7, 5]} , {'UserName': 'persian.sporting', 'Date': [2020, 7, 5]} , {'UserName': 'milish98', 'Date': [2020, 7, 5]} , {'UserName': 'itz._.psych0', 'Date': [2020, 7, 5]} , {'UserName': 'kamali.barbod', 'Date': [2020, 7, 5]} , {'UserName': 'armin_mollahoseini', 'Date': [2020, 7, 5]} , {'UserName': 'naznin.flore.p185t', 'Date': [2020, 7, 5]} , {'UserName': 'lydw377', 'Date': [2020, 7, 5]} , {'UserName': 'itsbrdia', 'Date': [2020, 7, 5]} , {'UserName': 'arya_gh72meymand', 'Date': [2020, 7, 5]} , {'UserName': 'm_ata_a_king', 'Date': [2020, 7, 5]} , {'UserName': 'amiir.jfri', 'Date': [2020, 7, 5]} , {'UserName': 'techno_funny', 'Date': [2020, 7, 5]} , {'UserName': 'nami13gh', 'Date': [2020, 7, 5]} , {'UserName': 'amiiinn49', 'Date': [2020, 7, 5]} , {'UserName': 'amir_0987654321000', 'Date': [2020, 7, 5]} , {'UserName': 'ali._.br_', 'Date': [2020, 7, 5]} , {'UserName': 'matin.alavian', 'Date': [2020, 7, 5]} , {'UserName': 'dadashila', 'Date': [2020, 7, 5]} , {'UserName': 'ata._.pro._.gamer', 'Date': [2020, 7, 5]} , {'UserName': 'amir_1k384', 'Date': [2020, 7, 5]} , {'UserName': '_mohammad__ghodsinejad_sh10', 'Date': [2020, 7, 5]} , {'UserName': 'sina_banitalebi1', 'Date': [2020, 7, 5]} , {'UserName': 'rezamoradi24680', 'Date': [2020, 7, 5]} , {'UserName': 'amir.mehrara83', 'Date': [2020, 7, 5]} , {'UserName': 'alirexaaam', 'Date': [2020, 7, 5]} , {'UserName': '_kingkord_', 'Date': [2020, 7, 5]} , {'UserName': 'arshiayadgarazadi', 'Date': [2020, 7, 5]} , {'UserName': 'rza.abdo', 'Date': [2020, 7, 5]} , {'UserName': 'amirkheirmandi1384', 'Date': [2020, 7, 5]} , {'UserName': 'yo_montego', 'Date': [2020, 7, 14]} , {'UserName': 'majidmmg', 'Date': [2020, 7, 14]} , {'UserName': 'amirmhmd5605', 'Date': [2020, 7, 14]} , {'UserName': 'mehrad.khoshsolat', 'Date': [2020, 7, 14]} , {'UserName': 'tripplexwolve', 'Date': [2020, 7, 14]} , {'UserName': 'matin.gh.z', 'Date': [2020, 7, 14]} , {'UserName': 'hesam_ma20', 'Date': [2020, 7, 14]} , {'UserName': 'mhdz.2002', 'Date': [2020, 7, 14]} , {'UserName': 'mohammad37amin', 'Date': [2020, 7, 14]} , {'UserName': 'amirhosein_irannejad', 'Date': [2020, 7, 14]} , {'UserName': 'fifa20_cup', 'Date': [2020, 7, 14]} , {'UserName': 'b._.rdia', 'Date': [2020, 7, 14]} , {'UserName': 'java.d4443', 'Date': [2020, 7, 14]} , {'UserName': 'the_persiangamerrr', 'Date': [2020, 7, 14]} , {'UserName': 'meme_._games', 'Date': [2020, 7, 14]} , {'UserName': 'refighpooya', 'Date': [2020, 7, 14]} , {'UserName': 'ebiruo', 'Date': [2020, 7, 14]} , {'UserName': 'afraa.575', 'Date': [2020, 7, 14]} , {'UserName': 'hassanhamiidii', 'Date': [2020, 7, 14]} , {'UserName': 'hosein.n.s.15', 'Date': [2020, 7, 14]} , {'UserName': 'm.r.reza.r.m.1383', 'Date': [2020, 7, 14]} , {'UserName': 'mahbob13736', 'Date': [2020, 7, 14]} , {'UserName': 'babakfaraji.kh', 'Date': [2020, 7, 14]} , {'UserName': 'mh5958222', 'Date': [2020, 7, 14]} , {'UserName': 'gh.archer', 'Date': [2020, 7, 14]} , {'UserName': 'hooman_pgg', 'Date': [2020, 7, 14]} , {'UserName': 'aliii_sh86', 'Date': [2020, 7, 14]} , {'UserName': 'ar.ian4522', 'Date': [2020, 7, 14]} , {'UserName': '09irann', 'Date': [2020, 7, 14]} , {'UserName': 'its._tomboy', 'Date': [2020, 7, 14]} , |
994,648 | 9281907334138a7c6722c2bd62a105ec0e493c09 | from moviepy.editor import concatenate_videoclips, VideoFileClip
def concatenate(video_clip_paths, output_path, method="compose"):
"""Concatenates several video files into one video file
and save it to `output_path`. Note that extension (mp4, etc.) must be added to `output_path`
`method` can be either 'compose' or 'reduce':
`reduce`: Reduce the quality of the video to the lowest quality on the list of `video_clip_paths`.
`compose`: type help(concatenate_videoclips) for the info"""
# create VideoFileClip object for each video file
clips = [VideoFileClip(c) for c in video_clip_paths]
if method == "reduce":
# calculate minimum width & height across all clips
min_height = min([c.h for c in clips])
min_width = min([c.w for c in clips])
# resize the videos to the minimum
clips = [c.resize(newsize=(min_width, min_height)) for c in clips]
# concatenate the final video
final_clip = concatenate_videoclips(clips)
elif method == "compose":
# concatenate the final video with the compose method provided by moviepy
final_clip = concatenate_videoclips(clips, method="compose")
# write the output video file
final_clip.write_videofile(output_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Simple Video Concatenation script in Python with MoviePy Library")
parser.add_argument("-c", "--clips", nargs="+",
help="List of audio or video clip paths")
parser.add_argument("-r", "--reduce", action="store_true",
help="Whether to use the `reduce` method to reduce to the lowest quality on the resulting clip")
parser.add_argument("-o", "--output", help="Output file name")
args = parser.parse_args()
clips = args.clips
output_path = args.output
reduce = args.reduce
method = "reduce" if reduce else "compose"
concatenate(clips, output_path, method)
|
994,649 | 61aa122acf7db61958d4b43db9f308f381aef736 | import scipy.io as sio
from function import *
mat_content = sio.loadmat('HW3Data.mat')
Vocabulary = mat_content['Vocabulary']
XTrain = mat_content['XTrain'].toarray()
yTrain = mat_content['yTrain'].flatten()
XTest = mat_content['XTest'].toarray()
yTest = mat_content['yTest'].flatten()
XTrainSmall = mat_content['XTrainSmall'].toarray()
yTrainSmall = mat_content['yTrainSmall'].flatten()
D = NB_XGivenY(XTrain, yTrain)
p = NB_YPrior(yTrain)
yHatTrain = NB_Classify(D, p, XTrain)
yHatTest = NB_Classify(D, p, XTest)
trainError = ClassificationError(yHatTrain, yTrain);
testError = ClassificationError(yHatTest, yTest);
trainError
testError
D = NB_XGivenY(XTrainSmall, yTrainSmall)
p = NB_YPrior(yTrainSmall)
yHatTrainSmall = NB_Classify(D, p, XTrainSmall)
yHatTestSmall = NB_Classify(D, p, XTest)
trainErrorSmall = ClassificationError(yHatTrainSmall, yTrainSmall);
testErrorSmall = ClassificationError(yHatTestSmall, yTest);
trainErrorSmall
testErrorSmall
TopOccurence(XTrain, yTrain, Vocabulary, k = 5)
TopDiscriminate(XTrain, yTrain, Vocabulary, k = 5)
|
994,650 | 130ec6f4858412ae092da706d3070b27929e43d2 | #!/usr/bin/env python
import os
import sys
from distutils.util import subst_vars
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from meta import DIST_META_KEYS, import_dist_meta, get_py_version
from errors import LocationError, MetadataError
__all__ = [
'FREEZE_SCHEME',
'SCHEME_KEYS',
'walk_tree',
'locate_distribution',
'freeze_distribution',
'locate_dist_section',
'freeze_dist_section',
]
def _gen_freeze_scheme():
""" Generate scheme to freeze distribution.
"""
freeze_scheme = {}
for key in SCHEME_KEYS:
paths = []
for scheme_name, install_scheme in INSTALL_SCHEMES.iteritems():
val = install_scheme[key]
if scheme_name == 'unix_home':
val = val.replace('$base', '$home', 1)
else:
val = val.replace('$base', '$prefix', 1)
val = val.replace('$platbase', '$exec_prefix', 1)
paths.append(val)
freeze_scheme[key] = paths
return freeze_scheme
FREEZE_SCHEME = _gen_freeze_scheme()
def walk_tree(top):
""" List the whole directory tree down from the top.
"""
nodes = [top]
for dirpath, dirnames, filenames in os.walk(top):
for dirname in dirnames:
nodes.append(os.path.join(dirpath, dirname))
for filename in filenames:
nodes.append(os.path.join(dirpath, filename))
return nodes
def _expand_prefix(prefix, configs):
""" Expand variables in the prefix.
"""
return subst_vars(prefix, configs)
def _verify_prefix(prefix, files):
""" Verify that every file exists with the specified prefix.
"""
for f in files:
f = os.path.join(prefix, f)
if not os.path.exists(f):
return False
else:
return True
def locate_dist_section(section, dist_meta):
""" Find and return the location of the specified section.
"""
def purelib_path_gen():
paths = FREEZE_SCHEME['purelib']
paths.extend(sys.path)
return paths
def platlib_path_gen():
# TODO: more available paths
paths = FREEZE_SCHEME['platlib']
return paths
def headers_path_gen():
# TODO: more available paths
paths = FREEZE_SCHEME['headers']
return paths
def scripts_path_gen():
paths = FREEZE_SCHEME['scripts']
if os.environ.has_key('PATH'):
paths.extend(os.environ['PATH'].split(":"))
if os.environ.has_key('HOME'):
paths.append(os.path.join(os.environ['HOME'], 'bin'))
return paths
def data_path_gen():
# TODO: more available paths
paths = FREEZE_SCHEME['data']
return paths
if section not in SCHEME_KEYS:
raise LocationError("illegal section name '%s'." % section)
pathvar = dist_meta.get('%s_path' % section, None)
if pathvar:
paths = [pathvar]
else:
pathgen = locals()['%s_path_gen' % section]
paths = pathgen()
for prefix in paths:
prefix = _expand_prefix(prefix, dist_meta)
status = _verify_prefix(prefix, dist_meta[section])
if status:
return prefix
else:
raise LocationError("cann't locate section '%s'." % section)
def freeze_dist_section(section, dist_meta):
""" List all files belong to the specified section.
"""
location = locate_dist_section(section, dist_meta)
outfiles = []
for f in dist_meta.get(section, []):
f = os.path.join(location, f)
if f not in outfiles:
outfiles.extend(walk_tree(f))
return location, outfiles
def freeze_distribution(dist_name, dist_version, **attrs):
""" List all files belong to the specified distribution.
"""
for key in attrs.iterkeys():
if key not in DIST_META_KEYS:
raise AttributeError("unexpected keyword argument '%s'." % key)
try:
dist_meta = import_dist_meta(dist_name, dist_version)
dist_meta.update(attrs)
except ImportError:
raise MetadataError("metadata of '%s-%s' not found." % \
(dist_name, dist_version))
dist_files = []
dist_scheme = {}
for key in SCHEME_KEYS:
location, outfiles = freeze_dist_section(key, dist_meta)
dist_files.extend(outfiles)
dist_scheme[key] = location
return dist_scheme, dist_files
|
994,651 | 6f000c9944026d6d3107e21aa20463e6ad286bb5 | #!/usr/bin/env python
import paramiko
import sys, os, string, threading
#user-selected command
cmd = "w"
def command(host):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username='ubuntu', password='PASSWORD')
stdin, stdout, stderr = client.exec_command(cmd)
for line in stdout:
print line.strip('\n')
client.close()
def main():
hosts = sys.argv[1].split(",")
threads = []
for h in hosts:
t = threading.Thread(target=command, args=(h,))
t.start()
threads.append(t)
for t in threads:
t.join()
main()
|
994,652 | 5857bf5fc665db1badc113863e98e57f9ea952b7 | # # Задание - 1
# # Создайте функцию, принимающую на вход Имя, возраст и город проживания человека
# # Функция должна возвращать строку вида "Василий, 21 год(а), проживает в городе Москва"
#
names = input('В ведите имя: ')
ages = input('В ведите возраст: ')
city = input('В ведите город: ')
def messege( name , ages , city):
return f'{name} , {ages} год(а), проживает в городе {city}'
print(messege(names,ages,city))
#
#
# # Задание - 2
# # Создайте функцию, принимающую на вход 3 числа, и возвращающую наибольшее из них
#
def max_numbers(*args):
return max(*args)
print (max_numbers(8, 15 ,3))
#
# # Задание - 3
# # Создайте функцию, принимающую неограниченное количество строковых аргументов,
# # верните самую длинную строку из полученных аргументов
#
#
#
def long_string(*args):
return max(args, key=len)
print(long_string('Hello', 'banquet', 'da', 'compact', 'cosy', 'coachman'))
#
#
#
#
# # Задание - 1
# # Вам даны 2 списка одинаковой длины, в первом списке имена людей, во втором зарплаты,
# # вам необходимо получить на выходе словарь, где ключ - имя, значение - зарплата.
# # Запишите результаты в файл salary.txt так, чтобы на каждой строке было 2 столбца,
# # столбцы разделяются пробелом, тире, пробелом. в первом имя, во втором зарплата, например: Vasya - 5000
# # После чего прочитайте файл, выведите построчно имя и зарплату минус 13% (налоги ведь),
# # Есть условие, не отображать людей получающих более зарплату 500000, как именно
# # выполнить условие решать вам, можете не писать в файл
# # можете не выводить, подумайте какой способ будет наиболее правильным и оптимальным,
# # если скажем эти файлы потом придется передавать.
# # Так же при выводе имя должно быть полностью в верхнем регистре!
# # Подумайте вспоминая урок, как это можно сделать максимально кратко, используя возможности языка Python.
#
salary_file = open('salary.txt', 'w', encoding='UTF-8')
names = ['Petrov', 'Ivanov', 'Sidorov']
salaries = [30000, 80000, 10000]
def return_dic(names, salaries):
return dict(zip(names, salaries))
def write_to_file():
with open('salary.txt', 'w', encoding='UTF-8') as file:
for name, salary in return_dic(names, salaries).items():
file.write(f'{name} - {salary}\n')
write_to_file()
salary_file = open('salary.txt', 'r', encoding='UTF-8')
for line in salary_file:
name, dash, salary = line.split()
if int(salary) <= 50000:
income_tax = int(salary) * 0.87
print(str(name.upper()), dash, int(income_tax))
salary_file.close()
|
994,653 | 25a9790d8c4f343d7a64d54cbda6af01164eec0d | """
【问题描述】从键盘输入非0整数,以输入0为输入结束标志,求平均值,统计正数负数个数
【输入形式】
每个整数一行。最后一行是0,表示输入结束。
【输出形式】
输出三行。
第一行是平均值。第二行是正数个数。第三行是负数个数。
【样例输入】
1
1
1
0
【样例输出】
1
3
0
"""
nums = []
zheng = 0
fu = 0
while True:
number = int(input())
if number == 0:
break
else:
nums.append(number)
for i in range(nums.__len__()):
if nums[i] < 0:
fu += 1
else:
zheng += 1
print(sum(nums)/nums.__len__())
print(zheng)
print(fu) |
994,654 | 9d267c6af3b242e53dab0c4699dfbf22fef90d33 | #!python
# \author Hans J. Johnson
#
# Now that all supported compilers simply
# use exactly one function signature (i.e.
# namely the one provided in the std:: namespace)
# there is no need to use the vcl_ aliases.
import os
import sys
from collections import OrderedDict
if len(sys.argv) != 2:
usage = r"""
INCORRECT USAGE:
{0}
USAGE:
python {1} source_file_to_modernize
Examples:
SRC_BASE_DIR=~/MYSRC/Submodule
for ext in ".h" ".cxx" ".cpp" ".hxx" ".hpp" ".txx"; do
find ${{SRC_BASE_DIR}} -type f -name "*${{ext}}" -exec python Utilities/Maintenance/VCL_ModernizeNaming.py {{}} \;
done
""".format(
sys.argv, sys.argv[0]
)
print(usage)
sys.exit(-1)
# slight modification from grep command
info_for_conversion = """
vcl_algorithm.h,vcl_adjacent_find,std::adjacent_find
vcl_algorithm.h,vcl_and,std::and
vcl_algorithm.h,vcl_binary,std::binary
vcl_algorithm.h,vcl_binary_search,std::binary_search
vcl_algorithm.h,vcl_copy,std::copy
vcl_algorithm.h,vcl_copy_,std::copy_
vcl_algorithm.h,vcl_count,std::count
vcl_algorithm.h,vcl_count_if,std::count_if
vcl_algorithm.h,vcl_equal,std::equal
vcl_algorithm.h,vcl_equal_range,std::equal_range
vcl_algorithm.h,vcl_fill,std::fill
vcl_algorithm.h,vcl_fill_n,std::fill_n
vcl_algorithm.h,vcl_find,std::find
vcl_algorithm.h,vcl_find_end,std::find_end
vcl_algorithm.h,vcl_find_first_of,std::find_first_of
vcl_algorithm.h,vcl_find_if,std::find_if
vcl_algorithm.h,vcl_for_each,std::for_each
vcl_algorithm.h,vcl_generate,std::generate
vcl_algorithm.h,vcl_generate_n,std::generate_n
vcl_algorithm.h,vcl_generators_,std::generators_
vcl_algorithm.h,vcl_heap,std::heap
vcl_algorithm.h,vcl_includes,std::includes
vcl_algorithm.h,vcl_inplace_merge,std::inplace_merge
vcl_algorithm.h,vcl_iter_swap,std::iter_swap
vcl_algorithm.h,vcl_lexicographical_compare,std::lexicographical_compare
vcl_algorithm.h,vcl_lower_bound,std::lower_bound
vcl_algorithm.h,vcl_make_heap,std::make_heap
vcl_algorithm.h,vcl_max,std::max
vcl_algorithm.h,vcl_max_element,std::max_element
vcl_algorithm.h,vcl_merge,std::merge
vcl_algorithm.h,vcl_merge_,std::merge_
vcl_algorithm.h,vcl_min,std::min
vcl_algorithm.h,vcl_min_element,std::min_element
vcl_algorithm.h,vcl_mismatch,std::mismatch
vcl_algorithm.h,vcl_next_permutation,std::next_permutation
vcl_algorithm.h,vcl_nth_element,std::nth_element
vcl_algorithm.h,vcl_partial_sort,std::partial_sort
vcl_algorithm.h,vcl_partial_sort_copy,std::partial_sort_copy
vcl_algorithm.h,vcl_partition,std::partition
vcl_algorithm.h,vcl_partitions_,std::partitions_
vcl_algorithm.h,vcl_pop_heap,std::pop_heap
vcl_algorithm.h,vcl_prev_permutation,std::prev_permutation
vcl_algorithm.h,vcl_push_heap,std::push_heap
vcl_algorithm.h,vcl_random_shuffle,std::random_shuffle
vcl_algorithm.h,vcl_remove,std::remove
vcl_algorithm.h,vcl_remove_copy,std::remove_copy
vcl_algorithm.h,vcl_remove_copy_if,std::remove_copy_if
vcl_algorithm.h,vcl_remove_if,std::remove_if
vcl_algorithm.h,vcl_replace,std::replace
vcl_algorithm.h,vcl_replace_copy,std::replace_copy
vcl_algorithm.h,vcl_replace_copy_if,std::replace_copy_if
vcl_algorithm.h,vcl_replace_if,std::replace_if
vcl_algorithm.h,vcl_reverse,std::reverse
vcl_algorithm.h,vcl_reverse_copy,std::reverse_copy
vcl_algorithm.h,vcl_rotate,std::rotate
vcl_algorithm.h,vcl_rotate_copy,std::rotate_copy
vcl_algorithm.h,vcl_search,std::search
vcl_algorithm.h,vcl_search_n,std::search_n
vcl_algorithm.h,vcl_set_difference,std::set_difference
vcl_algorithm.h,vcl_set_intersection,std::set_intersection
vcl_algorithm.h,vcl_set_symmetric_difference,std::set_symmetric_difference
vcl_algorithm.h,vcl_set_union,std::set_union
vcl_algorithm.h,vcl_sort,std::sort
vcl_algorithm.h,vcl_sort_,std::sort_
vcl_algorithm.h,vcl_sort_heap,std::sort_heap
vcl_algorithm.h,vcl_stable_partition,std::stable_partition
vcl_algorithm.h,vcl_stable_sort,std::stable_sort
vcl_algorithm.h,vcl_swap,std::swap
vcl_algorithm.h,vcl_swap_,std::swap_
vcl_algorithm.h,vcl_swap_ranges,std::swap_ranges
vcl_algorithm.h,vcl_transform,std::transform
vcl_algorithm.h,vcl_unique,std::unique
vcl_algorithm.h,vcl_unique_copy,std::unique_copy
vcl_algorithm.h,vcl_upper_bound,std::upper_bound
vcl_bitset.h,vcl_bitset,std::bitset
vcl_cctype.h,vcl_isalnum,std::isalnum
vcl_cctype.h,vcl_isalpha,std::isalpha
vcl_cctype.h,vcl_iscntrl,std::iscntrl
vcl_cctype.h,vcl_isdigit,std::isdigit
vcl_cctype.h,vcl_isgraph,std::isgraph
vcl_cctype.h,vcl_islower,std::islower
vcl_cctype.h,vcl_isprint,std::isprint
vcl_cctype.h,vcl_ispunct,std::ispunct
vcl_cctype.h,vcl_isspace,std::isspace
vcl_cctype.h,vcl_isupper,std::isupper
vcl_cctype.h,vcl_isxdigit,std::isxdigit
vcl_cctype.h,vcl_tolower,std::tolower
vcl_cctype.h,vcl_toupper,std::toupper
vcl_cmath.h,vcl_abs,std::abs
vcl_cmath.h,vcl_acos,std::acos
vcl_cmath.h,vcl_asin,std::asin
vcl_cmath.h,vcl_atan,std::atan
vcl_cmath.h,vcl_atan2,std::atan2
vcl_cmath.h,vcl_ceil,std::ceil
vcl_cmath.h,vcl_cos,std::cos
vcl_cmath.h,vcl_cosh,std::cosh
vcl_cmath.h,vcl_exp,std::exp
vcl_cmath.h,vcl_fabs,std::fabs
vcl_cmath.h,vcl_floor,std::floor
vcl_cmath.h,vcl_fmod,std::fmod
vcl_cmath.h,vcl_frexp,std::frexp
vcl_cmath.h,vcl_ldexp,std::ldexp
vcl_cmath.h,vcl_log,std::log
vcl_cmath.h,vcl_log10,std::log10
vcl_cmath.h,vcl_modf,std::modf
vcl_cmath.h,vcl_pow,std::pow
vcl_cmath.h,vcl_sin,std::sin
vcl_cmath.h,vcl_sinh,std::sinh
vcl_cmath.h,vcl_sqrt,std::sqrt
vcl_cmath.h,vcl_tan,std::tan
vcl_cmath.h,vcl_tanh,std::tanh
vcl_complex_fwd.h,vcl_abs,std::abs
vcl_complex.h,vcl_abs,std::abs
vcl_complex.h,vcl_arg,std::arg
vcl_complex.h,vcl_complex,std::complex
vcl_complex.h,vcl_conj,std::conj
vcl_complex.h,vcl_cos,std::cos
vcl_complex.h,vcl_cosh,std::cosh
vcl_complex.h,vcl_exp,std::exp
vcl_complex.h,vcl_imag,std::imag
vcl_complex.h,vcl_log,std::log
vcl_complex.h,vcl_log10,std::log10
vcl_complex.h,vcl_norm,std::norm
vcl_complex.h,vcl_polar,std::polar
vcl_complex.h,vcl_pow,std::pow
vcl_complex.h,vcl_real,std::real
vcl_complex.h,vcl_sin,std::sin
vcl_complex.h,vcl_sinh,std::sinh
vcl_complex.h,vcl_sqrt,std::sqrt
vcl_complex.h,vcl_tan,std::tan
vcl_complex.h,vcl_tanh,std::tanh
vcl_csetjmp.h,vcl_jmp_buf,std::jmp_buf
vcl_csetjmp.h,vcl_longjmp,std::longjmp
vcl_csignal.h,vcl_raise,std::raise
vcl_csignal.h,vcl_sig_atomic_t,std::sig_atomic_t
vcl_csignal.h,vcl_signal,std::signal
vcl_cstdarg.h,vcl_va_list,std::va_list
vcl_cstddef.h,vcl_ptrdiff_t,std::ptrdiff_t
vcl_cstddef.h,vcl_size_t,std::size_t
vcl_cstdio.h,vcl_FILE,std::FILE
vcl_cstdio.h,vcl_clearerr,std::clearerr
vcl_cstdio.h,vcl_fclose,std::fclose
vcl_cstdio.h,vcl_feof,std::feof
vcl_cstdio.h,vcl_ferror,std::ferror
vcl_cstdio.h,vcl_fflush,std::fflush
vcl_cstdio.h,vcl_fgetc,std::fgetc
vcl_cstdio.h,vcl_fgetpos,std::fgetpos
vcl_cstdio.h,vcl_fgets,std::fgets
vcl_cstdio.h,vcl_fopen,std::fopen
vcl_cstdio.h,vcl_fpos_t,std::fpos_t
vcl_cstdio.h,vcl_fprintf,std::fprintf
vcl_cstdio.h,vcl_fputc,std::fputc
vcl_cstdio.h,vcl_fputs,std::fputs
vcl_cstdio.h,vcl_fread,std::fread
vcl_cstdio.h,vcl_freopen,std::freopen
vcl_cstdio.h,vcl_fscanf,std::fscanf
vcl_cstdio.h,vcl_fseek,std::fseek
vcl_cstdio.h,vcl_fsetpos,std::fsetpos
vcl_cstdio.h,vcl_ftell,std::ftell
vcl_cstdio.h,vcl_fwrite,std::fwrite
vcl_cstdio.h,vcl_getc,std::getc
vcl_cstdio.h,vcl_getchar,std::getchar
vcl_cstdio.h,vcl_gets,std::gets
vcl_cstdio.h,vcl_perror,std::perror
vcl_cstdio.h,vcl_printf,std::printf
vcl_cstdio.h,vcl_putc,std::putc
vcl_cstdio.h,vcl_putchar,std::putchar
vcl_cstdio.h,vcl_puts,std::puts
vcl_cstdio.h,vcl_remove,std::remove
vcl_cstdio.h,vcl_rename,std::rename
vcl_cstdio.h,vcl_rewind,std::rewind
vcl_cstdio.h,vcl_scanf,std::scanf
vcl_cstdio.h,vcl_setbuf,std::setbuf
vcl_cstdio.h,vcl_setvbuf,std::setvbuf
vcl_cstdio.h,vcl_snprintf,vcl_snprintf
vcl_cstdio.h,vcl_sprintf,std::sprintf
vcl_cstdio.h,vcl_sscanf,std::sscanf
vcl_cstdio.h,vcl_tmpfile,std::tmpfile
vcl_cstdio.h,vcl_tmpnam,std::tmpnam
vcl_cstdio.h,vcl_ungetc,std::ungetc
vcl_cstdio.h,vcl_vfprintf,std::vfprintf
vcl_cstdio.h,vcl_vfscanf,std::vfscanf
vcl_cstdio.h,vcl_vprintf,std::vprintf
vcl_cstdio.h,vcl_vscanf,std::vscanf
vcl_cstdio.h,vcl_vsprintf,std::vsprintf
vcl_cstdio.h,vcl_vsscanf,std::vsscanf
vcl_cstdlib.h,vcl_abort,std::abort
vcl_cstdlib.h,vcl_abs,std::abs
vcl_cstdlib.h,vcl_atexit,std::atexit
vcl_cstdlib.h,vcl_atof,std::atof
vcl_cstdlib.h,vcl_atoi,std::atoi
vcl_cstdlib.h,vcl_atol,std::atol
vcl_cstdlib.h,vcl_calloc,std::calloc
vcl_cstdlib.h,vcl_div,std::div
vcl_cstdlib.h,vcl_exit,std::exit
vcl_cstdlib.h,vcl_free,std::free
vcl_cstdlib.h,vcl_getenv,std::getenv
vcl_cstdlib.h,vcl_labs,std::labs
vcl_cstdlib.h,vcl_ldiv,std::ldiv
vcl_cstdlib.h,vcl_malloc,std::malloc
vcl_cstdlib.h,vcl_mblen,std::mblen
vcl_cstdlib.h,vcl_mbstowcs,std::mbstowcs
vcl_cstdlib.h,vcl_mbtowc,std::mbtowc
vcl_cstdlib.h,vcl_qsort,std::qsort
vcl_cstdlib.h,vcl_rand,std::rand
vcl_cstdlib.h,vcl_realloc,std::realloc
vcl_cstdlib.h,vcl_srand,std::srand
vcl_cstdlib.h,vcl_strtod,std::strtod
vcl_cstdlib.h,vcl_strtol,std::strtol
vcl_cstdlib.h,vcl_strtoul,std::strtoul
vcl_cstdlib.h,vcl_system,std::system
vcl_cstdlib.h,vcl_wcstombs,std::wcstombs
vcl_cstdlib.h,vcl_wctomb,std::wctomb
vcl_cstring.h,vcl_memchr,std::memchr
vcl_cstring.h,vcl_memcmp,std::memcmp
vcl_cstring.h,vcl_memcpy,std::memcpy
vcl_cstring.h,vcl_memmove,std::memmove
vcl_cstring.h,vcl_memset,std::memset
vcl_cstring.h,vcl_strcat,std::strcat
vcl_cstring.h,vcl_strchr,std::strchr
vcl_cstring.h,vcl_strcmp,std::strcmp
vcl_cstring.h,vcl_strcoll,std::strcoll
vcl_cstring.h,vcl_strcpy,std::strcpy
vcl_cstring.h,vcl_strcspn,std::strcspn
vcl_cstring.h,vcl_strerror,std::strerror
vcl_cstring.h,vcl_strlen,std::strlen
vcl_cstring.h,vcl_strncat,std::strncat
vcl_cstring.h,vcl_strncmp,std::strncmp
vcl_cstring.h,vcl_strncpy,std::strncpy
vcl_cstring.h,vcl_strpbrk,std::strpbrk
vcl_cstring.h,vcl_strrchr,std::strrchr
vcl_cstring.h,vcl_strspn,std::strspn
vcl_cstring.h,vcl_strstr,std::strstr
vcl_cstring.h,vcl_strtok,std::strtok
vcl_cstring.h,vcl_strxfrm,std::strxfrm
vcl_ctime.h,vcl_asctime,std::asctime
vcl_ctime.h,vcl_clock,std::clock
vcl_ctime.h,vcl_clock_t,std::clock_t
vcl_ctime.h,vcl_ctime,std::ctime
vcl_ctime.h,vcl_difftime,std::difftime
vcl_ctime.h,vcl_gmtime,std::gmtime
vcl_ctime.h,vcl_localtime,std::localtime
vcl_ctime.h,vcl_mktime,std::mktime
vcl_ctime.h,vcl_strftime,std::strftime
vcl_ctime.h,vcl_time,std::time
vcl_ctime.h,vcl_time_t,std::time_t
vcl_ctime.h,vcl_tm,std::tm
vcl_cwchar.h,vcl_btowc,std::btowc
vcl_cwchar.h,vcl_fgetwc,std::fgetwc
vcl_cwchar.h,vcl_fgetws,std::fgetws
vcl_cwchar.h,vcl_fputwc,std::fputwc
vcl_cwchar.h,vcl_fputws,std::fputws
vcl_cwchar.h,vcl_fwide,std::fwide
vcl_cwchar.h,vcl_fwprintf,std::fwprintf
vcl_cwchar.h,vcl_fwscanf,std::fwscanf
vcl_cwchar.h,vcl_getwc,std::getwc
vcl_cwchar.h,vcl_getwchar,std::getwchar
vcl_cwchar.h,vcl_mbrlen,std::mbrlen
vcl_cwchar.h,vcl_mbrtowc,std::mbrtowc
vcl_cwchar.h,vcl_mbstate_t,std::mbstate_t
vcl_cwchar.h,vcl_putwc,std::putwc
vcl_cwchar.h,vcl_putwchar,std::putwchar
vcl_cwchar.h,vcl_swprintf,std::swprintf
vcl_cwchar.h,vcl_swscanf,std::swscanf
vcl_cwchar.h,vcl_ungetwc,std::ungetwc
vcl_cwchar.h,vcl_vfwprintf,std::vfwprintf
vcl_cwchar.h,vcl_vswprintf,std::vswprintf
vcl_cwchar.h,vcl_vwprintf,std::vwprintf
vcl_cwchar.h,vcl_wcrtomb,std::wcrtomb
vcl_cwchar.h,vcl_wcscat,std::wcscat
vcl_cwchar.h,vcl_wcschr,std::wcschr
vcl_cwchar.h,vcl_wcscmp,std::wcscmp
vcl_cwchar.h,vcl_wcscoll,std::wcscoll
vcl_cwchar.h,vcl_wcscpy,std::wcscpy
vcl_cwchar.h,vcl_wcscspn,std::wcscspn
vcl_cwchar.h,vcl_wcsftime,std::wcsftime
vcl_cwchar.h,vcl_wcslen,std::wcslen
vcl_cwchar.h,vcl_wcsncat,std::wcsncat
vcl_cwchar.h,vcl_wcsncmp,std::wcsncmp
vcl_cwchar.h,vcl_wcsncpy,std::wcsncpy
vcl_cwchar.h,vcl_wcspbrk,std::wcspbrk
vcl_cwchar.h,vcl_wcsrchr,std::wcsrchr
vcl_cwchar.h,vcl_wcsrtombs,std::wcsrtombs
vcl_cwchar.h,vcl_wcsspn,std::wcsspn
vcl_cwchar.h,vcl_wcsstr,std::wcsstr
vcl_cwchar.h,vcl_wcstod,std::wcstod
vcl_cwchar.h,vcl_wcstok,std::wcstok
vcl_cwchar.h,vcl_wcstol,std::wcstol
vcl_cwchar.h,vcl_wcsxfrm,std::wcsxfrm
vcl_cwchar.h,vcl_wctob,std::wctob
vcl_cwchar.h,vcl_wctoul,std::wctoul
vcl_cwchar.h,vcl_wint_t,std::wint_t
vcl_cwchar.h,vcl_wmemchr,std::wmemchr
vcl_cwchar.h,vcl_wmemcmp,std::wmemcmp
vcl_cwchar.h,vcl_wmemcpy,std::wmemcpy
vcl_cwchar.h,vcl_wmemmove,std::wmemmove
vcl_cwchar.h,vcl_wmemset,std::wmemset
vcl_cwchar.h,vcl_wprintf,std::wprintf
vcl_cwchar.h,vcl_wscanf,std::wscanf
vcl_cwctype.h,vcl_iswalnum,std::iswalnum
vcl_cwctype.h,vcl_iswalpha,std::iswalpha
vcl_cwctype.h,vcl_iswcntrl,std::iswcntrl
vcl_cwctype.h,vcl_iswctrans,std::iswctrans
vcl_cwctype.h,vcl_iswctype,std::iswctype
vcl_cwctype.h,vcl_iswdigit,std::iswdigit
vcl_cwctype.h,vcl_iswgraph,std::iswgraph
vcl_cwctype.h,vcl_iswlower,std::iswlower
vcl_cwctype.h,vcl_iswprint,std::iswprint
vcl_cwctype.h,vcl_iswpunct,std::iswpunct
vcl_cwctype.h,vcl_iswspace,std::iswspace
vcl_cwctype.h,vcl_iswupper,std::iswupper
vcl_cwctype.h,vcl_iswxdigit,std::iswxdigit
vcl_cwctype.h,vcl_towctrans,std::towctrans
vcl_cwctype.h,vcl_towlower,std::towlower
vcl_cwctype.h,vcl_towupper,std::towupper
vcl_cwctype.h,vcl_wctrans,std::wctrans
vcl_cwctype.h,vcl_wctrans_t,std::wctrans_t
vcl_cwctype.h,vcl_wctype,std::wctype
vcl_cwctype.h,vcl_wctype_t,std::wctype_t
vcl_cwctype.h,vcl_wint_t,std::wint_t
vcl_deque.h,vcl_deque,std::deque
vcl_deque.h,vcl_swap,std::swap
vcl_exception.h,vcl_bad_exception,std::bad_exception
vcl_exception.h,vcl_exception,std::exception
vcl_fstream.h,vcl_filebuf,std::filebuf
vcl_fstream.h,vcl_fstream,std::fstream
vcl_fstream.h,vcl_ifstream,std::ifstream
vcl_fstream.h,vcl_ofstream,std::ofstream
vcl_functional.h,vcl_binary_function,std::binary_function
vcl_functional.h,vcl_binary_negate,std::binary_negate
vcl_functional.h,vcl_bind1st,std::bind1st
vcl_functional.h,vcl_bind2nd,std::bind2nd
vcl_functional.h,vcl_binder1st,std::binder1st
vcl_functional.h,vcl_binder2nd,std::binder2nd
vcl_functional.h,vcl_const_mem_fun,std::const_mem_fun
vcl_functional.h,vcl_const_mem_fun1,std::const_mem_fun1
vcl_functional.h,vcl_const_mem_fun1_ref,std::const_mem_fun1_ref
vcl_functional.h,vcl_const_mem_fun1_ref_t,std::const_mem_fun1_ref_t
vcl_functional.h,vcl_const_mem_fun1_t,std::const_mem_fun1_t
vcl_functional.h,vcl_const_mem_fun_ref,std::const_mem_fun_ref
vcl_functional.h,vcl_const_mem_fun_ref_t,std::const_mem_fun_ref_t
vcl_functional.h,vcl_const_mem_fun_t,std::const_mem_fun_t
vcl_functional.h,vcl_divides,std::divides
vcl_functional.h,vcl_equal_to,std::equal_to
vcl_functional.h,vcl_greater,std::greater
vcl_functional.h,vcl_greater_equal,std::greater_equal
vcl_functional.h,vcl_less,std::less
vcl_functional.h,vcl_less_equal,std::less_equal
vcl_functional.h,vcl_logical_and,std::logical_and
vcl_functional.h,vcl_logical_not,std::logical_not
vcl_functional.h,vcl_logical_or,std::logical_or
vcl_functional.h,vcl_mem_fun,std::mem_fun
vcl_functional.h,vcl_mem_fun1,std::mem_fun1
vcl_functional.h,vcl_mem_fun1_ref,std::mem_fun1_ref
vcl_functional.h,vcl_mem_fun1_ref_t,std::mem_fun1_ref_t
vcl_functional.h,vcl_mem_fun1_t,std::mem_fun1_t
vcl_functional.h,vcl_mem_fun_ref,std::mem_fun_ref
vcl_functional.h,vcl_mem_fun_ref_t,std::mem_fun_ref_t
vcl_functional.h,vcl_mem_fun_t,std::mem_fun_t
vcl_functional.h,vcl_minus,std::minus
vcl_functional.h,vcl_modulus,std::modulus
vcl_functional.h,vcl_multiplies,std::multiplies
vcl_functional.h,vcl_negate,std::negate
vcl_functional.h,vcl_not1,std::not1
vcl_functional.h,vcl_not2,std::not2
vcl_functional.h,vcl_not_equal_to,std::not_equal_to
vcl_functional.h,vcl_plus,std::plus
vcl_functional.h,vcl_pointer_to_binary_function,std::pointer_to_binary_function
vcl_functional.h,vcl_pointer_to_unary_function,std::pointer_to_unary_function
vcl_functional.h,vcl_ptr_fun,std::ptr_fun
vcl_functional.h,vcl_transform,std::transform
vcl_functional.h,vcl_unary_function,std::unary_function
vcl_functional.h,vcl_unary_negate,std::unary_negate
vcl_iomanip.h,vcl_boolalpha,std::boolalpha
vcl_iomanip.h,vcl_dec,std::dec
vcl_iomanip.h,vcl_fixed,std::fixed
vcl_iomanip.h,vcl_hex,std::hex
vcl_iomanip.h,vcl_internal,std::internal
vcl_iomanip.h,vcl_left,std::left
vcl_iomanip.h,vcl_noboolalpha,std::noboolalpha
vcl_iomanip.h,vcl_noshowbase,std::noshowbase
vcl_iomanip.h,vcl_noshowpoint,std::noshowpoint
vcl_iomanip.h,vcl_noshowpos,std::noshowpos
vcl_iomanip.h,vcl_noskipws,std::noskipws
vcl_iomanip.h,vcl_nouppercase,std::nouppercase
vcl_iomanip.h,vcl_oct,std::oct
vcl_iomanip.h,vcl_resetiosflags,std::resetiosflags
vcl_iomanip.h,vcl_right,std::right
vcl_iomanip.h,vcl_scientific,std::scientific
vcl_iomanip.h,vcl_setbase,std::setbase
vcl_iomanip.h,vcl_setfill,std::setfill
vcl_iomanip.h,vcl_setiosflags,std::setiosflags
vcl_iomanip.h,vcl_setprecision,std::setprecision
vcl_iomanip.h,vcl_setw,std::setw
vcl_iomanip.h,vcl_showbase,std::showbase
vcl_iomanip.h,vcl_showpoint,std::showpoint
vcl_iomanip.h,vcl_showpos,std::showpos
vcl_iomanip.h,vcl_skipws,std::skipws
vcl_iomanip.h,vcl_uppercase,std::uppercase
vcl_ios.h,vcl_basic_ios,std::basic_ios
vcl_ios.h,vcl_fpos,std::fpos
vcl_ios.h,vcl_ios_adjustfield,std::ios::adjustfield
vcl_ios.h,vcl_ios_base,std::ios_base
vcl_ios.h,vcl_ios_basefield,std::ios::basefield
vcl_ios.h,vcl_ios_beg,std::ios::beg
vcl_ios.h,vcl_ios_boolalpha,std::ios::boolalpha
vcl_ios.h,vcl_ios_cur,std::ios::cur
vcl_ios.h,vcl_ios_dec,std::ios::dec
vcl_ios.h,vcl_ios_end,std::ios::end
vcl_ios.h,vcl_ios_fixed,std::ios::fixed
vcl_ios.h,vcl_ios_floatfield,std::ios::floatfield
vcl_ios.h,vcl_ios_fmtflags,std::ios::fmtflags
vcl_ios.h,vcl_ios_hex,std::ios::hex
vcl_ios.h,vcl_ios_internal,std::ios::internal
vcl_ios.h,vcl_ios_left,std::ios::left
vcl_ios.h,vcl_ios_noboolalpha,std::ios::noboolalpha
vcl_ios.h,vcl_ios_noshowbase,std::ios::noshowbase
vcl_ios.h,vcl_ios_noshowpoint,std::ios::noshowpoint
vcl_ios.h,vcl_ios_noshowpos,std::ios::noshowpos
vcl_ios.h,vcl_ios_noskipws,std::ios::noskipws
vcl_ios.h,vcl_ios_nouppercase,std::ios::nouppercase
vcl_ios.h,vcl_ios_oct,std::ios::oct
vcl_ios.h,vcl_ios_right,std::ios::right
vcl_ios.h,vcl_ios_scientific,std::ios::scientific
vcl_ios.h,vcl_ios_seekdir,std::ios::seekdir
vcl_ios.h,vcl_ios_showbase,std::ios::showbase
vcl_ios.h,vcl_ios_showpoint,std::ios::showpoint
vcl_ios.h,vcl_ios_showpos,std::ios::showpos
vcl_ios.h,vcl_ios_skipws,std::ios::skipws
vcl_ios.h,vcl_ios_uppercase,std::ios::uppercase
vcl_ios.h,vcl_streamoff,std::streamoff
vcl_ios.h,vcl_streamsize,std::streamsize
vcl_iosfwd.h,vcl_allocator,std::allocator
vcl_iosfwd.h,vcl_basic_filebuf,std::basic_filebuf
vcl_iosfwd.h,vcl_basic_fstream,std::basic_fstream
vcl_iosfwd.h,vcl_basic_ifstream,std::basic_ifstream
vcl_iosfwd.h,vcl_basic_ios,std::basic_ios
vcl_iosfwd.h,vcl_basic_iostream,std::basic_iostream
vcl_iosfwd.h,vcl_basic_istream,std::basic_istream
vcl_iosfwd.h,vcl_basic_istringstream,std::basic_istringstream
vcl_iosfwd.h,vcl_basic_ofstream,std::basic_ofstream
vcl_iosfwd.h,vcl_basic_ostream,std::basic_ostream
vcl_iosfwd.h,vcl_basic_ostringstream,std::basic_ostringstream
vcl_iosfwd.h,vcl_basic_streambuf,std::basic_streambuf
vcl_iosfwd.h,vcl_char_traits,std::char_traits
vcl_iosfwd.h,vcl_filebuf,std::filebuf
vcl_iosfwd.h,vcl_fpos,std::fpos
vcl_iosfwd.h,vcl_fstream,std::fstream
vcl_iosfwd.h,vcl_ifstream,std::ifstream
vcl_iosfwd.h,vcl_ios,std::ios
vcl_iosfwd.h,vcl_iostream,std::iostream
vcl_iosfwd.h,vcl_istream,std::istream
vcl_iosfwd.h,vcl_istreambuf_iterator,std::istreambuf_iterator
vcl_iosfwd.h,vcl_ofstream,std::ofstream
vcl_iosfwd.h,vcl_ostream,std::ostream
vcl_iosfwd.h,vcl_ostreambuf_iterator,std::ostreambuf_iterator
vcl_iosfwd.h,vcl_streambuf,std::streambuf
vcl_iosfwd.h,vcl_streamoff,std::streamoff
vcl_iosfwd.h,vcl_streampos,std::streampos
vcl_iosfwd.h,vcl_stringstream,std::stringstream
vcl_iosfwd.h,vcl_wfilebuf,std::wfilebuf
vcl_iosfwd.h,vcl_wfstream,std::wfstream
vcl_iosfwd.h,vcl_wifstream,std::wifstream
vcl_iosfwd.h,vcl_wios,std::wios
vcl_iosfwd.h,vcl_wiostream,std::wiostream
vcl_iosfwd.h,vcl_wistream,std::wistream
vcl_iosfwd.h,vcl_wistringstream,std::wistringstream
vcl_iosfwd.h,vcl_wofstream,std::wofstream
vcl_iosfwd.h,vcl_wostream,std::wostream
vcl_iosfwd.h,vcl_wostringstream,std::wostringstream
vcl_iosfwd.h,vcl_wstreambuf,std::wstreambuf
vcl_iosfwd.h,vcl_wstreampos,std::wstreampos
vcl_iosfwd.h,vcl_wstringbuf,std::wstringbuf
vcl_iosfwd.h,vcl_wstringstream,std::wstringstream
vcl_iostream.h,vcl_cerr,std::cerr
vcl_iostream.h,vcl_cin,std::cin
vcl_iostream.h,vcl_clog,std::clog
vcl_iostream.h,vcl_cout,std::cout
vcl_iostream.h,vcl_dec,std::dec
vcl_iostream.h,vcl_endl,std::endl
vcl_iostream.h,vcl_ends,std::ends
vcl_iostream.h,vcl_flush,std::flush
vcl_iostream.h,vcl_hex,std::hex
vcl_iostream.h,vcl_ios_app,std::ios::app
vcl_iostream.h,vcl_ios_ate,std::ios::ate
vcl_iostream.h,vcl_ios_binary,std::ios::binary
vcl_iostream.h,vcl_ios_in,std::ios::in
vcl_iostream.h,vcl_ios_openmode,std::ios::openmode
vcl_iostream.h,vcl_ios_out,std::ios::out
vcl_iostream.h,vcl_ios_trunc,std::ios::trunc
vcl_iostream.h,vcl_oct,std::oct
vcl_iostream.h,vcl_ostream,std::ostream
vcl_iostream.h,vcl_streambuf,std::streambuf
vcl_iostream.h,vcl_streampos,std::streampos
vcl_iostream.h,vcl_wcout,std::wcout
vcl_iostream.h,vcl_ws,std::ws
vcl_istream.h,vcl_basic_iostream,std::basic_iostream
vcl_istream.h,vcl_basic_istream,std::basic_istream
vcl_istream.h,vcl_iostream,std::iostream
vcl_istream.h,vcl_istream,std::istream
vcl_istream.h,vcl_wiostream,std::wiostream
vcl_istream.h,vcl_wistream,std::wistream
vcl_iterator.h,vcl_advance,std::advance
vcl_iterator.h,vcl_back_insert_iterator,std::back_insert_iterator
vcl_iterator.h,vcl_back_inserter,std::back_inserter
vcl_iterator.h,vcl_bidirectional_iterator_tag,std::bidirectional_iterator_tag
vcl_iterator.h,vcl_distance,std::distance
vcl_iterator.h,vcl_forward_iterator_tag,std::forward_iterator_tag
vcl_iterator.h,vcl_front_insert_iterator,std::front_insert_iterator
vcl_iterator.h,vcl_front_inserter,std::front_inserter
vcl_iterator.h,vcl_input_iterator_tag,std::input_iterator_tag
vcl_iterator.h,vcl_insert_iterator,std::insert_iterator
vcl_iterator.h,vcl_inserter,std::inserter
vcl_iterator.h,vcl_istream_iterator,std::istream_iterator
vcl_iterator.h,vcl_istreambuf_iterator,std::istreambuf_iterator
vcl_iterator.h,vcl_iterator,std::iterator
vcl_iterator.h,vcl_iterator_traits,std::iterator_traits
vcl_iterator.h,vcl_ostream_iterator,std::ostream_iterator
vcl_iterator.h,vcl_ostreambuf_iterator,std::ostreambuf_iterator
vcl_iterator.h,vcl_output_iterator_tag,std::output_iterator_tag
vcl_iterator.h,vcl_random_access_iterator_tag,std::random_access_iterator_tag
vcl_iterator.h,vcl_reverse_iterator,std::reverse_iterator
vcl_limits.h,vcl_float_denorm_style,std::float_denorm_style
vcl_limits.h,vcl_float_round_style,std::float_round_style
vcl_limits.h,vcl_numeric_limits,std::numeric_limits
vcl_limits.h,vcl_round_toward_neg_infinity,std::round_toward_neg_infinity
vcl_limits.h,vcl_round_toward_zero,std::round_toward_zero
vcl_list.h,vcl_list,std::list
vcl_list.h,vcl_swap,std::swap
vcl_locale.h,vcl_codecvt,std::codecvt
vcl_locale.h,vcl_codecvt_base,std::codecvt_base
vcl_locale.h,vcl_codecvt_byname,std::codecvt_byname
vcl_locale.h,vcl_collate,std::collate
vcl_locale.h,vcl_collate_byname,std::collate_byname
vcl_locale.h,vcl_ctype,std::ctype
vcl_locale.h,vcl_has_facet,std::has_facet
vcl_locale.h,vcl_isalnum,std::isalnum
vcl_locale.h,vcl_isalpha,std::isalpha
vcl_locale.h,vcl_iscntrl,std::iscntrl
vcl_locale.h,vcl_isdigit,std::isdigit
vcl_locale.h,vcl_isgraph,std::isgraph
vcl_locale.h,vcl_islower,std::islower
vcl_locale.h,vcl_isprint,std::isprint
vcl_locale.h,vcl_ispunct,std::ispunct
vcl_locale.h,vcl_isspace,std::isspace
vcl_locale.h,vcl_isupper,std::isupper
vcl_locale.h,vcl_isxdigit,std::isxdigit
vcl_locale.h,vcl_messages,std::messages
vcl_locale.h,vcl_messages_byname,std::messages_byname
vcl_locale.h,vcl_money_get,std::money_get
vcl_locale.h,vcl_money_put,std::money_put
vcl_locale.h,vcl_moneypunct,std::moneypunct
vcl_locale.h,vcl_moneypunct_byname,std::moneypunct_byname
vcl_locale.h,vcl_num_get,std::num_get
vcl_locale.h,vcl_num_put,std::num_put
vcl_locale.h,vcl_numpunct,std::numpunct
vcl_locale.h,vcl_numpunct_byname,std::numpunct_byname
vcl_locale.h,vcl_time_get,std::time_get
vcl_locale.h,vcl_time_get_byname,std::time_get_byname
vcl_locale.h,vcl_time_put,std::time_put
vcl_locale.h,vcl_time_put_byname,std::time_put_byname
vcl_locale.h,vcl_tolower,std::tolower
vcl_locale.h,vcl_toupper,std::toupper
vcl_locale.h,vcl_use_facet,std::use_facet
vcl_map.h,vcl_map,std::map
vcl_map.h,vcl_multimap,std::multimap
vcl_map.h,vcl_swap,std::swap
vcl_memory.h,vcl_allocator,std::allocator
vcl_memory.h,vcl_auto_ptr,std::auto_ptr
vcl_memory.h,vcl_get_temporary_buffer,std::get_temporary_buffer
vcl_memory.h,vcl_raw_storage_iterator,std::raw_storage_iterator
vcl_memory.h,vcl_return_temporary_buffer,std::return_temporary_buffer
vcl_memory.h,vcl_uninitialized_copy,std::uninitialized_copy
vcl_memory.h,vcl_uninitialized_fill,std::uninitialized_fill
vcl_memory.h,vcl_uninitialized_fill_n,std::uninitialized_fill_n
vcl_new.h,vcl_bad_alloc,std::bad_alloc
vcl_new.h,vcl_set_new_handler,std::set_new_handler
vcl_numeric.h,vcl_accumulate,std::accumulate
vcl_numeric.h,vcl_adjacent_difference,std::adjacent_difference
vcl_numeric.h,vcl_inner_product,std::inner_product
vcl_numeric.h,vcl_partial_sum,std::partial_sum
vcl_ostream.h,vcl_basic_ostream,std::basic_ostream
vcl_ostream.h,vcl_endl,std::endl
vcl_ostream.h,vcl_ends,std::ends
vcl_ostream.h,vcl_flush,std::flush
vcl_ostream.h,vcl_ostream,std::ostream
vcl_ostream.h,vcl_wostream,std::wostream
vcl_queue.h,vcl_priority_queue,std::priority_queue
vcl_queue.h,vcl_queue,std::queue
vcl_set.h,vcl_multiset,std::multiset
vcl_set.h,vcl_set,std::set
vcl_set.h,vcl_swap,std::swap
vcl_sstream.h,vcl_basic_stringbuf,std::basic_stringbuf
vcl_sstream.h,vcl_istringstream,std::istringstream
vcl_sstream.h,vcl_ostringstream,std::ostringstream
vcl_sstream.h,vcl_stringbuf,std::stringbuf
vcl_sstream.h,vcl_stringstream,std::stringstream
vcl_sstream.h,vcl_wstringbuf,std::wstringbuf
vcl_stack.h,vcl_stack,std::stack
vcl_stdexcept.h,vcl_domain_error,std::domain_error
vcl_stdexcept.h,vcl_invalid_argument,std::invalid_argument
vcl_stdexcept.h,vcl_length_error,std::length_error
vcl_stdexcept.h,vcl_logic_error,std::logic_error
vcl_stdexcept.h,vcl_out_of_range,std::out_of_range
vcl_stdexcept.h,vcl_overflow_error,std::overflow_error
vcl_stdexcept.h,vcl_range_error,std::range_error
vcl_stdexcept.h,vcl_runtime_error,std::runtime_error
vcl_stdexcept.h,vcl_underflow_error,std::underflow_error
vcl_streambuf.h,vcl_basic_streambuf,std::basic_streambuf
vcl_streambuf.h,vcl_streambuf,std::streambuf
vcl_string.h,vcl_basic_string,std::basic_string
vcl_string.h,vcl_char_traits,std::char_traits
vcl_string.h,vcl_getline,std::getline
vcl_string.h,vcl_string,std::string
vcl_string.h,vcl_swap,std::swap
vcl_string.h,vcl_wstring,std::wstring
vcl_typeinfo.h,vcl_bad_cast,std::bad_cast
vcl_typeinfo.h,vcl_bad_typeid,std::bad_typeid
vcl_typeinfo.h,vcl_type_info,std::type_info
vcl_utility.h,vcl_make_pair,std::make_pair
vcl_utility.h,vcl_pair,std::pair
vcl_valarray.h,vcl_abs,std::abs
vcl_valarray.h,vcl_acos,std::acos
vcl_valarray.h,vcl_asin,std::asin
vcl_valarray.h,vcl_atan,std::atan
vcl_valarray.h,vcl_atan2,std::atan2
vcl_valarray.h,vcl_cos,std::cos
vcl_valarray.h,vcl_cosh,std::cosh
vcl_valarray.h,vcl_exp,std::exp
vcl_valarray.h,vcl_gslice,std::gslice
vcl_valarray.h,vcl_gslice_array,std::gslice_array
vcl_valarray.h,vcl_indirect_array,std::indirect_array
vcl_valarray.h,vcl_log,std::log
vcl_valarray.h,vcl_log10,std::log10
vcl_valarray.h,vcl_mask_array,std::mask_array
vcl_valarray.h,vcl_pow,std::pow
vcl_valarray.h,vcl_sin,std::sin
vcl_valarray.h,vcl_sinh,std::sinh
vcl_valarray.h,vcl_slice,std::slice
vcl_valarray.h,vcl_slice_array,std::slice_array
vcl_valarray.h,vcl_sqrt,std::sqrt
vcl_valarray.h,vcl_tan,std::tan
vcl_valarray.h,vcl_tanh,std::tanh
vcl_valarray.h,vcl_valarray,std::valarray
vcl_vector.h,vcl_swap,std::swap
vcl_vector.h,vcl_vector,std::vector
vcl_cerrno.h,vcl_cerr,std::cerr
vcl_exception.h,vcl_throw,throw
vcl_exception.h,vcl_try,try
vcl_exception.h,vcl_catch_all,catch(...)
vcl_exception.h,vcl_catch,catch
vcl_ios.h,vcl_ios,std::ios
"""
vcl_replace_head_names = OrderedDict()
vcl_replace_functionnames = OrderedDict()
vcl_replace_manual = OrderedDict()
for line in info_for_conversion.splitlines():
linevalues = line.split(",")
if len(linevalues) != 3:
# print("SKIPPING: " + str(linevalues))
continue
fname = linevalues[0]
new_name = fname.replace("vcl_", "").replace(".h", "")
vcl_replace_head_names[f'#include "{fname}"'] = f'#include "{new_name}"'
vcl_replace_head_names[f"#include <{fname}>"] = f"#include <{new_name}>"
vcl_pat = linevalues[1]
new_pat = linevalues[2]
vcl_replace_functionnames[vcl_pat] = new_pat
# Need to fix the fact that both std::ios is a base and a prefix
if "std::ios::" in new_pat:
vcl_replace_manual[new_pat.replace("std::ios::", "std::ios_")] = new_pat
# print(vcl_replace_head_names)
# print(vcl_replace_functionnames)
cfile = sys.argv[1]
file_as_string = ""
with open(cfile) as rfp:
file_as_string = rfp.read()
orig_file = file_as_string
if (
file_as_string.find("std::cout")
or file_as_string.find("std::cerr")
or file_as_string.find("std::cin")
):
required_header = "#include <vcl_compiler.h>\n#include <iostream>\n"
else:
required_header = "#include <vcl_compiler.h>\n"
for searchval, replaceval in vcl_replace_head_names.items():
file_as_string_new = file_as_string.replace(searchval, required_header + replaceval)
if file_as_string_new != file_as_string:
required_header = ""
file_as_string = file_as_string_new
for searchval, replaceval in vcl_replace_functionnames.items():
file_as_string = file_as_string.replace(searchval, replaceval)
for searchval, replaceval in vcl_replace_manual.items():
file_as_string = file_as_string.replace(searchval, replaceval)
if orig_file != file_as_string:
print("Processing: " + cfile)
with open(cfile, "w") as wfp:
wfp.write(file_as_string)
else:
print("NO CHANGES NEEDED: " + cfile)
|
994,655 | c49da458c86dd66d1912c3d6e6e9b45a124fc7ee | from utils import *
"""
Data completeness audit object in a form of a callback for SAX content handler.
This audit class checks compliance to gold standard. The nonconformities can be requested after parsing.
This audit is only applied to elements which has a tag element child with k = amenity and v = pharmacy
"""
class DataCompletenessAudit(object):
"""
Constructor.
The specified standard is a list of tuples:
- Pharmacy name
- Adress
- standard: gold standard list
- warnings: toggle to report warnings
"""
def __init__(self, standard, warnings=False):
self._standard = standard
self._missing = standard[:]
self._nonconformities = [ ]
self._warnings = warnings
"""
Method called back when a start event is encountered.
- stack: stack of elements being read
- locator: locator object from SAX parser
"""
def startEventCallback(self, stack, locator):
pass
"""
Method called back when an end event is encountered.
- name: element name
- children: element children
- locator: locator object from SAX parser
"""
def endEventCallback(self, name, children, locator):
#Find item with a tag child having amenity as k value and pharmacy as v value and compare to standard
match = findTagInChildren(children, 'amenity', 'pharmacy')
if match is not None:
name = findTagInChildren(children, 'name')
found = False
for i in xrange(len(self._missing)):
if compareStrings(self._missing[i][0], name):
found = True
break
if found:
self._missing.pop(i)
elif self._warnings:
message = u'Pharmacy "{}" found but not expected.'.format(name)
self._nonconformities.append(('Warning', message))
"""
Return nonconformities.
A list of tuple is returned:
- type of audit
- nonconformity description
"""
def getNonconformities(self):
for row in self._missing:
message = u'Pharmacy "{}" is missing in dataset.'.format(row[0])
self._nonconformities.append(('Completeness', message))
return self._nonconformities |
994,656 | 030e02a7572f4c994238033b678023bbd2f21692 | import numpy as np
from neural_network_3 import *
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import time
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import os
from visualize_weights import visualize_weights
def get_sorted_files_by_modified_date(directory):
# !/usr/bin/env python
from stat import S_ISREG, ST_CTIME, ST_MODE
import os, sys, time
# path to the directory (relative or absolute)
dirpath = directory
# get all entries in the directory w/ stats
entries = (os.path.join(dirpath, fn) for fn in os.listdir(dirpath))
entries = ((os.stat(path), path) for path in entries)
# leave only regular files, insert creation date
entries = ((stat[ST_CTIME], path)
for stat, path in entries if S_ISREG(stat[ST_MODE]))
# NOTE: on Windows `ST_CTIME` is a creation date
# but on Unix it could be something else
# NOTE: use `ST_MTIME` to sort by a modification date
# for cdate, path in sorted(entries):
# print time.ctime(cdate), os.path.basename(path)
return [path for stat, path in sorted(entries)]
def get_weight_animation_sequence():
# nets = sorted(os.listdir("./saved_networks"))
nets = get_sorted_files_by_modified_date("./saved_networks/")
print nets
for net in nets:
yield load_from_file(net)
rows = 5
cols = 6
shape = (28,28)
layer = 1
# rows = 1
# cols = 10
# shape = (10,10)
# layer = 2
weight_animation_sequence = get_weight_animation_sequence()
frames = [visualize_weights(net.weights[layer], rows, cols, shape) for net in weight_animation_sequence]
frames_it = cycle(frames)
fig = plt.figure()
def f(x, y):
return np.sin(x) + np.cos(y)
# im = plt.imshow(np.eye(28), cmap=cm.Greys_r, animated=True)
im = plt.imshow(0.2+0.6*np.eye(28), cmap=plt.get_cmap('afmhot'), animated=True)
def updatefig(*args):
im.set_array(frames_it.next())
return im,
ani = animation.FuncAnimation(fig, updatefig, interval=0, blit=True)
plt.show()
|
994,657 | 30738df38a9093be5044f832fc555a991c9a663e | from tkinter import *
def save_info():
firstname_info = firstname.get()
lastname_info = lastname.get()
age_info = age.get()
print(firstname_info,lastname_info,age_info)
file = open("user.txt","w")
file.write("Your First Name " + firstname_info)
file.write("\n")
file.write("Your Last Name " + lastname_info)
file.write("\n")
file.write("Your Age " + str(age_info))
file.close()
app = Tk()
app.geometry("500x500")
app.title("Python File Handling in Forms")
heading = Label(text="Python File Handling in Forms",fg="black",bg="yellow",width="500",height="3",font="10")
heading.pack()
firstname_text = Label(text="FirstName :")
lastname_text = Label(text="LastName :")
age_text = Label(text="Age :")
firstname_text.place(x=15,y=70)
lastname_text.place(x=15,y=140)
age_text.place(x=15,y=210)
firstname = StringVar()
lastname = StringVar()
age = IntVar()
first_name_entry = Entry(textvariable=firstname,width="30")
last_name_entry = Entry(textvariable=lastname,width="30")
age_entry = Entry(textvariable=age,width="30")
first_name_entry.place(x=15,y=100)
last_name_entry.place(x=15,y=180)
age_entry.place(x=15,y=240)
button = Button(app,text="Submit Data",command=save_info,width="30",height="2",bg="grey")
button.place(x=15,y=290)
mainloop()
|
994,658 | db41b2bc968a4b410259fe97257d56cab3a6b56d | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: Beam
@Mail:506556658@qq.com
@file: server_socket.py
@time: 2017/4/15 11:07
"""
import socket
class Socket(object):
def __init__(self):
self.sock = socket.socket() #实例化socket对象sock
self.sock.bind(('127.0.0.1',36969)) #绑定监听端口
self.sock.listen() #开始监听
def connect(self):
self.conn,self.addr = self.sock.accept() #等待client连接 conn就是client连接过来生成的一个连接实例
self.message = self.conn.recv(1024) #接收client的数据并赋值,最多1024字节
if not self.message :
return False
print(self.addr,"发来命令 >>:",self.message)
self.conn.send(self.message) #返回结果到client端
def __del__(self):
self.sock.close() #关闭socket连接
def main():
s = Socket()
while True:
s.connect()
if __name__ == '__main__':
main() |
994,659 | a4c086d25e1e03fb79f7521d5dbc5f39020a5bdb | class Error(Exception):
'''Base class for exceptions in this module.'''
pass
class ColorError(Error):
'''Exception raised for invalid color value given for a GridSquare'''
def __init__(self, valuegiven):
self.valuegiven = valuegiven
self.message = 'ColorError: Expected white, light gray, or dark gray, but given: \' '+valuegiven+'\'.'
class CoordinatesError(Error):
'''Exception raised for invalid coordinate values given for a GridSquare'''
def __init__(self, givenx, giveny ):
self.givenx = givenx
self.giveny = giveny
self.message = 'CoordinatesError: Expected x value between 0 and 159 and y value between 0 and 119, but given x value: ' + givenx + ' and y value: ' + giveny + '.'
|
994,660 | 6c9ba2964ece991219405f481301952270fc9306 | class Device:
def __init__(self, id=None, token=None, platform=None, endpoint=None, created_at=None, updated_at=None):
self.id = id
self.token = token
self.platform = platform
self.endpoint = endpoint
self.created_at = created_at
self.updated_at = updated_at
|
994,661 | 2e29196f5b76ded93de194295d7efdedc1372ba4 | import pandas as pd
data = pd.read_json('/Users/minhdam/PycharmProjects/test/visualization/visualization/spiders/Output/vnexpress.txt', lines=True)
data.to_csv('/Users/minhdam/PycharmProjects/test/visualization/visualization/spiders/Output/sosanhgia.csv', encoding='utf8') |
994,662 | 08c6fe7beaba702ed152c7cc0d7c9663c56433c9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.code import If
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.param import Param
from hwtHls.hls import Hls
from hwtHls.platform.virtual import VirtualHlsPlatform
from hwtLib.samples.statements.ifStm import SimpleIfStatement
class SimpleIfStatementHls(SimpleIfStatement):
def _config(self):
self.CLK_FREQ = Param(int(100e6))
def _declr(self):
addClkRstn(self)
super(SimpleIfStatementHls, self)._declr()
def _impl(self):
with Hls(self, freq=self.CLK_FREQ) as h:
io = h.io
a = io(self.a)
b = io(self.b)
c = io(self.c)
d = io(self.d)
If(a,
d(b),
).Elif(b,
d(c),
).Else(
d(c)
)
if __name__ == "__main__": # alias python main function
from hwt.synthesizer.utils import toRtl
u = SimpleIfStatementHls()
p = VirtualHlsPlatform()
print(toRtl(u, targetPlatform=p))
|
994,663 | 583b8ce9327143be3dfebe42593471d3b3b498d7 |
import pytest
from delphin.eds import EDS, Node, from_mrs, EDSWarning
from delphin.mrs import MRS, EP, HCons
@pytest.fixture
def dogs_bark():
return {
'top': 'e2',
'nodes': [Node('e2', '_bark_v_1', type='e', edges={'ARG1': 'x4'}),
Node('_1', 'udef_q', edges={'BV': 'x4'}),
Node('x4', '_dog_n_1', type='x')]
}
@pytest.fixture
def dogs_bark_mrs():
return MRS(
top='h0',
index='e2',
rels=[EP('_bark_v_1', label='h1', args={'ARG0': 'e2', 'ARG1': 'x4'}),
EP('udef_q', label='h3',
args={'ARG0': 'x4', 'RSTR': 'h5', 'BODY': 'h6'}),
EP('_dog_n_1', label='h7', args={'ARG0': 'x4'})],
hcons=[HCons.qeq('h0', 'h1'), HCons.qeq('h5', 'h7')]
)
def test_empty_EDS():
d = EDS()
assert d.top is None
assert d.nodes == []
def test_basic_EDS(dogs_bark):
d = EDS(**dogs_bark)
assert d.top == 'e2'
assert len(d.nodes) == 3
assert d.nodes[0].predicate == '_bark_v_1'
assert d.nodes[1].predicate == 'udef_q'
assert d.nodes[2].predicate == '_dog_n_1'
assert d.nodes[0].edges == {'ARG1': 'x4'}
assert d.nodes[1].edges == {'BV': 'x4'}
assert d.nodes[2].edges == {}
assert len(d.edges) == 2
assert d.edges[0] == ('e2', 'ARG1', 'x4')
assert d.edges[1] == ('_1', 'BV', 'x4')
def test_from_mrs(dogs_bark, dogs_bark_mrs):
d = from_mrs(dogs_bark_mrs)
e = EDS(**dogs_bark)
assert d[d.top] == e[e.top] and d.nodes == e.nodes
assert d == e
# recover TOP from INDEX
dogs_bark_mrs.top = None
d = from_mrs(dogs_bark_mrs)
e = EDS(**dogs_bark)
assert d == e
# no TOP or INDEX
dogs_bark_mrs.index = None
with pytest.warns(EDSWarning):
d = from_mrs(dogs_bark_mrs)
e = EDS(**{'top': None, 'nodes': dogs_bark['nodes']})
assert d == e
def test_from_mrs_broken_hcons_issue_319(dogs_bark_mrs):
# broken top
dogs_bark_mrs.rels[0].label = 'h99'
with pytest.warns(EDSWarning):
d = from_mrs(dogs_bark_mrs)
assert d.top == 'e2'
# it probably rained
m = MRS(
top='h0',
index='e2',
rels=[EP('_probable_a_1', label='h1', args={'ARG0': 'i4', 'ARG1': 'h5'}),
EP('_rain_v_1', label='h6', args={'ARG0': 'e2'})],
hcons=[HCons.qeq('h0', 'h1'), HCons.qeq('h5', 'h6')]
)
# no warning normally
e = from_mrs(m)
# broken hcons
m.rels[1].label = 'h99'
with pytest.warns(EDSWarning):
d = from_mrs(m)
assert len(d.nodes) == 2
assert len(d.arguments()['i4']) == 0
|
994,664 | 9670afc11883311278dff3f78990ba6549c6a1fa | import logging
from pyramid.config import Configurator
from pyramid.view import view_config
from hackohio.mood import Mood
from hackohio.secrets import get_secret
from hackohio import soundcloud
from pyramid.response import Response
logger = logging.getLogger(__name__)
def main(global_config, **settings):
config = Configurator(settings=settings)
config.include("pyramid_debugtoolbar")
config.add_static_view("/static", "hackohio:static/")
# Config jinja2 renderer
config.include("pyramid_jinja2")
config.add_jinja2_renderer(".html")
config.add_jinja2_search_path("hackohio:html/", ".html")
# Routes
config.add_route("index", "/")
config.add_route("playlist", "/playlist/{name}")
config.add_route("soundcloud_tracks", "/soundcloud/tracks")
config.add_route("soundcloud_streams", "/soundcloud/streams")
config.add_route("soundcloud_file", "/soundcloud/file")
for mood_provider in ["webcam", "voice", "twitter"]:
config.add_route("mood#%s" % mood_provider, "/mood/%s" % mood_provider)
logger.info("Creating WSGI server")
config.scan()
return config.make_wsgi_app()
@view_config(route_name="index", renderer="index.html", request_method="GET")
def index_view(request):
client_id = get_secret("soundcloud", "client_id")
return {"client_id": client_id}
@view_config(route_name="playlist", renderer="json", request_method="GET")
def playlist_view(request):
name = request.matchdict.get("name")
if name == "happy":
return [{
"media": "/static/media/happy1.ogg",
"cover": "/static/media/happy1.jpg",
"title": "Happy Song 1",
"artist": "Happy Guy",
"album": "Happy Album",
}, {
"media": "/static/media/happy2.ogg",
"cover": "/static/media/happy2.jpg",
"title": "Happy Song 2",
"artist": "Happy Guy",
"album": "Happy Album 2",
}, {
"media": "/static/media/happy3.ogg",
"cover": "/static/media/happy3.jpg",
"title": "Happy Song 3",
"artist": "Happy Person",
"album": "Happy II",
}]
elif name == "sad":
return [{
"media": "/static/media/sad1.mp3",
"cover": "/static/media/sad1.jpg",
"title": "Sad Song 1",
"artist": "Sad Guy",
"album": "Sad Album",
}, {
"media": "/static/media/sad2.ogg",
"cover": "/static/media/sad2.jpg",
"title": "Sad Song 2",
"artist": "Sad Guy",
"album": "Sad Album 2",
}, {
"media": "/static/media/sad3.ogg",
"cover": "/static/media/sad3.jpg",
"title": "Sad Song 3",
"artist": "Sad Person",
"album": "Sad II",
}]
if name == "angry":
return [{
"media": "/static/media/angry1.mp3",
"cover": "/static/media/angry1.jpg",
"title": "Angry Song 1",
"artist": "Angry Guy",
"album": "Angry Album",
}, {
"media": "/static/media/angry2.ogg",
"cover": "/static/media/angry2.jpg",
"title": "Angry Song 2",
"artist": "Angry Guy",
"album": "Angry Album 2",
}, {
"media": "/static/media/angry3.ogg",
"cover": "/static/media/angry3.jpg",
"title": "Angry Song 3",
"artist": "Angry Person",
"album": "Angry II",
}]
else:
return [{
"media": "/static/media/normal1.ogg",
"cover": "/static/media/normal1.jpg",
"title": "Normal Song 1",
"artist": "Normal Guy",
"album": "Normal Album",
}, {
"media": "/static/media/normal2.ogg",
"cover": "/static/media/normal2.jpg",
"title": "Normal Song 2",
"artist": "Normal Guy",
"album": "Normal Album 2",
}, {
"media": "/static/media/normal3.ogg",
"cover": "/static/media/normal3.jpg",
"title": "Normal Song 3",
"artist": "Normal Person",
"album": "Normal II",
}]
@view_config(route_name="mood#twitter", renderer="json", request_method="GET")
def mood_twitter_view(request):
twitter_handle = request.GET.get('handle')
return {
"mood": Mood.mood_from_twitter(twitter_handle)
}
@view_config(route_name="mood#webcam", renderer="string", request_method="POST")
def mood_webcam_view(request):
picture = request.POST.get('webcam').file
# TODO: Check validity? Resize?
try:
mood = Mood.mood_from_picture(picture)
logger.debug("Mood: %r" % mood)
return mood
except Exception as e:
logger.debug("microsoft picture api failed", exc_info=True)
return "none"
@view_config(route_name="soundcloud_tracks", renderer="json",
request_method="GET", http_cache=3600)
def soundcloud_tracks(request):
playlist_id = request.GET.get("playlist_id")
return soundcloud.get_playlist_tracks(playlist_id)
@view_config(route_name="soundcloud_streams", renderer="json",
request_method="GET", http_cache=3600)
def soundcloud_streams(request):
track_id = request.GET.get("track_id")
return soundcloud.get_stream_url(track_id)
@view_config(route_name="soundcloud_file", request_method="GET",
http_cache=3600)
def soundcloud_file(request):
track_id = request.GET.get("track_id")
request.response.content_type = "audio/mp3"
data = soundcloud.get_data(track_id)
return Response(body=data, content_type="audio/mp3")
|
994,665 | 45b06dc84ee1b31a870f3ac345f02c6fcb3ad70e | """
The Tribonacci sequence Tn is defined as follows:
T0 = 0, T1 = 1, T2 = 1, and Tn+3 = Tn + Tn+1 + Tn+2 for n >= 0.
Given n, return the value of Tn.
Input: n = 4
Output: 4
Explanation:
T_3 = 0 + 1 + 1 = 2
T_4 = 1 + 1 + 2 = 4
"""
# Solution 1 : Memoization
class Solution:
memo = {0:0, 1:1, 2:1}
def tribonacci(self, n: int) -> int:
if n in self.memo:
return self.memo[n]
self.memo[n] = self.tribonacci(n-1) + self.tribonacci(n-2) + self.tribonacci(n-3)
return self.memo[n]
# Time Complexity = O(n)
# Space Complexity = O(n)
# Solution 2 : Tabulation
class Solution:
def tribonacci(self, n: int) -> int:
lookup = [0,1,1]
for i in range(3,n+1):
lookup.append(lookup[i-1] + lookup[i-2] + lookup[i-3])
return lookup[n]
# Time Complexity = O(n)
# Space Complexity = O(n)
|
994,666 | e1fbdf8372d5f6c2e2e4dac21cca250f1d91af32 | from flask import Blueprint
bp = Blueprint('message', __name__)
@bp.route('/twiml/message/', methods=['POST'])
def message():
pass
@bp.route('/twiml/message/fallback/', methods=['POST'])
def message_fallback():
pass
@bp.route('/twiml/message/status/', methods=['POST'])
def message_status():
pass
|
994,667 | d7ae76a1f4c06b419811e337959fe19a5ad4ca69 | from rest_framework import serializers
from curricula.models import LearningLectureStat, LearningLecture, LearningLectureVideo, LearningLectureLiveScribe, LearningLectureText, LearningLectureYoutube
from .lecture_content import LearningLectureLiveScribeSerializer, LearningLectureVideoSerializer, LearningLectureTextSerializer, LearningLectureYoutubeSerializer
from generic_relations.relations import GenericRelatedField
from tests.models import Test
from .lecture_stat import LearningLectureStatSerializer
from components.serializers import ComponentSerializer
class LearningLectureListSerializer(serializers.ListSerializer):
def to_representation(self, data):
lesson = self.context.get('lesson', None)
if lesson is not None:
data = data.filter(lesson__id=lesson)
return super(LearningLectureListSerializer, self).to_representation(data)
class LearningLectureSerializer(serializers.ModelSerializer):
content_object = GenericRelatedField({
LearningLectureVideo: LearningLectureVideoSerializer(),
LearningLectureLiveScribe: LearningLectureLiveScribeSerializer(),
LearningLectureText: LearningLectureTextSerializer(),
LearningLectureYoutube: LearningLectureYoutubeSerializer(),
})
class Meta:
list_serializer_class = LearningLectureListSerializer
model = LearningLecture
fields = ('id', 'name', 'summary', 'content', 'position', 'content_object', 'created', 'updated', 'subject', 'publisher')
extra_kwargs = {
'slug': {'read_only': True, 'required': False}
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
request = self.context.get("request")
stat = self.context.get("stat")
if request is not None and stat is not None:
if request.user:
self.fields['stat'] = serializers.SerializerMethodField()
practice = self.context.get('practice', None)
if practice is not None:
self.fields['practice'] = serializers.SerializerMethodField()
def get_practice(self, lecture):
practice = {}
if lecture.practice_id:
queryset = Test.objects.prefetch_related('questions').filter(id=lecture.practice_id).first()
test_result = 0
result_query = queryset.tests.order_by('-id').first()
if result_query:
test_result = result_query.test_result
practice = {
'id': queryset.id,
'name': queryset.name,
'question_count': queryset.questions.count(),
'test_result': test_result
}
return practice
def get_stat(self, lecture):
request = self.context.get('request')
if request is not None:
if request:
queryset = LearningLectureStat.objects.filter(user=request.user, lecture=lecture).first()
serializer = LearningLectureStatSerializer(queryset, many=False)
return serializer.data
return {}
|
994,668 | f41d21c57ae038c17a3ea22dc6dd1362f371d0dc | from typing import Any, Callable, Iterator, TypeVar
O = TypeVar('O')
T = TypeVar('T')
def callUnpacked(predicate: T) -> Callable[[Iterator[Any]], O]:
return lambda it: predicate(*it)
|
994,669 | f8a7b4eaed877156e252ec7a3eb327f1c73becab | from datetime import datetime, timedelta
from fitcompetition.withings import WithingsService
from local_settings import WITHINGS_PASSWORD, WITHINGS_USER_NAME
from dateutil.relativedelta import relativedelta
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Count, Sum
from django.http import HttpResponse
from django.shortcuts import render, redirect
from fitcompetition import tasks
from fitcompetition.models import Challenge, FitnessActivity, FitUser, Team, Transaction, Challenger
from fitcompetition.settings import TEAM_MEMBER_MAXIMUM, TIME_ZONE
from fitcompetition.util.ListUtil import createListFromProperty, attr
import pytz
def challenges(request):
now = datetime.now(tz=pytz.timezone(TIME_ZONE))
currentChallenges = Challenge.objects.currentChallenges(userid=request.user.id)
upcomingChallenges = Challenge.objects.upcomingChallenges(userid=request.user.id)
pastChallenges = Challenge.objects.pastChallenges(userid=request.user.id)
challengeStats = Challenge.objects.filter(reconciled=True).aggregate(grandTotalDisbursed=Sum('totalDisbursed'),
totalWinnerCount=Sum('numWinners'))
accountFilter = Q()
unReconciledChallenges = Challenge.objects.filter(reconciled=False)
if len(unReconciledChallenges) > 0:
for challenge in unReconciledChallenges:
accountFilter |= Q(account=challenge.account)
transactionResult = Transaction.objects.filter(accountFilter).aggregate(upForGrabs=Sum('amount'))
else:
transactionResult = {
'upForGrabs': 0
}
return render(request, 'challenges.html', {
'currentChallenges': currentChallenges,
'upcomingChallenges': upcomingChallenges,
'pastChallenges': pastChallenges,
'totalPaid': attr(challengeStats, 'grandTotalDisbursed', defaultValue=0),
'upForGrabs': transactionResult.get('upForGrabs'),
'playingNow': Challenger.objects.filter(challenge__reconciled=False).count(),
'totalAllTimePlayers': Challenger.objects.all().count(),
'unReconciledChallenges': unReconciledChallenges.count(),
'totalCompletedChallenges': Challenge.objects.filter(reconciled=True).count()
})
@login_required
def profile(request):
return user(request, attr(request, 'user').id)
@login_required
def account(request):
return render(request, 'account.html')
def user(request, id):
try:
user = FitUser.objects.get(id=id)
except FitUser.DoesNotExist:
user = None
activeUserChallenges, upcomingUserChallenges, completedUserChallenges = Challenge.objects.userChallenges(id)
thirtyDaysAgo = datetime.today() + relativedelta(days=-30)
recentActivities = FitnessActivity.objects.select_related('type').filter(user=user).order_by('-date')[:20]
return render(request, 'user.html', {
'userprofile': user,
'activeChallenges': activeUserChallenges,
'upcomingUserChallenges': upcomingUserChallenges,
'completedChallenges': completedUserChallenges,
'recentActivities': recentActivities
})
def team(request, id):
try:
team = Team.objects.prefetch_related('members').get(id=id)
except Team.DoesNotExist:
team = None
members = team.members.all()
return render(request, 'team.html', {
'team': team,
'teamMembers': members
})
def faq(request):
return render(request, 'faq.html', {})
def challenge_slug(request, slug):
try:
c = Challenge.objects.prefetch_related('approvedActivities', 'players', 'teams').get(slug=slug)
return challenge_view(request, c)
except Challenge.DoesNotExist:
return redirect('challenges')
def challenge_id(request, id):
try:
c = Challenge.objects.prefetch_related('approvedActivities', 'players', 'teams').get(id=id)
return challenge_view(request, c)
except Challenge.DoesNotExist:
return redirect('challenges')
def challenge_view(request, challenge):
now = datetime.now(tz=pytz.utc)
isCompetitor = False
recentActivitiesWithoutEvidence = []
if request.user.is_authenticated():
isCompetitor = request.user in challenge.players.all()
if isCompetitor and challenge.startdate <= now <= challenge.enddate:
tasks.syncExternalActivities.delay(request.user.id)
approvedTypes = challenge.approvedActivities.all()
if isCompetitor and challenge.proofRequired:
recentWithoutEvidenceFilter = Q(user=request.user) & Q(hasProof=False) & Q(date__gte=(now + timedelta(hours=-24)))
typeFilter = Q()
for activityType in approvedTypes:
typeFilter |= Q(type=activityType)
recentWithoutEvidenceFilter &= typeFilter
recentActivitiesWithoutEvidence = FitnessActivity.objects.filter(recentWithoutEvidenceFilter)
footFilter = Q(name__contains="Running")
footFilter |= Q(name__contains="Walking")
footFilter |= Q(name__contains="Hiking")
isFootRace = len(challenge.approvedActivities.filter(footFilter)) > 0
params = {
'show_social': 'social-callout-%s' % challenge.id not in request.COOKIES.get('hidden_callouts', ''),
'disqus_identifier': 'fc_challenge_%s' % challenge.id,
'challenge': challenge,
'canJoin': challenge.canJoin and not isCompetitor,
'isCompetitor': isCompetitor,
'approvedActivities': createListFromProperty(approvedTypes, 'name'),
'numPlayers': challenge.numPlayers,
'canWithdraw': isCompetitor and not challenge.hasStarted,
'recentActivities': challenge.getRecentActivities()[:15],
'isFootRace': isFootRace,
'recentActivitiesWithoutEvidence': recentActivitiesWithoutEvidence
}
if challenge.isTypeIndividual:
params['players'] = challenge.getChallengersWithActivities()
params['teams'] = []
elif challenge.isTypeTeam:
params['open_teams'] = Team.objects.filter(challenge=challenge).annotate(num_members=Count('members')).filter(
num_members__lt=TEAM_MEMBER_MAXIMUM)
if request.user.is_authenticated():
try:
team = Team.objects.get(challenge=challenge, members__id__exact=request.user.id)
params['open_teams'] = params['open_teams'].exclude(id=team.id)
except Team.DoesNotExist:
pass
params['teams'] = challenge.rankedTeams
params['canSwitchTeams'] = isCompetitor and not challenge.hasStarted
return render(request, 'challenge.html', params)
def user_activities(request, userID, challengeID):
activities = []
if challengeID is not None and userID is not None:
challenge = Challenge.objects.get(id=challengeID)
activitiesFilter = challenge.getActivitiesFilter(generic=True)
activitiesFilter = Q(user_id=userID) & activitiesFilter
activities = FitnessActivity.objects.filter(activitiesFilter).order_by('-date')
return render(request, 'user_activities.html', {
'activities': activities,
'challenge': challenge
})
@login_required
def diagnostics(request):
if request.GET.get('syncActivities') is not None:
tasks.syncExternalActivities(request.user.id)
elif request.GET.get('pruneActivities') is not None:
tasks.pruneExternalActivities(request.user.id)
elif request.GET.get('syncProfile') is not None:
tasks.syncExternalProfile(request.user.id)
elif request.GET.get('resetSyncDate') is not None:
user = FitUser.objects.get(id=request.user.id)
user.lastExternalSyncDate = None
user.save()
return render(request, 'diagnostics.html', {})
def weight(request):
JASON = 6130175
SHALAUNA = 6130387
service = WithingsService(WITHINGS_USER_NAME, WITHINGS_PASSWORD)
return render(request, 'weight.html', {
'jasonsMeasurements': service.getWeightMeasurements(JASON),
'shalaunasMeasurements': service.getWeightMeasurements(SHALAUNA)
})
def login_error(request):
return HttpResponse("login error")
def login(request):
return render(request, 'login.html', {})
|
994,670 | 030ae54b870342a9b5a1452c2540ef37110dce37 | #coding = utf-8
import sys,os
sys.path.append(os.path.dirname(os.getcwd()))
from elasticsearch import Elasticsearch
from common import common_log
class ElasticObj:
def __init__(self, index_name,index_type,ip='127.0.0.1'):
self._info = common_log.Common_Log()
'''
:param index_name: 索引名称
:param index_type: 索引类型
'''
self.index_name = index_name
self.index_type = index_type
# 无用户名密码状态
self.es = Elasticsearch([ip])
# 用户名密码状态
# self.es = Elasticsearch([ip],http_auth=('elastic', 'password'),port=9200)
def Get_Data_By_Body(self,doc):
_searched = self.es.search(index=self.index_name, doc_type=self.index_type, body=doc)
self._info.log("es_doc="+str(doc))
# self._info.log("es_results="+str(_searched))
return _searched
if __name__ == "__main__":
t = ElasticObj("item_tire_detail","_doc",ip ="168.61.148.253")
doc = {'query': {'match_all': {}}}
print(t.Get_Data_By_Body(doc)) |
994,671 | 873ff11674da8ef0b095f3d5b9ee209b7a90681d | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.hashers import check_password
from accounts.models.user import User
from django.conf import settings
import json
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from .serializers.comment_serializer import CommentSerializer
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.http import JsonResponse
from rest_framework.authtoken.views import ObtainAuthToken
# class RunappIndex(APIView):
# authentication_classes = [SessionAuthentication, BasicAuthentication]
# permission_classes = [IsAuthenticated]
# def get(self, request, format=None):
# content = {
# 'user': unicode(request.user), # `django.contrib.auth.User` instance.
# 'auth': unicode(request.auth), # None
# }
# return Response(content)
class RunappIndex(APIView):
def get(self, request, format=None):
token = Token.objects.create(user=...)
print(token.key)
@csrf_exempt
def authenticate(request, username=None, password=None, *args, **kwargs):
login_valid = (settings.ADMIN_LOGIN == request.GET['username'])
pwd_valid = check_password(request.GET['password'], settings.ADMIN_PASSWORD)
# print(request.GET['username'], request.GET['password'], settings.ADMIN_LOGIN, settings.ADMIN_PASSWORD)
if login_valid and pwd_valid:
try:
user = User.objects.get(username='quangnv')
# user = {'phan':'jeje'}
print('fuck', user)
except User.DoesNotExist:
# Create a new user. There's no need to set a password
# because only the password from settings.py is checked.
# user = User(username=request.GET.get('username'))
# user.is_staff = True
# user.is_superuser = True
# user.save()
user = 'kaka'
print('buc minh')
return HttpResponse(json.dumps(user), content_type="application/json")
return HttpResponse('ok')
def get_user(user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class create_auth_token(ObtainAuthToken):
def get(self, request):
tim = []
for user in User.objects.all():
ken = Token.objects.get_or_create(user=user)
print(Token.objects.get_or_create(user=user))
tim.append(ken)
return HttpResponse(tim)
|
994,672 | 46594104cd3b99bd91a50979626ed5bdd810bac7 | # 给定一个整数数组 asteroids,表示在同一行的行星。
#
# 对于数组中的每一个元素,其绝对值表示行星的大小,正负表示行星的移动方向(正表示向右移动,负表示向左移动)。每一颗行星以相同的速度移动。
#
# 找出碰撞后剩下的所有行星。碰撞规则:两个行星相互碰撞,较小的行星会爆炸。如果两颗行星大小相同,则两颗行星都会爆炸。两颗移动方向相同的行星,永远不会发生碰撞
# 。
#
#
#
# 示例 1:
#
#
# 输入:asteroids = [5,10,-5]
# 输出:[5,10]
# 解释:10 和 -5 碰撞后只剩下 10 。 5 和 10 永远不会发生碰撞。
#
# 示例 2:
#
#
# 输入:asteroids = [8,-8]
# 输出:[]
# 解释:8 和 -8 碰撞后,两者都发生爆炸。
#
# 示例 3:
#
#
# 输入:asteroids = [10,2,-5]
# 输出:[10]
# 解释:2 和 -5 发生碰撞后剩下 -5 。10 和 -5 发生碰撞后剩下 10 。
#
#
#
# 提示:
#
#
# 2 <= asteroids.length <= 10⁴
# -1000 <= asteroids[i] <= 1000
# asteroids[i] != 0
#
# Related Topics 栈 数组 👍 283 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
from collections import deque
from typing import List
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
s = deque()
n = len(asteroids)
s.append(asteroids[0])
idx = 1
while idx < n:
num = asteroids[idx]
if not s:
s.append(num)
idx += 1
continue
if num > 0:
s.append(num)
if num < 0:
# 此时从栈中逐个弹出数据, 和当前元素对比
if s[-1] < 0:
s.append(num)
else:
while s:
last = s.pop()
# 相对方向才有可能碰撞
if last > 0:
if abs(num) > last:
# 此时最后一个元素破碎, 继续和剩余的栈内元素碰撞
if not s:
s.append(num)
break
if s[-1] < 0:
s.append(num)
break
continue
elif abs(num) == last:
# 两个元素抵消, 跳出
break
else:
s.append(last)
break
idx += 1
return list(s)
# leetcode submit region end(Prohibit modification and deletion)
if __name__ == '__main__':
print(Solution().asteroidCollision(asteroids=[1, -1, -2, -2]))
|
994,673 | 8027242147f4a535da0b7ffa8caef8d90acb9f76 | import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from util.my_math_utils import *
from sequences.viterbi import viterbi
from sequences.forward_backward import forward_backward,sanity_check_forward_backward
import sequences.discriminative_sequence_classifier as dsc
class CRF_batch(dsc.DiscriminativeSequenceClassifier):
''' Implements a first order CRF'''
def __init__(self,dataset,feature_class,regularizer=0.01):
dsc.DiscriminativeSequenceClassifier.__init__(self,dataset,feature_class)
self.regularizer = regularizer
def train_supervised(self,sequence_list):
self.parameters = np.zeros(self.feature_class.nr_feats)
emp_counts = self.get_empirical_counts(sequence_list)
params,_,d = optimize.fmin_l_bfgs_b(self.get_objective,self.parameters,args=[sequence_list,emp_counts],factr = 1e14,maxfun = 500,iprint = 1,pgtol=1e-5)
self.parameters = params
self.trained = True
return params
def get_objective(self,parameters,sequence_list,emp_counts):
self.parameters = parameters
gradient = np.zeros(parameters.shape)
gradient += emp_counts
objective = 0
likelihoods = 0
exp_counts = np.zeros(parameters.shape)
for sequence in sequence_list:
seq_obj,seq_lik = self.get_objective_seq(parameters,sequence,exp_counts)
objective += seq_obj
likelihoods += seq_lik
objective -= 0.5*self.regularizer*np.dot(parameters,parameters)
if(likelihoods != 0):
objective -= np.log(likelihoods)
else:
print "likelihoods == 0"
gradient -= self.regularizer*parameters
gradient -= exp_counts
##Since we are minizing we need to multiply both the objective and gradient by -1
objective = -1*objective
gradient = gradient*-1
# print "Objective: %f"%objective
#print gradient
# print "Gradient norm: %f"%np.sqrt(np.dot(gradient,gradient))
## Sicne we are minimizing and not maximizing
return objective,gradient
def test_get_objective_seq(self,parameters,seq,times):
exp_counts = np.zeros(parameters.shape)
for i in xrange(times):
self.get_objective_seq(parameters,seq,exp_counts)
def test(self):
a = [1,2,3]
b = np.arange(1,2000,1)
c = 0
for i in xrange(1000000):
c += b[a]
def test2(self):
a = [1,2,3]
b = np.arange(1,2000,1)
c = 0
for i in xrange(1000000):
for j in a:
c += b[j]
def get_objective_seq(self,parameters,seq,exp_counts):
#print seq.nr
nr_states = self.nr_states
node_potentials,edge_potentials = self.build_potentials(seq)
forward,backward = forward_backward(node_potentials,edge_potentials)
H,N = forward.shape
likelihood = np.sum(forward[:,N-1])
#node_posteriors = self.get_node_posteriors_aux(seq,forward,backward,node_potentials,edge_potentials,likelihood)
#edge_posteriors = self.get_edge_posteriors_aux(seq,forward,backward,node_potentials,edge_potentials,likelihood)
seq_objective = 0
for pos in xrange(N):
true_y = seq.y[pos]
for state in xrange(H):
node_f_list = self.feature_class.get_node_features(seq,pos,state)
backward_aux = backward[state,pos]
forward_aux = forward[state,pos]
forward_aux_div_likelihood = forward_aux/likelihood
##Iterate over feature indexes
prob_aux = forward_aux_div_likelihood*backward_aux
for fi in node_f_list:
## For the objective add both the node features and edge feature dot the parameters for the true observation
if(state == true_y):
seq_objective += parameters[fi]
## For the gradient add the node_posterior ##Compute node posteriors on the fly
exp_counts[fi] += prob_aux
#Handle transitions
if(pos < N-1):
true_next_y = seq.y[pos+1]
for next_state in xrange(H):
backward_aux2 = backward[next_state,pos+1]
node_pot_aux = node_potentials[next_state,pos+1]
edge_f_list = self.feature_class.get_edge_features(seq,pos+1,next_state,state)
## For the gradient add the edge_posterior
edge_aux = edge_potentials[state,next_state,pos]
prob_aux = forward_aux_div_likelihood*edge_aux*node_pot_aux*backward_aux2
for fi in edge_f_list:
## For the objective add both the node features and edge feature dot the parameters for the true observation
if(next_state == true_next_y):
seq_objective += parameters[fi]
exp_counts[fi] += prob_aux
return seq_objective,likelihood
def get_empirical_counts(self,sequence_list):
emp_counts = np.zeros(self.feature_class.nr_feats)
for seq_node_features,seq_edge_features in self.feature_class.feature_list:
for f_l in seq_node_features:
for f in f_l:
emp_counts[f] += 1
for f_l in seq_edge_features:
for f in f_l:
emp_counts[f] += 1
return emp_counts
def print_node_posteriors(self,seq,node_posteriors):
print seq.nr
print seq
H,N = node_posteriors.shape
txt = []
for i in xrange(H):
txt.append("%s\t"%self.dataset.int_to_pos[i])
for pos in xrange(N):
for i in xrange(H):
txt[i] += "%f\t"%node_posteriors[i,pos]
for i in xrange(H):
print txt[i]
print ""
print ""
def posterior_decode(self,seq):
posteriors = self.get_node_posteriors(seq)
self.print_node_posteriors(seq,posteriors)
res = np.argmax(posteriors,axis=0)
new_seq = seq.copy_sequence()
new_seq.y = res
return new_seq
|
994,674 | 469c9bd9bb443eb5b8a47f93a150b11f1521a24e | '''
Trains a simple convnet to recognise a smile
'''
from __future__ import print_function
from os.path import exists
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, Activation, SeparableConv2D, GlobalAveragePooling2D
from keras import backend as K
from keras.applications import *
from keras.models import load_model
import keras.backend as K
import numpy as np
import pandas as pd
from PIL import Image
import argparse
from data_utils import load_data_to_labels
from data_utils import generate_data
from data_utils import Plotter
batch_size = 64
num_target_values = 2
epochs = 20
steps_per_epoch = 32
def mean_absolute_error(y_true, y_pred):
return K.mean(np.absolute(y_true - y_pred))
def create_model(input_shape, all_layers_trainable = False):
conv_base = Xception(input_shape = input_shape, include_top = False, weights = 'imagenet')
is_layer_trainable = False
for layer in conv_base.layers:
if layer.name == 'block14_sepconv1': # we can start with some other
is_layer_trainable = True
else:
is_layer_trainable = all_layers_trainable
layer.trainable = is_layer_trainable
model = Sequential()
model.add(conv_base)
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(num_target_values, name="prediction"))
return (model)
def make_all_layers_trainable(mdl, is_trainable = True):
for layer in mdl.layers:
layer.trainable = is_trainable
return (mdl)
def train(patch_size, label_path, image_path, train_all_layers):
# input image dimensions: TODO get from data or command-line params
input_shape = (patch_size, patch_size, 3)
train, test, valid = load_data_to_labels(label_path, train_fraction = 0.7, test_fraction = 0.15)
train_len = len(train)
test_len = len(test)
valid_len = len(valid)
print('Input data: train_len: ' + str(train_len) + ", test_len: " + str(test_len) + ", valid_len: " + str(valid_len))
model = None
if exists("best_student.mdl"):
print("Found a pre-trained model, so loading that")
model = load_model("best_student.mdl")
print("Making all layers trainable")
model = make_all_layers_trainable(model)
else:
print("No pre-trained model found, creating a new one")
model = create_model(input_shape)
if train_all_layers:
model = make_all_layers_trainable(model)
model.compile(loss=keras.losses.mean_squared_error,
optimizer=keras.optimizers.RMSprop(lr=0.01, clipnorm=1),
# optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
metrics=['mae'])
#if exists("student.mdl"):
# model.load_weights("student.mdl")
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_mean_absolute_error')
early_stop = keras.callbacks.EarlyStopping(monitor='val_mean_absolute_error', patience=10)
model_checkpoint = keras.callbacks.ModelCheckpoint("best_student.mdl", save_best_only=True, monitor='val_mean_absolute_error')
no_nan = keras.callbacks.TerminateOnNaN()
tb = keras.callbacks.TensorBoard()
plotter = Plotter(input_shape[0])
# TODO New callback to do sample inference after each epoch
model.fit_generator(generate_data(image_path, train, batch_size, patch_size),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[reduce_lr,early_stop,model_checkpoint, no_nan, tb, plotter],
verbose=1,
validation_data=generate_data(image_path, valid, batch_size, patch_size),
validation_steps=valid_len/batch_size)
model.save("transfer_student.mdl")
print('Evaluation on latest model:')
score = model.evaluate_generator(generate_data(image_path, test, batch_size, patch_size), steps=16)
print('\tTest loss:', score[0])
print('\tTest MSE:', score[1])
best_model = keras.models.load_model('best_student.mdl')
print('Evaluation on best model:')
score = best_model.evaluate_generator(generate_data(image_path, test, batch_size, patch_size), steps=16)
print('\tTest loss:', score[0])
print('\tTest MSE:', score[1])
num_test_samples = 512
# evaluating on latest model
print("\nCalculating error over " + str(num_test_samples) + " test samples... (using latest model)")
predictions = model.predict_generator(generate_data(image_path, test, num_test_samples, patch_size), steps=1)
np.save("predictions_latest.npy", predictions)
# Make it easy to compare predictions to actuals for test data
test_vs_predict = []
for i in range(0, num_test_samples):
sample = {"file":test[i][0],
"brightness":test[i][1], "sharpness":test[i][2],
"brightness_student":predictions[i][0], "sharpness_student":predictions[i][1]}
test_vs_predict.append(sample)
print("Saving errors on " + str(num_test_samples) + " to csv")
df = pd.DataFrame(test_vs_predict)
df.to_csv("predictions_vs_test_latest.csv")
# evaluating on best model
print("\nCalculating error over " + str(num_test_samples) + " test samples... (using best model)")
predictions = best_model.predict_generator(generate_data(image_path, test, num_test_samples, patch_size), steps=1)
np.save("predictions_best.npy", predictions)
# Make it easy to compare predictions to actuals for test data
test_vs_predict = []
for i in range(0, num_test_samples):
sample = {"file":test[i][0],
"brightness":test[i][1], "sharpness":test[i][2],
"brightness_student":predictions[i][0], "sharpness_student":predictions[i][1]}
test_vs_predict.append(sample)
print("Saving errors on " + str(num_test_samples) + " to csv")
df = pd.DataFrame(test_vs_predict)
df.to_csv("predictions_vs_test_best.csv")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--images", help="Path to images to train on", type=str, required=True)
parser.add_argument("--patchsize", help="Dimensions for input image", type=int, required=False, default=299)
parser.add_argument("--labels", help="File containing training labels", type=str, default="image_to_smile.json")
parser.add_argument("--train")
parser.add_argument("--trainall", action='store_true',help="Train all layers")
args = parser.parse_args()
train(args.patchsize, args.labels, args.images, args.trainall)
|
994,675 | 0f3b659f2c025fe1bc9d26619d3e26a6be7873d3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import fritzconnection as fc
from datetime import timedelta
import click
ADDRESS = 'fritz.box'
PASSWORD = ''
# print header
print('''
______ _ _
| ____| (_) |
| |__ _ __ _| |_ ____
| __| '__| | __|_ /
| | | | | | |_ / /
|_| |_| |_|\__/___|
''')
@click.group()
# @click.option('--password', prompt=True, hide_input=True, confirmation_prompt=False)
def main(password=PASSWORD):
"""
Small python script to talk to your fritzbox
"""
global c
print('[+] Connecting to "{}" ... '.format(ADDRESS))
c = fc.FritzConnection(address=ADDRESS, password=password)
# check connection by getting DeviceInfo
try:
print('[+] Connected to ', c.call_action('DeviceInfo:1', 'GetInfo')['NewModelName'])
except Exception as e:
print('[-] Could not connect!')
print(e)
exit(1)
@main.command()
def info():
'''
Get basic info
'''
status = c.call_action('WANIPConn:1', 'GetStatusInfo')
link = c.call_action('WANCommonIFC', 'GetCommonLinkProperties')
print('Status ', status['NewConnectionStatus'])
print('Provider Link ', link['NewPhysicalLinkStatus'])
print('Dslite ', c.call_action('WANCommonIFC:1', 'X_AVM_DE_GetDsliteStatus')['NewX_AVM_DE_DsliteStatus'])
print('Access Type ', link['NewWANAccessType'])
print('Uptime ', str(timedelta(seconds=status['NewUptime'])))
print('IPv6 ', c.call_action('WANIPConn:1', 'X_AVM_DE_GetExternalIPv6Address')['NewExternalIPv6Address'])
print('IPv4 ', c.call_action('WANIPConn:1', 'GetExternalIPAddress')['NewExternalIPAddress'])
print('Down Rate ', link['NewLayer1DownstreamMaxBitRate'] / 1000000)
print('Up Rate ', link['NewLayer1UpstreamMaxBitRate'] / 1000000)
@main.command()
def reconnect():
'''
Reconnect your fritzbox, get new ip
'''
print('[+] Reconnecting ...')
c.reconnect()
@main.command()
def reboot():
'''
Reboot your fritzbox
'''
print('[+] Rebooting ...')
c.call_action('DeviceConfig:1', 'Reboot')
print('[+] done!')
@main.command()
def hosts():
'''
List all active network clients
'''
print('[+] Getting Hosts:')
numHosts = c.call_action('Hosts:1', 'GetHostNumberOfEntries')['NewHostNumberOfEntries']
for i in range(numHosts):
host = c.call_action('Hosts:1', 'GetGenericHostEntry', NewIndex=i)
if host['NewActive'] and host['NewHostName'] != 'fritz.box':
print(host['NewHostName'], '==', host['NewIPAddress'], '==', host['NewInterfaceType'])
@main.command()
def logs():
'''
Print logs
'''
print('[+] Getting Logs:')
logs = c.call_action('DeviceInfo:1', 'GetDeviceLog')['NewDeviceLog']
# reverse order
for line in reversed(logs.split('\n')):
print(line)
if __name__ == '__main__':
main()
|
994,676 | a099a8d0610786d24f52cd6289033938d096d358 | import copy
class Graph():
def __init__(self, nodes):
self.nodes = nodes
def add_edge(self, from_, to):
self.nodes[from_].adjecent.add(to)
self.nodes[to].adjecent.add(from_)
def print_graph(self):
for vertex in self.nodes:
for adj in vertex.adjecent:
print(adj.n)
def is_eulerian(self):
count = 0
for vertex in self.nodes:
if len(vertex.adjecent)%2!=0:
count+=1
return count==2 or count==len(self.nodes)
def eulerian_path(self):
path = []
stack = []
current = None
if not self.is_eulerian():
for vertex in self.nodes:
if len(vertex.adjecent)%2==0:
current = vertex
break
else:
current = self.nodes[0]
while len(current.adjecent)>0:
if len(vertex.adjecent)==0:
path.append(vertex.n)
else:
stack.append(vertex)
to = list(vertex.adjecent)[0]
vertex.adjecent.pop()
print(self.nodes[to].adjecent)
print(vertex.n)
self.nodes[to].adjecent.remove(vertex.n)
current = self.nodes[to]
return path
class Node():
def __init__(self, n):
self.n = n
adjecent = set()
def main():
n, m = map(int, input().split(" "))
while n != 0 and m != 0:
graph = Graph([Node(n) for vertex in range(n)])
for _ in range(m):
from_, to = map(int, input().split(" "))
graph.add_edge(from_, to)
graph.print_graph()
print(graph.eulerian_path())
if __name__ == "__main__":
main() |
994,677 | 06f8199272fea7285ea83a5aabc320b6ae907c2e | class Node(object):
"""A node in a tree"""
def __init__(self, data, children=None):
self.data = data
if children is None:
self.children = []
else:
self.children = children
def __repr__(self):
"""Reader-friendly representation."""
return "<Node %s>" % self.data
def breadth_first_search(self, data):
to_visit = [self]
while to_visit:
current = to_visit.pop(0)
if current.data == data:
return current
to_visit.extend(current.children)
def depth_first_search(self, data):
to_visit = [self]
while to_visit:
current = to_visit.pop()
if current.data == data:
return current
to_visit.extend(current.children)
if __name__ == '__main__':
resume = Node("resume.txt")
recipes = Node("recipes.txt")
jane = Node("jane/", [resume, recipes])
server = Node("server.py")
jessica = Node("jessica/", [server])
users = Node("Users/", [jane, jessica])
root = Node("/", [users])
print root.breadth_first_search("resume.txt")
print root.depth_first_search('recipes.txt')
crabbe = Node("Crabbe", [])
seamus = Node("Seamus", [])
neville = Node("Neville", [])
parvati = Node("Parvati", [])
lavender = Node("Lavender", [])
malfoy = Node("Malfoy", [crabbe])
ron = Node("Ron", [seamus, neville])
hermione = Node("Hermione", [parvati, lavender])
padma = Node("Padma", [])
snape = Node("Snape", [malfoy])
mcgonagall = Node("McGonagall", [ron, hermione])
flitwick = Node("Flitwick", [padma])
dumbledore = Node("Dumbledore", [snape, mcgonagall, flitwick])
print dumbledore.breadth_first_search("Crabbe")
print dumbledore.depth_first_search("Crabbe") |
994,678 | f3155c5030e80ee0841f7e365cc740fff221f591 | from utils import SupplyResult, clean_after_module
from utils.tech import get_dev_channel
subreddit = 'all'
t_channel = get_dev_channel()
def send_post(submission, r2t):
total_size = clean_after_module()
r2t.send_text('Deleted: ' + str(round(total_size / (1024.0 ** 3), 3)) + 'GB.')
return SupplyResult.STOP_THIS_SUPPLY
|
994,679 | 30b74c08404237670c83400f1cb0314070a97573 | #!/usr/bin/env python
# Load required modules
import matplotlib
matplotlib.use('Agg')
import sys, os, argparse, pandas as pd, numpy as np
import seaborn as sns, matplotlib.pyplot as plt
from sklearn.externals import joblib
sns.set_style('whitegrid')
# Load mutation signatures visualizations
this_dir = os.path.dirname(__file__)
viz_dir = os.path.join(this_dir, '..', '..', '../mutation-signatures-viz/src')
print(viz_dir)
sys.path.append( viz_dir )
from mutation_signatures_visualization import plot_signatures
# Parse command-line arguments
# parser = argparse.ArgumentParser()
# parser.add_argument('-cf', '--counts_file', type=str, required=True)
# parser.add_argument('-sf', '--signature_file', type=str, required=True)
# parser.add_argument('-ef', '--exposure_file', type=str, required=True)
# parser.add_argument('-of', '--output_file', type=str, required=True)
# args = parser.parse_args(sys.argv[1:])
# Load the signatures and the counts
sigs = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
sigs.index = [i.replace("Topic", "Signature ") for i in sigs.index]
# Load the exposures
exposures = pd.read_csv(snakemake.input[1], sep="\t", index_col=0)
exposures.columns = [i.replace("Topic", "Signature ") for i in exposures.columns]
# Load the counts
sbs96_df = pd.read_csv(snakemake.input[2], sep='\t', index_col=0)
# required input are the counts_df, the sigs_df and the exposures_df
# plot_signatures(counts_df, signature_df, exposure_df, output_file)
plot_signatures(sbs96_df, sigs, exposures, snakemake.output[0])
# phi are the signatures
# theta is the proportional contribution of each signature to each patient
# Compute contribution per signature
|
994,680 | 797bc00dfb029400265e57f6168a00682bcf60c1 | import json
from model.models import User, Dialog
from util.datetime_utils import DateTimeUtils
class UpdateHistoryManager(object):
def __init__(self, dialogs_holder):
self.dict_of_users_updates = dict()
self.dict_of_users_histories = dict()
self.dialogs_holder = dialogs_holder
def on_new_msg(self, msg):
dialog_id = msg.dialog_id
found, dialog = self.dialogs_holder.get_dialog(did=dialog_id)
if found:
for other_user_id in dialog.list_of_users:
try:
user_updates = self.dict_of_users_updates[other_user_id]
except KeyError:
user_updates = self.dict_of_users_updates[other_user_id] = UserUpdateHolder(user_id=other_user_id)
user_updates.add(message=msg, dialog_id=dialog_id)
return True, "OK"
else:
return False, "dialog with did[%s] not found" % dialog_id
def on_get_update_json(self, user_id):
try:
user_updates = self.dict_of_users_updates[user_id]
# copy to history storage
try:
user_histories = self.dict_of_users_histories[user_id]
except KeyError:
user_histories = self.dict_of_users_histories[user_id] = UserHistoryHolder(user_id=user_id)
result = user_updates.get_as_json()
for did, dialog_update in user_updates.storage.iteritems():
user_histories.on_add(dialog_update_list=dialog_update)
user_updates.clear()
return result
except KeyError:
return json.dumps({})
def on_get_history_json(self, user_id, dialog_id=None):
# todo: not working yet
try:
user_histories = self.dict_of_users_histories[user_id]
except KeyError:
user_histories = self.dict_of_users_histories[user_id] = UserHistoryHolder(user_id=user_id)
return user_histories.get_as_json(dialog_id=dialog_id)
# covered
class UserUpdateHolder(object): # it is a data structure
def __init__(self, user_id):
self.storage = dict()
self.user_id = user_id
def add(self, message, dialog_id):
try:
dialog_list = self.storage[dialog_id]
except KeyError:
dialog_list = self.storage[dialog_id] = list() # ?
dialog_list.append(message)
def get_as_json(self):
result = self.to_json()
# self.storage = dict() # 2. wipe out data
# # todo: is it memory safe to do such a thing
return result
def to_json(self):
"""
Returns:
str:
"""
temp = self.to_dict()
return json.dumps(temp)
def to_dict(self, target_dict=None):
"""
Recursive serialization to dict
:param target_dict:
:return:
"""
if target_dict is None:
target_dict = self.storage
result_dict = dict()
def to_inner_dict(actual_value):
if hasattr(actual_value, 'to_dict'):
return actual_value.to_dict()
else:
return actual_value
for key, value in target_dict.iteritems():
if value is not None:
if isinstance(value, dict):
result_dict[key] = self.to_dict(target_dict=value)
elif isinstance(value, list):
temp = list()
for item in value:
temp.append(to_inner_dict(actual_value=item))
result_dict[key] = temp
else:
result_dict[key] = to_inner_dict(actual_value=value)
return result_dict
def get_as_dict(self):
return dict(self.__dict__)
def clear(self):
self.storage = dict()
# covered
class UserHistoryHolder(object): # it is a data structure
def __init__(self, user_id):
self.storage = dict()
self.user_id = user_id
def on_add(self, dialog_update_list):
for msg in dialog_update_list:
dialog_id = msg.dialog_id
try:
local_dialog_list = self.storage[dialog_id]
except KeyError:
local_dialog_list = self.storage[dialog_id] = list()
local_dialog_list.append(msg)
pass
# user_id = user_update_dict['user_id']
#
# for did, d_list in user_update_dict['storage'].iteritems():
#
# for dialog_id, dialog_list in user_update_dict['storage'].iteritems():
# try:
# local_dialog_list = self.storage[dialog_id]
# except KeyError:
# local_dialog_list = self.storage[dialog_id] = list()
#
# local_dialog_list.extend(dialog_list)
#
# # user_update_dict.clear() # works only with direct object array -> call on top
def get_as_json(self, dialog_id):
try:
result = json.dumps(self.to_dict()['storage'][dialog_id])
except KeyError:
return None
return result
def to_dict(self, target_dict=None):
"""
Recursive serialization to dict
:param target_dict:
:return:
"""
if target_dict is None:
target_dict = self.storage
result_dict = dict()
def to_inner_dict(actual_value):
if hasattr(actual_value, 'to_dict'):
return actual_value.to_dict()
else:
return actual_value
for key, value in target_dict.iteritems():
if value is not None:
if isinstance(value, dict):
result_dict[key] = self.to_dict(target_dict=value)
elif isinstance(value, list):
temp = list()
for item in value:
temp.append(to_inner_dict(actual_value=item))
result_dict[key] = temp
else:
result_dict[key] = to_inner_dict(actual_value=value)
return result_dict
# covered
class UsersHolder(object):
def __init__(self):
self.storage = dict()
def add_user(self, user):
if isinstance(user, User):
try:
_temp = self.storage[user.uid]
# has to fail with KeyError for the unique instance
except KeyError:
self.storage[user.uid] = user
return True, "added"
return False, "already exists"
else:
return False, "user has to be User instance"
def remove_user(self, user_uid):
if user_uid in self.storage:
del self.storage[user_uid]
return True, "deleted"
else:
return False, "user_id not found"
def update_user(self, user):
try:
self.storage[user.uid] = user
return True, "updated"
except KeyError:
return False, "user_id not found"
def get_user(self, uid):
try:
result = self.storage[uid]
return True, result
except KeyError:
return False, "user_uid not found"
def get_all(self):
result = dict(self.storage)
return True, result
# covered
class DialogsHolders(object):
__instance = None
def __init__(self):
self.storage = dict()
@staticmethod
def get_instance():
if DialogsHolders.__instance is None:
DialogsHolders.__instance = DialogsHolders()
return DialogsHolders.__instance
def create_dialog(self, list_of_users):
if isinstance(list_of_users, list):
success, did = Dialog.did_from_users_list(list_of_users)
if success:
dialog = Dialog(dialog_id=did,
list_of_users=list_of_users,
created=DateTimeUtils.get_today_full_datetime_milliseconds())
self.storage[did] = dialog
return True, dialog
else:
return False, did
else:
return False, "list of users has to be list instance"
def remove_dialog(self, list_of_users):
if isinstance(list_of_users, list):
if len(list_of_users) > 1:
did = ""
# todo: this is a horrible idea for group chats with big number of participants
for item in list_of_users:
did += item.uid
del self.storage[did]
return True, "deleted"
else:
return False, "dialog_holder, list len has to be > 1"
else:
return False, "dialog_holder, list of users has to be list instance"
def get_dialog(self, did):
if did in self.storage:
return True, self.storage[did]
else:
return False, "not found"
|
994,681 | 04ea0e28afeb9e413de5b8606034893fdd6ce50e | #!/usr/bin/python
#script to read in a matcher generated pdb file, figure out the catalytic sidechains, and carry out some basic python commands
from pymol import cmd
from pymol import util
def showdes(desname=None):
loaded_objs = cmd.get_names('objects')
print loaded_objs
if desname == None and not(loaded_objs):
print 'Error: please load a file'
return 1
elif desname == None:
desname = loaded_objs[0]+'.pdb'
read_file = open(desname,'r')
cat_res = []
for line in read_file:
if line[0:4] == 'ATOM': break
if line[0:24] == 'REMARK BACKBONE TEMPLATE':
cols = line.split()
cat_res.append(cols[10])
elif line[0:24] == 'REMARK 0 BONE TEMPLATE':
cols = line.split()
cat_res.append(cols[11])
read_file.close()
#print cat_res
cat_string = 'resi '
for resis in cat_res:
cat_string = cat_string + resis + '+'
cat_string = cat_string[:-1] #take away last +
print cat_string
cmd.select('lig','het and !name V*')
cmd.select('cats',cat_string)
cmd.select('acts','lig around 10 and !cats and !name V* and !lig')
cmd.hide('lines')
cmd.show('sticks','lig')
cmd.show('sticks','cats')
cmd.show('car')
cmd.select('acts','(byres acts) and !hydro',enable=0)
cmd.set('cartoon_transparency','0.5')
util.cba(11,'cats')
cmd.extend("showdes",showdes)
|
994,682 | b731d9a3514cfff115f84a2114305872d851a63c |
def bath(a,b):
count = 0
d = 0
c = 1/2
while b>0:
d += c
c *= 2
b = b//2
count += 1
return int(d),c
for i in range(1,int(input())+1):
N,K = map(int,input().split())
de, num= bath(N,K)
lis = [(N-de)//int(num)]*int(num)
sumi = sum(lis)
df = N-de-sumi
for j in range(int(df)):
lis[j] += 1
v = lis[int(K-num)]
if v != 0:
a = int((v-1)//2)
b = int(v//2)
else:
a,b =0,0
print("Case #{}: {} {}".format(i,max(a,b),min(a,b)))
|
994,683 | aa09b3e34c941b58b72224b4bbb92bcfe5c34af9 | from datetime import datetime
from odoo import fields, models
class TransactionHistory(models.Model):
_name = 'transaction.history'
_description = 'book transaction'
transaction_id = fields.Many2one('transaction')
name = fields.Char(related='transaction_id.name', store=True)
date = fields.Date()
book_id = fields.Many2one('book')
|
994,684 | 979201f7a6da2738a899128c15f209d8dd5627b8 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from cache import cache
from lib.gcs import list_blobs
_MAX_BLOBS = 1
_FC_FEEDS_BUCKET = 'datacommons-feeds'
# Define blueprint
bp = Blueprint(
"factcheck",
__name__,
url_prefix='/factcheck'
)
@bp.route('/')
def homepage():
return render_template('factcheck/factcheck_homepage.html')
@bp.route('/faq')
def faq():
return render_template('factcheck/factcheck_faq.html')
@bp.route('/blog')
def blog():
return render_template('factcheck/factcheck_blog.html')
@bp.route('/download')
def download():
recent_blobs = list_blobs(_FC_FEEDS_BUCKET, _MAX_BLOBS)
return render_template(
'factcheck/factcheck_download.html', recent_blobs=recent_blobs) |
994,685 | 42e14747529c7e2a454d4e505e5cc0f8a765cf85 | import numpy as np
from time import time
from .. import func as Z
from .. import metric as metric_module
from .. import optim
from .data.dataset import Dataset
from .data.ram_dataset import RamDataset
from .data.training_data import TrainingData
from . import hook as hook_module
from .hook import Hook
def _unpack_training_data(data, val=None):
"""
Unpack the given training data.
It can take different forms:
* TrainingData: we already have a training data object.
* Given numpy arrays and `val` fraction: perform our own train/val split.
* MNIST: np.ndarray, np.ndarray
* Visual question answering: (np.ndarray, np.ndarray), np.ndarray
* No `val`: the data is a 2-tuple of (train split, val split).
* MNIST: (np.ndarray, np.ndarray), (np.ndarray, np.ndarray)
* Also MNIST: RamDataset, RamDataset
"""
if isinstance(data, TrainingData):
assert val is None
return data
if val is not None:
x, y = data
return TrainingData.from_x_y(x, y, val)
train, val = data
if not isinstance(train, Dataset):
xx, yy = train
train = RamDataset(xx, yy)
if not isinstance(val, Dataset):
xx, yy = val
val = RamDataset(xx, yy)
return TrainingData(train, val)
def _bin_or_cat(y_sample_shape, if_bin, if_cat):
return if_bin if y_sample_shape in {(), (1,)} else if_cat
def _unpack_metric(metric, y_sample_shape):
if metric in {'xe', 'cross_entropy'}:
metric = _bin_or_cat(y_sample_shape, 'binary_cross_entropy',
'categorical_cross_entropy')
elif metric in {'acc', 'accuracy'}:
metric = _bin_or_cat(y_sample_shape, 'binary_accuracy',
'categorical_accuracy')
else:
pass
return metric_module.get(metric)
def _unpack_metrics(metrics, out_shapes):
rrr = []
for i, items in enumerate(metrics):
if not isinstance(items, (list, tuple)):
items = [items]
out_shape = out_shapes[i]
loss = _unpack_metric(items[0], out_shape)
metrics = []
for item in items[1:]:
metric = _unpack_metric(item, out_shape)
metrics.append(metric)
rr = [loss] + metrics
rrr.append(rr)
return rrr
def _unpack_hook(key, value):
if value is None or value is False:
hook = None
elif value is True:
hook = getattr(hook_module, key)()
elif isinstance(value, Hook):
hook = value
else:
hook = getattr(hook_module, key)(value)
return hook
def _unpack_hooks(defaults, kwargs):
d = dict(defaults)
d.update(kwargs)
ret = []
for key, value in d.items():
hook = _unpack_hook(key, value)
if hook:
ret.append(hook)
return ret
class Model(object):
default_hooks = {
'stop': 25,
'verbose': 2,
}
def model_params(self):
raise NotImplementedError
def model_forward(self, xx, is_training):
raise NotImplementedError
def predict_on_batch(self, xx):
"""
list of np.ndarray -> list of np.ndarray
Predict on a single batch.
"""
xx = list(map(Z.constant, xx))
yy = self.model_forward(xx, False)
return list(map(Z.to_numpy, yy))
def predict(self, xx, batch_size=64):
"""
list of np.ndarray -> list of np.ndarray
Predict.
"""
lens = set()
for x in xx:
assert isinstance(x, np.ndarray)
lens.add(len(x))
assert len(lens) == 1
assert isinstance(batch_size, int)
assert 0 < batch_size
num_samples = list(lens)[0]
num_batches = num_samples // batch_size
yy = None
for i in range(num_batches):
a = i * batch_size
z = (i + 1) * batch_size
ins = []
for x in xx:
ins.append(x[a:z])
outs = self.predict_on_batch(ins)
if yy is None:
yy = outs
else:
for i, out in enumerate(outs):
yy[i] += out[i]
return yy
def train_on_batch(self, xx, yy_true, metrics, opt, hooks=None,
progress=None):
"""
Train on a single batch.
"""
if hooks is None:
hooks = []
if progress is None:
progress = {}
stop = False
for hook in hooks:
if hook.on_train_batch_begin(progress, xx, yy_true):
stop = True
if stop:
results = None
return results, None
for i, x in enumerate(xx):
xx[i] = Z.constant(x)
for i, y_true in enumerate(yy_true):
yy_true[i] = Z.constant(y_true)
is_training = True
loss_vars = []
with Z.autograd_record():
yy_pred = self.model_forward(xx, is_training)
for y_pred, y_true, funcs in zip(yy_pred, yy_true, metrics):
loss = funcs[0]
loss_var = Z.mean(loss(y_true, y_pred))
loss_vars.append(loss_var)
grad_tensors = []
for y_pred in yy_pred:
grad_tensor = Z.tensor(np.ones(1).astype(Z.floatx()))
grad_tensors.append(grad_tensor)
Z.backward(loss_vars, grad_tensors)
opt.step()
results = []
for i, (y_pred, y_true, funcs) in \
enumerate(zip(yy_pred, yy_true, metrics)):
loss_value = Z.to_scalar(loss_vars[i])
values = [loss_value]
for metric in funcs[1:]:
value = Z.to_scalar(Z.mean(metric(y_true, y_pred)))
values.append(value)
results.append(values)
stop = False
for hook in hooks:
if hook.on_train_batch_end(progress, results):
stop = True
return results, stop
def evaluate_on_batch(self, xx, yy_true, metrics, hooks=None,
progress=None):
"""
Evaluate on a single batch.
"""
if hooks is None:
hooks = []
if progress is None:
progress = {}
stop = False
for hook in hooks:
if hook.on_eval_batch_begin(progress, xx, yy_true):
stop = True
if stop:
results = None
return results, None
for i, x in enumerate(xx):
xx[i] = Z.constant(x)
for i, y_true in enumerate(yy_true):
yy_true[i] = Z.constant(y_true)
is_training = False
yy_pred = self.model_forward(xx, is_training)
results = []
for i, (y_pred, y_true, metrics) in \
enumerate(zip(yy_pred, yy_true, metrics)):
values = []
for j, metric in enumerate(metrics):
var = Z.mean(metric(y_true, y_pred))
values.append(Z.to_scalar(var))
results.append(values)
stop = False
for hook in hooks:
if hook.on_eval_batch_end(progress, results):
stop = True
return results, stop
def train_on_epoch(self, data, metrics, opt, batch_size=64, hooks=None,
progress=None):
"""
Train over a single epoch.
Users should call `train(..., stop=1)` to train for one epoch, not use
this method directly. This is called by do_train().
"""
if hooks is None:
hooks = []
if progress is None:
progress = {}
stop = False
for hook in hooks:
if hook.on_epoch_begin(progress, data):
stop = True
if stop:
results = None
return results, stop
num_batches = data.get_num_batches(batch_size)
train_metrics_per_output = \
list(map(lambda funcs: [[] for _ in funcs], metrics))
val_metrics_per_output = \
list(map(lambda funcs: [[] for _ in funcs], metrics))
t0 = time()
for batch, (xx, yy, is_training) in \
enumerate(data.each_batch(batch_size)):
sub_progress = dict(progress)
sub_progress.update({
'batch': batch,
'num_batches': num_batches,
})
if is_training:
results, stop = self.train_on_batch(
xx, yy, metrics, opt, hooks, sub_progress)
split_results = train_metrics_per_output
else:
results, stop = self.evaluate_on_batch(
xx, yy, metrics, hooks, sub_progress)
split_results = val_metrics_per_output
if results is not None:
for i, values in enumerate(results):
for j, value in enumerate(values):
split_results[i][j].append(value)
if stop:
results = None
return results, stop
t = time() - t0
results = {'time': t}
if progress:
results['progress'] = progress
mean = lambda ff: sum(ff) / len(ff)
results['train'] = []
for i, metric_value_lists in enumerate(train_metrics_per_output):
means = []
for values in metric_value_lists:
means.append(mean(values))
results['train'].append(means)
if val_metrics_per_output[0][0]:
results['val'] = []
for i, metric_value_lists in enumerate(val_metrics_per_output):
means = []
for values in metric_value_lists:
means.append(mean(values))
results['val'].append(means)
stop = False
for hook in hooks:
if hook.on_epoch_end(progress, results):
stop = True
return results, stop
def train(self, data, metrics, opt='adam', val=None, batch_size=64,
start=0, **hooks):
data = _unpack_training_data(data, val)
metrics = _unpack_metrics(metrics, data.get_sample_shapes()[1])
opt = optim.get(opt)
assert isinstance(start, int)
assert 0 <= start
hooks = _unpack_hooks(self.default_hooks, hooks)
opt.set_params(self.model_params())
train_kwargs = {
'data': data,
'metrics': metrics,
'opt': opt,
'epoch_begin': start,
'hooks': hooks,
}
epoch_end_excl = None
for hook in hooks:
z = hook.on_train_begin(self, train_kwargs)
if z is None:
continue
if epoch_end_excl is None or z < epoch_end_excl:
epoch_end_excl = z
epoch = start
history = []
while True:
progress = {
'epoch_begin': start,
'epoch': epoch,
'epoch_end_excl': epoch_end_excl,
}
results, stop = self.train_on_epoch(
data, metrics, opt, batch_size, hooks, progress)
if results is not None:
history.append(results)
if stop:
break
epoch += 1
for hook in hooks:
hook.on_train_end(history)
return history
def train_regressor(self, data, opt='adam', val=None, batch_size=64,
start=0, **hooks):
"""
Train as a regressor.
Wrapper around train() that automatically uses mean squared error loss.
Single output only.
"""
metrics = 'mean_squared_error',
return self.train(data, metrics, opt, val, batch_size, start, **hooks)
def train_classifier(self, data, opt='adam', val=None, batch_size=64,
start=0, **hooks):
"""
Train as a classifier.
Wrapper around train() that automatically uses cross-entropy loss and
adds accuracy as a metric. Single output only.
"""
metrics = 'cross_entropy', 'accuracy'
return self.train(data, [metrics], opt, val, batch_size, start, **hooks)
|
994,686 | 5c4e0d89b447b70f220216494f2da8f88a8d1ca8 | from django.db.models import Manager
from utils.db.query import GetOrCreateQuery, ObjectExisting
class ResourceProviderManager(Manager):
def get_or_create_by_name(self, name):
if name is None:
return ObjectExisting(None, False)
return GetOrCreateQuery(self.model).get_or_create(name=name)
|
994,687 | 384ab242cd3783049d2f37cb8d2ac5ac9a6d285e | from src.main.MainApplication import MainApplication
from src.utils.ClientUtils import create_new_client
from src.utils.Utils import clear_all_input, load_record, update_client_list, validate_input
from src.logic.AbstractPackingInvoice import AbstractPackingInvoiceClass
from utils.logger import log
class Packing(AbstractPackingInvoiceClass):
# Overriding method
def run(self, main: MainApplication) -> bool:
# documentation see abstract class
# The field_data formed by the window elements' key as key and the corresponding header of the data_map as
# values
field_data = {
"_PL_CLIENT_CB_": "Client Name",
"_PL_INV_IP_": "Invoice No.",
"_PL_SC_IP_": "S/C No.",
"_PL_DATE_IP_": "Date",
"_PL_DES_PORT_IP_": "Destination port",
"_PL_GOODS_DES_IP_": "Goods description",
"_PL_PACK_SP_": "Bags",
"_PL_NET_SP_": "Net weight",
"_PL_GROSS_SP_": "Gross weight",
"_PL_CBM_SP_": "Total Measurement"
}
if self.event == "_PL_NEW_BTN_":
name = create_new_client(main)
# chart the new client name into the field
main.windows_map["packing"]["_PL_CLIENT_CB_"].Update(name)
update_client_list(main, main.windows_map["packing"], "_PL_CLIENT_CB_")
return True
elif self.event == "_PL_LOAD_BTN_":
# opening a record select window and load a record to fields
return load_record(self, main, main.pck_inv_data_obj, "packing", field_data)
elif self.event == "_PL_CLA_BTN_":
# show message box
if main.mg.show_ask_box("Are you sure to clear all inputs?") == "Yes":
clear_all_input(main.windows_map["packing"], self.values)
return True
elif self.event == "_PL_SAVE_BTN_":
log(self.record)
# save the record
for each in self.values.values():
if each != "":
# check validation
result = validate_input(main.packing_ui, field_data, self.values)
if len(result) > 0:
string_builder = ""
for string in result:
string_builder = string_builder + string + "\n"
main.mg.show_warning_box(string_builder)
return True
self.save(main, field_data)
main.pck_inv_data_obj.save_data()
# show message box
main.mg.show_info_box("Record Saved!")
return True
# show message box
main.mg.show_warning_box("There is nothing to save!")
return True
elif self.event == "_PL_QUIT_BTN_":
main.windows_map["packing"].hide()
# ask for saving the unsaved changes
if main.mg.show_ask_box("Are you sure to quit the edit window?") == "Yes":
if main.mg.show_ask_box("Would you like to save?") == "Yes":
result = validate_input(main.packing_ui, field_data, self.values)
if len(result) > 0:
string_builder = ""
for string in result:
string_builder = string_builder + string + "\n"
main.mg.show_warning_box(string_builder)
main.windows_map["packing"].un_hide()
return True
self.save(main, field_data)
main.pck_inv_data_obj.save_data()
# show message box
main.mg.show_info_box("Record Saved!")
return False
main.windows_map["packing"].un_hide()
return True
elif self.event is None:
return False
else:
return True
|
994,688 | db47f2d8f1ba3b41d609874a000bffe2804e9239 | from jesse.strategies import Strategy
# test_on_reduced_position
class Test18(Strategy):
def should_long(self):
return self.price < 7
def go_long(self):
qty = 2
self.buy = qty, 7
self.stop_loss = qty, 5
self.take_profit = [
(1, 15),
(1, 13)
]
def on_reduced_position(self, order):
self.take_profit = abs(self.position.qty), self.price
def go_short(self):
pass
def should_cancel_entry(self):
return False
def filters(self):
return []
def should_short(self):
return False
|
994,689 | 43ed6dee07863168140a9b10a2d2109e85489b2a | def input():
return [item.rstrip('\n').split(',') for item in open("input.txt", 'r')]
def output(item):
open("output.txt", 'w').write(str(item))
def run(data):
l1, d1 = makeLines(data[0])
l2, d2 = makeLines(data[1])
con = []
x1 = 0
x2 = 0
y1 = 0
y2 = 0
for i in range(1,len(l1)):
x1 = l1[i-1][0]
x2 = l1[i][0]
y1 = l1[i-1][1]
y2 = l1[i][1]
if x1 == x2:
for j in range(1,len(l2)):
if min(y1,y2) < l2[j][1] and max(y1,y2) > l2[j][1] and min(l2[j-1][0],l2[j][0]) < x1 and max(l2[j-1][0],l2[j][0]) > x1:
extraDist = abs(abs(l2[j-1][0]) - abs(x1)) + abs(abs(y1) - abs(l2[j-1][1]))
con.append((i,j,extraDist))
elif y1 == y2:
for j in range(1,len(l2)):
if min(x1,x2) < l2[j][0] and max(x1,x2) > l2[j][0] and min(l2[j-1][1],l2[j][1]) < y1 and max(l2[j-1][1],l2[j][1]) > y1:
extraDist = abs(abs(l2[j-1][0]) - abs(x1)) + abs(abs(y1) - abs(l2[j-1][1]))
con.append((i,j,extraDist))
return minDist(con,d1,d2)
def minDist(con,d1,d2):
return min([d1[item[0]-1] + d2[item[1]-1] + item[2] for item in con])
def makeLines(list):
dir = ''
dist = 0
distList = [0]
x = 0
y = 0
ret = [(x,y)]
for i in range(len(list)):
dir = list[i][0]
dist = int(list[i][1:])
distList.append(distList[i] + dist)
if dir == 'R':
x += dist
elif dir == 'L':
x -= dist
elif dir == 'U':
y += dist
elif dir == 'D':
y -= dist
ret.append((x,y))
return ret, distList
if __name__ == '__main__':
output(run(input()))
|
994,690 | 7b8892bc8a3d67078526d838a8ba1b022b46b174 | """
locale.py
part of chipFish
handles and loads the localisation.txt file.
chipFish then looks up (by string) in what is essentially a huge dictionary
of language data.
"""
import sys, os
from wx import xrc
supported_languages = frozenset(["en-gb"])
language_map = {"en-gb": "Enlish (UK)", "en-us": "English (North American)", "zh-cn": "Chinese (Simplified)"}
class locale:
"""
**Purpose**
emulate a dictionary
"""
def __init__(self, language):
assert language in language_map, "language type '%s' is unknown" % language
assert language in supported_languages, "language: %s not currently supported" % language_map[language]
print "Info: Register Localisation: %s" % language
self.locale = language
self.__path_to_language_file = os.path.join(sys.path[0], "locale", self.locale, "language.txt")
self.__data = {}
self.__load()
print "Info: Registered %s translatable items" % len(self.__data)
def __load(self):
assert os.path.exists(self.__path_to_language_file), "language file not found"
oh = open(self.__path_to_language_file, "rU")
for line in oh:
line = line.strip("\n").strip("\r")
if len(line) and line[0] not in ["#", "", " ", "\n"]:
head = line.split("\t")[0]
tail = line.split("\t")[1]
self.__data[head] = tail
oh.close()
def __getitem__(self, key):
return(self.__data[key])
def load_main_gui(self, gui_handle):
"""
**Purpose**
The main gui frame of glbase contains a lot of locale
settings. Instead of stuffing all the locale stuff into there,
instead I stuff it here. It's going to have to make a mess
somewhere. Adn here is better as locale is likely to
be relatively lightweight.
**Arguments**
gui_handle
the handle to the main chipFish gui from which I can
call xrc.XRCCTRL() on.
"""
elementsToModify = ["butZoomIn", "butZoomOut", "butGoToLoc",
"butChrUp", "butChrDown"]
for element in elementsToModify:
xrc.XRCCTRL(gui_handle, element).SetLabel(self.__data[element])
return(True)
# the menus are a little different:
elementsToModify = ["file", "about", "help"]
menu_head = gui_handle.GetMenuBar()
for element in elementsToModify:
item = menu_head.FindItem(element)
print item
if item:
menu_item = menu_head.GetMenu(item)
menu_item.SetLabel(element)
def load_search_gui(self):
"""
**Purpose**
see load_main_gui() for the justification.
"""
pass
|
994,691 | dd7d4e4a50bce5ade5f9716b4cda15ad9b608538 | '''
update the 'earnings_announcement' field on all Ticker objects,
using expected earnings report dates retrieved from Moosie's API
'''
import urllib
import json
import datetime
import requests
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from requests.auth import HTTPBasicAuth
from satellite.models import Ticker, DataHarvestEventLog, DATA_HARVEST_TYPE_EARNINGS_DATES
def get_earnings_announcement_date(ticker_symbol):
"""
get the next expected earnings date from Moosie's API at:
https://fool.moosiefinance.com:8181/api/calendar/v1/company/ticker/{ticker1:ticker2}?pretty=1&canon=1
"""
earnings_announcement_url = 'https://fool.moosiefinance.com:8181/api/calendar/v1/company/ticker/%s' % ticker_symbol
earnings_response = requests.get(earnings_announcement_url, auth=HTTPBasicAuth('calendar', 'aRfy!poo38;'), verify=False)
earnings_response=earnings_response.json()
earnings_announcement_date = earnings_response[ticker_symbol]['earnings_date']
print earnings_announcement_date
return earnings_announcement_date
class Command(BaseCommand):
help = 'Updates the earnings_announcement for all Ticker objects'
def handle(self, *args, **options):
print 'starting script'
event_log = DataHarvestEventLog()
event_log.data_type = DATA_HARVEST_TYPE_EARNINGS_DATES
event_log.notes = 'running'
event_log.save()
script_start_time = datetime.datetime.now()
tickers_symbols_that_errored = set()
tickers = Ticker.objects.all().order_by('ticker_symbol')
for ticker in tickers:
ticker.promised_coverage = None
ticker_symbol = ticker.ticker_symbol
if '-' in ticker_symbol:
ticker_symbol = ticker_symbol.replace('-','.')
print ticker_symbol
try:
earnings_announcement_date = get_earnings_announcement_date(ticker_symbol)
print ticker_symbol, earnings_announcement_date
ticker.earnings_announcement = earnings_announcement_date
ticker.save()
except Exception as e:
ticker.earnings_announcement = None
ticker.save()
print "couldn't set earnings date", ticker_symbol, str(e), ticker.earnings_announcement
tickers_symbols_that_errored.add(ticker_symbol)
if ticker.earnings_announcement == None:
print ticker.promised_coverage
ticker.promised_coverage = 'Earnings date pending'
print ticker.promised_coverage
ticker.save()
else:
continue
script_end_time = datetime.datetime.now()
total_seconds = (script_end_time - script_start_time).total_seconds()
print 'time elapsed: %d seconds' % total_seconds
if tickers_symbols_that_errored:
event_log.notes = 'errors: ' + ', '.join(tickers_symbols_that_errored)
else:
event_log.notes = 'no errors'
event_log.save()
print 'finished script'
print 'tickers that errored: %d' % len(tickers_symbols_that_errored)
print ', '.join(tickers_symbols_that_errored) |
994,692 | 23c877dc5747494c4ba7246493f10cbab3da9f35 | import unittest
import subprocess
import pexpect
import sys
import re
from tests.base import GraderBase
class Part1(GraderBase) :
def test_word_is_not_palindrome(self) :
'''Testing a non-palindrome word'''
with self.run_test(__name__ + ".prog5", 'word', timeout=1) as test :
test.send('notaplaindrome\n')
try:
test.expect('(?i)is\s+not')
except Exception :
self.fail("Incorrect response to the word 'notaplaindrome'")
def test_word_is_palindrome(self) :
'''Testing a palindrome word'''
with self.run_test(__name__ + ".prog5", 'word', timeout=1) as test :
test.send('aviddiva\n')
try:
test.expect('(?i)is\s+a')
except Exception :
self.fail("Incorrect response to the word 'aviddiva'")
class Part2(GraderBase) :
def test_sentence_is_palindrome(self) :
'''Tesing a palindrome sentence.'''
with self.run_test(__name__ + ".prog5", 'sentence', timeout=1) as test :
test.send("amy must i jujitsu my ma\n")
try:
test.expect('(?i)palindrome\s+detected')
except Exception :
self.fail("Failed to detect a palindrome.")
def test_abort(self) :
'''Tesing the left button.'''
with self.run_test( __name__ + ".prog5", 'abort', timeout=1) as test :
test.send('blah blah blah blah blah blah blah blah blah blah blah')
try:
test.expect('(?i)abort')
except Exception :
self.fail("Failed to abort when I pushed the left button.")
files = [
['palindrome((_|-)word)?\.ino', Part1],
['palindrome(_|-)sentence\.ino', Part2],
]
name = "Project 5 Grader"
|
994,693 | 204dca3b13cc644bd643a138b093da599882ab1b | ##Simplified battle-ship game in python language
from random import randint
import getpass
def user_mode():
user_mode=int(input("Choose 1 for single player and 2 for two player game: "))
while True:
if user_mode == 1 or user_mode == 2:
True
break
else:
print("Invalid user input. Please choose again.")
False
user_mode=int(input("Choose 1 for single player and 2 for two player game: "))
return user_mode
def get_user_row():
print("Let's hide the ship")
user_row=int(getpass.getpass("Enter row. Enter row between 1 and 3 : "))
while True:
if user_row == 1 or user_row == 2 or user_row == 3:
True
break
else:
print("Invalid user input. Please choose again.")
False
user_row=int(getpass.getpass("Select row. Enter row between 1 and 3 : "))
return user_row
def get_user_column():
user_column=int(getpass.getpass("Enter column. Enter row between 1 and 3 : "))
while True:
if user_column == 1 or user_column == 2 or user_column == 3:
True
break
else:
print("Invalid user input. Please choose again.")
False
user_column=int(getpass.getpass("Select column. Enter row between 1 and 3 : "))
return user_column
def random_row(board):
return randint(1, len(board) - 1)
def random_col(board):
return randint(1, len(board[0]) - 1)
def print_board(board):
for row in board:
print(" ".join(row))
print("Let's play Battleship!")
user_mode=user_mode()
board = []
for x in range(3):
board.append(["O"] * 3)
if user_mode == 1:
ship_row = random_row(board)-1
ship_col = random_col(board)-1
elif user_mode == 2:
ship_row=get_user_row() -1
ship_col = get_user_column() -1
print("------------------------------------------------------")
print("Select values between 1 and 3(inclusive) for both, row and column to find the BattleShip")
print("You have 3 chances")
print_board(board)
for turn in range(3):
guess_row = int(input("Guess Row:"))-1
guess_col = int(input("Guess Col:"))-1
if guess_row == ship_row and guess_col == ship_col:
print("Congratulations! You sunk my battleship!")
break
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print("Oops, that's not even in the ocean. Please try with values between 1 and 3")
elif(board[guess_row][guess_col] == "X"):
print("You guessed that one already.")
else:
print("You missed my battleship!")
board[guess_row][guess_col] = "X"
print (turn + 1)
print_board(board)
if turn>2:
print("Game Over")
print("The ship location is Row:%d and Column: %d"%(ship_row+1,ship_col+1))
board[ship_row][ship_col] = "S"
print_board(board)
|
994,694 | 03907a98f8e397a0edbf57b573fabcdaf40da10c | import paho.mqtt.client as mqtt
import ssl
rootca = r'D:\\Sakshi\\programs\\aws\\machine6_aws\\AmazonRootCA1.pem.txt'
certificate = r'D:\\Sakshi\\programs\\aws\\machine6_aws\\098be122db-certificate.pem.crt'
key_file = r'D:\\Sakshi\\programs\\aws\\machine6_aws\\098be122db-private.pem.key'
c = mqtt.Client()
c.tls_set(rootca, certfile=certificate, keyfile=key_file, cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
broker_address = 'a5mphxmdxvg88.iot.ap-south-1.amazonaws.com'
c.connect(broker_address, 8883)
# 1883 port is for mqtt
# 8883 port is for mqtt with ssl (mqtts)
def onc(c, user_data, flags, rc):
print('Successfully connected to AWS with RC', rc)
c.subscribe("mytopic/iot")
def onm(c, user_data, msg):
my_msg = msg.payload.decode()
print('Message from AWS:', my_msg) # Message published by AWS IoT
if my_msg == 'hello':
c.publish('mytopic/iot', 'Hey AWS, This is Python!')
# Python is publishing message which will be received by AWS
c.on_connect = onc
c.on_message = onm
c.loop_forever()
|
994,695 | cfc7cc5a98e048d4ebc2b268e6fe6de77772b511 | """
.. versionadded:: 0.6.2
Module for converting/creating ``serpentTools`` objects to/from other sources
High-level functions implemented here, such as :func:`toMatlab`, help the
:ref:`cli` in quickly converting files without launching a Python interpreter.
For example,
.. code::
$ python -m serpentTools -v to-matlab my/depletion_dep.m
INFO : serpentTools: Wrote contents of my/depletion_dep.m to
my/depletion_dep.mat
"""
from serpentTools.io.base import * # noqa
|
994,696 | 33c65c011eb1b7dbcbd081d530a07ea6dba35898 | import sys
import math
import numpy as np
import matplotlib.pyplot as plt
import cv2
import skimage.morphology as morphology
def tiger_process(filename):
source0 = cv2.imread(filename)
img = source0[:, :, ::-1]
# computed from blur Radius of 3 and stackoverflow: https://stackoverflow.com/questions/21984405/relation-between-sigma-and-radius-on-the-gaussian-blur
blurOutput = cv2.GaussianBlur(img,(5,5),1.288)
hls = cv2.cvtColor(blurOutput, cv2.COLOR_RGB2HLS).astype(np.float)
# hls thresholds
lower = np.array([0,102,209],dtype = "uint8")
upper = np.array([20.5, 180, 255],dtype = "uint8")
#TODO fix this ugly logic
hls_mask = np.where(np.logical_and(\
np.logical_and(\
np.logical_and(hls[:,:,0] > lower[0] , hls[:,:,0] < upper[0]),\
np.logical_and(hls[:,:,1] > lower[1] , hls[:,:,1] < upper[1])), \
np.logical_and(hls[:,:,2] > lower[2] , hls[:,:,2] < upper[2])), \
1, 0 )
bgr_lower = [0, 0, 235];
bgr_upper = [131, 191, 255];
img = img[:, :, ::-1]
#TODO fix this ugly logic
bgr_mask = np.where(np.logical_and(\
np.logical_and(\
np.logical_and(img[:,:,0] > bgr_lower[0] , img[:,:,0] < bgr_upper[0]),\
np.logical_and(img[:,:,1] > bgr_lower[1] , img[:,:,1] < bgr_upper[1])), \
np.logical_and(img[:,:,2] > bgr_lower[2] , img[:,:,2] < bgr_upper[2])), \
1, 0 )
mask = np.logical_and(bgr_mask, hls_mask)
im2, contours, hierarchy = cv2.findContours(mask.astype(np.uint8)*255,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
hull = cv2.convexHull(contours[0])
#TODO this is pretty gross
# comment/uncomment next 4 lines for debug help
# img = img[:, :, ::-1]
# plt.imshow(img)
# plt.show()
# print(hull[0])
return hull[0]
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Please enter a file name")
else:
filename = sys.argv[1]
tiger_process(filename)
|
994,697 | 0ca1a34e372bf5c4fb314a7f0e84e752482b3324 | def raizQuadrada(a):
import math
return math.sqrt(a) |
994,698 | bcd02749e856a94009cf8c970b05674a5adc41ce | from PyQt5.Qt import * # 包含一些常用类的汇总
import sys
# 1. 创建一个应用程序对象
app = QApplication(sys.argv) # sys.argv接受外部参数
# 2. 控件的操作
# 2.1 创建控件
window = QWidget()
# 2.2 设置控件
window.resize(500, 500)
# 设置窗口标题名称
window.setWindowTitle("顶层窗口设置")
print(window.windowTitle()) # 获取窗口标题文本
# 设置窗口图标
icon = QIcon('ooo.png')
window.setWindowIcon(icon)
print(window.windowIcon()) # 获取窗口图标对象
# 设置窗口不透明度
window.setWindowOpacity(0.9)
print(window.windowOpacity()) # 获取窗口不透明度
# 窗口大小状态
print(window.windowState() == Qt.WindowNoState) # 窗口状态是否处于默认状态(无状态)
# window.setWindowState(Qt.WindowMinimized) # 设置窗口最小化
# window.setWindowState(Qt.WindowFullScreen) # 设置窗口全屏
print(window.windowState() == Qt.WindowNoState) # 窗口状态是否处于默认状态(无状态)
# 多窗口情况
# 创建额外的窗口
w2 = QWidget()
w2.setWindowTitle('w2')
# 2.3 展示控件
window.show()
w2.show()
window.setWindowState(Qt.WindowActive) # 设置窗口活跃,置顶窗口
# 3. 应用程序的执行,进入消息循环
sys.exit(app.exec_())
|
994,699 | cba852340b78b12253f21db4a08074edb3968ecf | # Generated by Django 3.0.8 on 2020-08-18 21:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('team', '0003_auto_20200819_0137'),
]
operations = [
migrations.CreateModel(
name='matches',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('match_winner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='winner', to='team.teamform')),
('team_one', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='home_team', to='team.teamform')),
('team_two', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='away_team', to='team.teamform')),
],
),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.