index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,500 | 1ccaedb6e79101764db1907634ba627a0f9f2bb2 | class Solution(object):
def maxSubArrayLen(self, nums, k):
    """
    Return the length of the longest contiguous subarray of *nums*
    summing to exactly *k* (0 if no such subarray exists).

    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    # first_seen[s] = earliest index whose running prefix sum equals s;
    # the virtual prefix before index 0 has sum 0.
    first_seen = {0: -1}
    prefix = 0
    best = 0
    for i, value in enumerate(nums):
        prefix += value
        # An earlier prefix of (prefix - k) means nums[j+1..i] sums to k.
        j = first_seen.get(prefix - k)
        if j is not None:
            best = max(best, i - j)
        # Record only the first occurrence: the earlier the start,
        # the longer any future matching subarray.
        first_seen.setdefault(prefix, i)
    return best
|
5,501 | 9539d2a4da87af1ff90b83bbcf72dfc8ab7b6db0 | """Unit tests for the `esmvalcore.preprocessor._rolling_window` function."""
import unittest
import iris.coords
import iris.exceptions
import numpy as np
from cf_units import Unit
from iris.cube import Cube
from numpy.testing import assert_equal
from esmvalcore.preprocessor._rolling_window import rolling_window_statistics
def _create_2d_cube():
    """Build an 11x15 (latitude, time) test cube whose rows are 1..15."""
    lat_coord = iris.coords.DimCoord(
        np.arange(-5, 6),
        standard_name='latitude',
        units=Unit('degrees'),
    )
    time_coord = iris.coords.DimCoord(
        np.arange(1, 16),
        standard_name='time',
        units=Unit('days since 1950-01-01 00:00:00', calendar='gregorian'),
    )
    cube = Cube(np.broadcast_to(np.arange(1, 16), (11, 15)),
                var_name='tas',
                units='K')
    cube.add_dim_coord(lat_coord, 0)
    cube.add_dim_coord(time_coord, 1)
    return cube
class TestRollingWindow(unittest.TestCase):
    """Tests for rolling_window_statistics."""

    def setUp(self):
        """Create a fresh 2-D test cube for every test."""
        self.cube = _create_2d_cube()

    def test_rolling_window_time(self):
        """Sum over a length-2 window along the time coordinate."""
        result = rolling_window_statistics(self.cube,
                                           coordinate='time',
                                           operator='sum',
                                           window_length=2)
        expected = np.broadcast_to(np.arange(3, 30, 2), (11, 14))
        assert_equal(result.data, expected)
        assert result.shape == (11, 14)

    def test_rolling_window_latitude(self):
        """Mean over a length-3 window along the latitude coordinate."""
        result = rolling_window_statistics(self.cube,
                                           coordinate='latitude',
                                           operator='mean',
                                           window_length=3)
        expected = np.broadcast_to(np.arange(1, 16), (9, 15))
        assert_equal(result.data, expected)
        assert result.shape == (9, 15)

    def test_rolling_window_coord(self):
        """A missing coordinate must raise CoordinateNotFoundError."""
        self.cube.remove_coord('latitude')
        with self.assertRaises(iris.exceptions.CoordinateNotFoundError):
            rolling_window_statistics(self.cube,
                                      coordinate='latitude',
                                      operator='mean',
                                      window_length=3)

    def test_rolling_window_operator(self):
        """An unsupported operator must raise ValueError."""
        with self.assertRaises(ValueError):
            rolling_window_statistics(self.cube,
                                      coordinate='time',
                                      operator='percentile',
                                      window_length=2)
# Allow running this test module directly; pytest also discovers it.
if __name__ == '__main__':
    unittest.main()
|
5,502 | 1530f1711be6313b07df680721daf4cb0a84edc0 | # ------------------------------------------------------------
# calclex.py
#
# tokenizer for a simple expression evaluator for
# numbers and +,-,*,/
# ------------------------------------------------------------
import ply.lex as lex
# Regular expression rules for simple tokens.
# PLY sorts string rules by decreasing regex length, so the two-character
# patterns ('<>' and '==') are tried before the single characters.
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LKEY = r'\{'
t_RKEY = r'\}'
t_LBRACKET= r'\['
t_RBRACKET= r'\]'
t_TERMINAL= r';'
t_COMMA = r','
# NOTE(review): t_GREATHAN matches '<' and t_LESSTHAN matches '>' -- the
# names look swapped; confirm against the parser before renaming either.
t_GREATHAN= r'\<'
t_LESSTHAN= r'\>'
t_DOT = r'\.'
t_TWODOTS = r':'
t_DIFERENT= r'\<\>'
t_EQUAL = r'='
t_TWOEQUAL= r'=='

# Reserved words: lexeme -> token type.  They are matched by t_ID below and
# remapped, rather than having one regex rule each.
reserved = {
    'if' : 'IF',
    'then' : 'THEN',
    'else' : 'ELSE',
    'while' : 'WHILE',
    'int' : 'INT',
    'float' : 'FLOAT',
    'bool' : 'BOOL',
    'double' : 'DOUBLE',
    'char' : 'CHAR',
    'public' : 'PUBLIC',
    'private' : 'PRIVATE',
    'loop' : 'LOOP',
    'function' : 'FUNCTION',
    'main' : 'MAIN',
    'var' : 'VARS',
    'print' : 'PRINT'
}

# List of token names (always required by PLY); the reserved-word types are
# appended so the parser can reference them too.
tokens = ['NUMBER','PLUS','MINUS','TIMES','DIVIDE','LPAREN','RPAREN','LKEY','RKEY','LBRACKET','RBRACKET','TERMINAL','ID','COMMA','GREATHAN','LESSTHAN','DOT','TWODOTS','DIFERENT','EQUAL','TWOEQUAL'] + list(reserved.values())
# Identifier: a letter or underscore followed by letters, digits or
# underscores.  The docstring below IS the token regex (PLY convention).
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Reserved words also match this pattern, so remap their token type.
    t.type = reserved.get(t.value,'ID')  # Check for reserved words
    return t
# Discard '#' line comments.
def t_COMMENT(t):
    r'\#.*'
    # Returning nothing makes PLY drop the matched text entirely.
    pass
# No return value. Token discarded
def t_FLOAT(t):
    r'\d+\.\d+'
    # Function rules are tried in definition order, so this runs before
    # t_NUMBER and keeps "123.45" from splitting into two integers.
    t.value=float(t.value)
    return t
# A regular expression rule with some action code.
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)  # store the numeric value, not the raw lexeme
    return t
# Track line numbers so error messages can report where a problem is.
def t_newline(t):
    r'\n+'
    # One token may swallow several consecutive newlines; count them all.
    t.lexer.lineno += len(t.value)
# Compute column.
# input is the input text string
# token is a token instance
def find_column(input, token):
    """Return the 1-based column of *token* within the full *input* text.

    input: the complete text handed to the lexer (lexer.lexdata).
    token: a LexToken; only token.lexpos is used.
    """
    # Index of the first character of the line the token sits on.
    # rfind returns -1 when the token is on the first line, so +1 yields 0
    # and the arithmetic below is uniform for every line.  (The original
    # added an extra +1 from the newline position itself, which made every
    # column after the first line off by one.)
    line_start = input.rfind('\n', 0, token.lexpos) + 1
    return (token.lexpos - line_start) + 1
# Characters silently skipped between tokens (PLY's special t_ignore
# string): spaces and tabs.
t_ignore = ' \t'
# Error handling rule: report the offending character and resynchronize.
def t_error(t):
    """Print an illegal character with its line/column and skip it."""
    # Bug fix: find_column needs the *entire* input text (t.lexer.lexdata);
    # the original passed the single offending character, which made the
    # reported column meaningless.
    column = find_column(t.lexer.lexdata, t)
    # Parenthesized single-argument print works under Python 2 and 3; the
    # original message also promised a line number it never included.
    print("Illegal character '%s' in column %d on line %d" % (t.value[0], column, t.lexer.lineno))
    t.lexer.skip(1)
# Build the lexer from the rules defined above.
lexer = lex.lex()

# Test it out.
# NOTE(review): "int a= 123.1234;" will tokenize the literal as FLOAT even
# though it is assigned to an int -- type checking is the parser's job.
data = '''
int main () {
int a= 123.1234;
int b =123412341;
}
'''

# Give the lexer some input
lexer.input(data)

# Tokenize and dump every token (Python 2 print statement; this module is
# Python 2 code).
while True:
    tok = lexer.token()
    if not tok: break  # No more input
    print tok
|
5,503 | 8e1de62f2490d2276a834ae1ab0f1958649fa821 | # 938. Range Sum of BST
# Share
# Given the root node of a binary search tree, return the sum of values of all nodes with value between L and R (inclusive).
# The binary search tree is guaranteed to have unique values.
# Example 1:
# Input: root = [10,5,15,3,7,null,18], L = 7, R = 15
# Output: 32
# Example 2:
# Input: root = [10,5,15,3,7,13,18,1,null,6], L = 6, R = 10
# Output: 23
# Note:
# The number of nodes in the tree is at most 10000.
# The final answer is guaranteed to be less than 2^31.
# class Solution:
# def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
# result = self.cal_sum(root, L, R, 0)
# return result
# def cal_sum(self, root, L, R, result):
# if not root:
# return result
# left = self.cal_sum(root.left, L, R, result)
# right = self.cal_sum(root.right, L, R, result)
# if root.val < L or root.val > R:
# return left + right
# return left + right + root.val
# Better Solution
class Solution:
    """LeetCode 938: sum of BST values within an inclusive range."""

    def rangeSumBST(self, root: TreeNode, L: int, R: int) -> int:
        """Sum the values of all nodes whose value lies in [L, R]."""
        return self.cal_sum(root, L, R, 0)

    def cal_sum(self, root, L, R, result):
        """Recursively accumulate in-range values, pruning subtrees that
        the BST ordering guarantees are entirely out of range."""
        if not root:
            return result
        # The left subtree can only contribute if root.val is not already
        # below the lower bound; symmetrically for the right subtree.
        left_total = 0 if root.val < L else self.cal_sum(root.left, L, R, result)
        right_total = 0 if root.val > R else self.cal_sum(root.right, L, R, result)
        if L <= root.val <= R:
            return left_total + right_total + root.val
        return left_total + right_total
5,504 | d0f83e3b7eb5e1bc81a56e46043f394757437af8 | from django.db import models
# Create your models here.
class GeneralInformation(models.Model):
    """Basic contact record: a name plus its postal address and city."""

    name = models.CharField(max_length=100)
    address = models.TextField()
    city = models.CharField(max_length=20)

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ['name']

    def __str__(self):
        # Human-readable form (shown e.g. in the Django admin).
        return "{} {} {}".format(self.name, self.address, self.city)
|
5,505 | 5251724656e1d971900fff3d8fa0210c6cfc27bb | n=int(0)
import random
def doubleEven(n):
    """Return n doubled when n is even; return the string "-1" when odd."""
    if n % 2:
        # Odd input: the original contract signals this with the string "-1".
        return "-1"
    return n * 2
# Prompt interactively and show the doubled value (or "-1" for odd input).
print(doubleEven(n = int(input("put in a number"))))

# Default percentage, overwritten by the prompt below.
g=int(0)
def grade(g):
    """Map a percentage to a letter grade.

    <50 -> "F", 50-65 -> "C", 66-92 -> "B", >92 -> "A+".
    """
    if g < 50:
        return "F"
    if g < 66:
        return "C"
    if g > 92:
        return "A+"
    # Bug fix: the original had no branch for 66..92 -- it printed "error"
    # and implicitly returned None.  Treat that band as a "B".
    return "B"
# Prompt for a percentage and print its letter grade.
print(grade(g = int(input("put in your percent"))))

# Module-level defaults for the three comparison values.
num1 = 0
num2 = 0
num3 = 0
def largestNum(num1, num2, num3):
    """Return the largest of the three arguments.

    Bug fix: the original ignored its parameters and re-read all three
    values from input(), and returned None whenever the largest value
    was not strictly greater than the others (e.g. ties).
    """
    return max(num1, num2, num3)

print(largestNum(10, 20, 30))
def sumDice(Dice, numRolls): |
5,506 | 53fd020946a2baddb1bb0463d2a56744de6e3822 | #List methods allow you to modify lists. The following are some list methods for you to practice with. Feel free to google resources to help you with this assignment.
#append(element) adds a single element to the list
#1. 'Anonymous' is also deserving to be in the hacker legends list. Add him in to the hacker legends list and print your results.
hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo', 'Jonathan James', 'Kevin Poulsen']
hacker_legends.append('Anonymous')
print(hacker_legends)

#insert (index, element) adds a new element at any position in your list.
#2. You just created a networking study list and forgot to add in 'SSH'. Please add that into the 3rd position in the networking list and print your results.
networking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']
networking.insert(3, 'SSH')
print(networking)

#remove(element) removes a single element from the list
#3. The cyber security analyst entered the wrong IP address in the list below. Please remove the non-float integer from the ip addy list and print your results.
ip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]
ip_addy.remove(5102018)
print(ip_addy)

#pop(index) removes the element at the given index position
#4. The cyber traits list below is a list of traits that fit a career in cyber security. Everything is accurate, except for 'lazy'. Please remove 'lazy' from the list and print your results.
cyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent', 'curious', 'instinctive']
cyber_traits.pop(2)
print(cyber_traits)

#extend(list) adds elements from another list
#5. Combine the new co list with the sec co list and print your results.
sec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']
new_co= ['Checkp Point Software', 'Palo Alto Networks', 'Symantec', 'Trend Micro']
sec_co.extend(new_co)
print(sec_co)

#index(element) searches an element in the list and returns its index
#6. There were some headline grabbing cyber attacks in 2017. In the cyber attacks list below, find the index position of 'WannaCry' and print your result.
cyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!','WannaCry', 'Deep Root Analytics']
# Bug fix: the exercise asks for the *index* of 'WannaCry'; the original
# printed the element at index 3 instead of calling index().
print(cyber_attacks.index('WannaCry'))

#count(element) counts how many times an element is in a list
#7. In the dns list below, find the number of occurrences of 98.105 and print your results.
dns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]
print(dns_list.count(98.105))

#reverse() reverses the elements of a given list
#8. Decipher Mr. Robot's quote using the reverse method and print his message.
mr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a', 'just', 'never', 'is', 'bug', 'a']
mr_robot.reverse()
print(mr_robot)

#sort() sorts elements of a given list in a specific order (ascending or descending)
#9 Sort the following list of SSH Ids in ascending order
ssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, 1331901032.03789, 1331903508.24007, 1331903476.8]
ssh_list.sort()
print(ssh_list)
#print the list in descending order
ssh_list.sort(reverse=True)
print(ssh_list)

#max() returns the largest element in the list
#10 Find the largest integer in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(max(network_list))

#min() returns the smallest element in the list
#11 Find the smallest integer in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(min(network_list))

#sum() calculates the sum of the all the elements in the list
#12 Find the sum of the following occurrence list below:
occurences = [3, 2.5, 9, 7, 21, 6, 8]
print(sum(occurences))
|
5,507 | a1d1056f302cf7bc050537dd8cc53cdb2da7e989 |
# class header
class _PULPIER():
def __init__(self,):
self.name = "PULPIER"
self.definitions = pulpy
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pulpy']
|
5,508 | 4ae611ee8c019c76bb5d7c1d733ffb4bd06e2e8d | # import random module from Python standard library
# define a dictionary with image urls and number of flucks
# set the served img variable to be a random element from imgs
# hints:
# to put dict keys in a list: list(dict.keys())
# to choose a random item from a list: random.choice(lst)
# keep asking user if they want to fluck the image until
# they say either 'yes' or 'no'
# if they say 'yes', output a message and increment the flucks
# if they say 'no', serve another image?
# repeat process for another image...
# hint: group blocks of task-specific code into functions?
import random

# Image name -> number of "flucks" received so far.
imgs = {"img_1": 1, "img_2": 2, "img_3": 3, "img_4": 4}

# Bug fixes vs. the original:
#  * random.choice() needs a sequence, not a dict, and indexing the dict
#    with a random int raised KeyError (its keys are strings);
#  * raw_input() is Python 2 only, and binding the answer to `input`
#    shadowed the builtin.
served_img = random.choice(list(imgs.keys()))
print(served_img)

answer = input("Would you like to fluck it?!")
if answer == "yes":
    # Record the fluck, as described in the spec comments above.
    imgs[served_img] += 1
    print("YOU FLUCKED IT")
elif answer == "no":
    print("WHAT ARE YOU???..")
|
5,509 | 0470f98247f8f835c0c052b01ddd7f1f7a515ab5 | #-*- coding:utf-8 -*-
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
_exception = None
import os
class xmlSp:
    """Small helper around xml.etree.ElementTree: load, query, modify,
    write and (roughly) pretty-print XML.

    Errors from format/writeXml/__analyNodeFlag are reported through the
    module-level global ``_exception`` rather than being raised.
    """

    def addNode(self,parentNode,childNode):
        """Append childNode under parentNode."""
        parentNode.append(childNode)

    def createChildNode(self,key,value,propertyMap={}):
        """Create a new Element named *key* with text *value* and the given
        attributes.

        NOTE(review): propertyMap={} is a mutable default argument shared
        across calls; harmless only while callers never mutate it.
        """
        element = Element(key,propertyMap)
        element.text = value
        return element

    def fetchXmlNodeTree(self,xmlPathOrXmlStr):#Load xml has 2 ways.First:load xml string.Second:load xml file.
        """Parse either a file path (returns an ElementTree) or an XML
        string (returns the root Element); '' yields None."""
        if(xmlPathOrXmlStr == ""):
            return None
        elif(os.path.isfile(xmlPathOrXmlStr)):#is xmlPath
            return ElementTree.parse(xmlPathOrXmlStr)
        else:#is xmlStr
            return ElementTree.fromstring(xmlPathOrXmlStr)

    def fetchSingleNode(self,nodeTree,xpathOrKey):#If the node that is same name is more,return first node.
        """Return the first node matched by an xpath (contains '/') or by a
        bare tag name; None for an empty query.

        NOTE(review): getiterator() was removed in Python 3.9 -- under
        modern interpreters the tag-name branch needs iter() instead.
        """
        if xpathOrKey == None or xpathOrKey == "":
            return None
        elif len(xpathOrKey.split('/')) > 1:#is xpath
            return nodeTree.find(xpathOrKey)#find is faster than findall then return first
        else:#is key
            nodeList = nodeTree.getiterator(xpathOrKey)
            if nodeList == None or len(nodeList) <= 0:
                return nodeList
            else:
                return nodeList[0]

    def fetchSingleNodeValue(self,nodeTree,xpathOrKey):#If the node that is same name is more,return first node.
        """Return the text of the first matching node, or '' if none.

        NOTE(review): len(node) counts an Element's *children*, so a
        matching leaf node (len 0) also falls into the '' branch and its
        text is never returned -- confirm whether that is intended.
        """
        node = self.fetchSingleNode(nodeTree,xpathOrKey)
        if node == None or len(node) <= 0 or node == "":
            return ""
        else:
            return node.text

    def fetchNodeList(self,nodeTree,xpathOrKey):
        """Return all nodes matched by an xpath or a bare tag name."""
        if xpathOrKey == None or xpathOrKey == "":
            return None
        elif len(xpathOrKey.split('/')) > 1:#is xpath
            return nodeTree.findall(xpathOrKey)
        else:#is key
            return nodeTree.getiterator(xpathOrKey)

    def fetchNodeValueList(self,nodeTree,xpathOrKey,key=""):#If xpathOrKey is xpath,key must be not empty.Otherwise return empty set
        """Collect the text of every matched node whose tag equals
        *xpathOrKey* (so an xpath query yields an empty list -- see the
        original author's note above; the *key* parameter is unused)."""
        if xpathOrKey == None or xpathOrKey == "":
            return None
        else:
            nodeValueList = []
            nodeList = self.fetchNodeList(nodeTree,xpathOrKey)
            for node in nodeList:
                if node.tag == xpathOrKey:
                    nodeValueList.append(node.text)
            return nodeValueList

    def format(self,sourceXmlPath,destXmlPath,charset='UTF-8'):
        """Rewrite sourceXmlPath into destXmlPath one node per line.

        Returns True on success; on failure stores the exception in the
        module-level ``_exception`` and returns False.
        """
        global _exception
        _exception = None
        if os.path.exists(sourceXmlPath):
            try:
                fileRead = open(sourceXmlPath,'r',encoding=charset)
                fileWrite = open(destXmlPath,'w',encoding=charset)
                lines = fileRead.read()
                nodeList=[]
                self.__writeXmlStruct(lines,nodeList,fileWrite)
                fileRead.close()
                fileWrite.close()
                return True
            except BaseException as error:
                _exception = error
                return False
        else:
            _exception = BaseException('File not exist!')
            return False

    def __writeXmlStruct(self,xmlStr,nodeList,fileWrite):
        """Recursively peel the next tag / inner text off *xmlStr* and write
        each piece on its own line.

        nodeList records tag names in first-seen order; a tag's index in it
        is used as its nesting depth.  NOTE(review): ``space`` is the empty
        string, so the indentation loops below are currently no-ops, and
        there is no explicit base case for input without further '<' tags
        (termination appears to rely on format()'s broad except) -- verify
        before relying on this formatter.
        """
        xmlStr=xmlStr.replace('\n','')
        xmlStruct1=self.__analyNodeFlag(xmlStr)
        if xmlStruct1!=None:
            xmlNode1=xmlStruct1[0]
            xmlRestStr1=xmlStruct1[1]
            xmlStruct2=self.__analyNodeFlag(xmlRestStr1)
            xmlNode2=xmlStruct2[0]
            xmlRestStr2=xmlStruct2[1]
            # Text between the first tag and the second is the inner text.
            xmlInnerTextEnd=xmlRestStr1.find(xmlNode2)
            xmlInnerText=xmlRestStr1[:xmlInnerTextEnd]
            isPair=self.__checkNodeFlagIsPair(xmlNode1,xmlNode2)
            nodeName1=self.__fetchNodeNameFromStr(xmlNode1)
            nodeName2=self.__fetchNodeNameFromStr(xmlNode2)
            if not (nodeName1 in nodeList):
                nodeList.append(nodeName1)
            if not (nodeName2 in nodeList):
                nodeList.append(nodeName2)
            nodeName1Floor=nodeList.index(nodeName1,0)
            nodeName2Floor=nodeList.index(nodeName2,0)
            space=''
            if len(xmlNode1)>0:
                if isPair:
                    for index in range(nodeName1Floor):
                        xmlNode1=space+xmlNode1
                fileWrite.write(xmlNode1+'\n')
            if len(xmlInnerText)>0:
                if isPair:
                    for index in range(nodeName1Floor+1):
                        xmlInnerText=space+xmlInnerText
                fileWrite.write(xmlInnerText+'\n')
            if len(xmlNode2)>0:
                for index in range(nodeName2Floor):
                    xmlNode2=space+xmlNode2
                fileWrite.write(xmlNode2+'\n')
            self.__writeXmlStruct(xmlRestStr2,nodeList,fileWrite)

    def __analyNodeFlag(self,sourceStr):
        """Split *sourceStr* into [first '<...>' tag, remainder]; when no
        complete tag exists return ["", sourceStr]; on exception store it in
        ``_exception`` and return None."""
        global _exception
        _exception=None
        try:
            nodeBegin = sourceStr.find('<')
            nodeEnd = str(sourceStr).find('>')
            if nodeBegin >= 0 and nodeEnd > 0:
                node =sourceStr[nodeBegin:nodeEnd+1]
                nodeInnerText=sourceStr[nodeEnd+1:]
                return [node,nodeInnerText]
            else:
                return ["",sourceStr]
        except BaseException as error:
            _exception=error
            return None

    def __checkNodeFlagIsPair(self,nodeFlag1,nodeFlag2):
        """True when the two tag strings are an open/close pair for the same
        element name (compared with brackets and '/' stripped)."""
        if len(nodeFlag1)>0 and len(nodeFlag2)>0:
            nodeFlag1=nodeFlag1[1:(len(nodeFlag1)-2)]
            nodeFlag2=nodeFlag2[1:(len(nodeFlag2)-2)]
            nodeFlag1=nodeFlag1.replace('/','')
            nodeFlag2=nodeFlag2.replace('/','')
            if nodeFlag1==nodeFlag2:
                return True
        return False

    def __fetchNodeNameFromStr(self,str):
        """Extract the element name from a '<tag>' or '</tag>' string."""
        str=str[1:(len(str)-1)]
        nodeName=str.replace('/','')
        return nodeName

    def modifyNodeValue(self,node,newValue, isAppend=False):
        """Set (or, with isAppend, append to) node.text.  Returns True on
        success, False for a None node or any failure."""
        if(node == None):
            return False
        else:
            try:
                if isAppend:
                    node.text += newValue
                else:
                    node.text = newValue
                return True
            except:
                return False

    def writeXml(self,nodeTree, outPath,charset="utf-8"):
        """Serialize *nodeTree* to *outPath*.  Returns True on success;
        stores the exception in ``_exception`` and returns False otherwise."""
        global _exception
        _exception=None
        try:
            nodeTree.write(outPath, encoding=charset)
            return True
        except BaseException as error:
            _exception=error
            return False
#import os
#if __name__ == '__main__':
# myxml = xmlSp()
# formatResult = myxml.format("1.txt","2.txt")
# if not formatResult:
# print(_exception)
# else:
# os.remove("1.txt")
# os.rename('2.txt','1.txt')
## xmlPath= "..\\article\\articleList.xml";
## nodeTree = myxml.fetchXmlNodeTree(xmlPath)
## #nodeTree=
## #myxml.fetchXmlNodeTree("<artilceList><article><id>aaaa</id></article></artilceList>")
## #node=myxml.fetchSingleNode(nodeTree,'article/id')
## #if len(node)<=0:
## # print("empty")
## #print(node)
## #nodeList = myxml.fetchNodeList(nodeTree,'id')
## #myxml.modifyNodeValue(nodeList[0],'bbbb')
## #myxml.writeXml(nodeTree,xmlPath)
## #rootNode=myxml.fetchSingleNode(nodeTree,'articleList')
## #idNode=myxml.createChildNode('id','aaabbbb')
## #nameNode=myxml.createChildNode('name','aaabbbb')
## #parentNode=myxml.createChildNode('article','')
## #myxml.addNode(parentNode,idNode)
## #myxml.addNode(parentNode,nameNode)
## #myxml.addNode(rootNode,parentNode)
## #myxml.writeXml(nodeTree,'aaa.xml')
## #for node in nodeList:
## # print("node:%s" %node)
## #nodeValueSet=fetchNodeValueSet(nodeTree,'article/id')
## #for nodeValue in nodeValueSet:
## # print ("nodeValue:%s" %nodeValue)
#import os
#os.system("PAUSE")
|
5,510 | 4d31985cf1266619406d79a7dbae269c10f21bda | import world
import items
class Quest:
    """Abstract base for quests; only concrete subclasses may be created."""

    def __init__(self):
        # Subclasses are expected to define their own rewards and log.
        raise NotImplementedError("Do not create raw quest classes")

    def __str__(self):
        return self.quest_name

    def give_reward(self, player):
        """Announce the reward, credit it to *player*, and mark the quest done."""
        print("You receive: \n{} gold\n{} exp".format(self.reward_gold, self.reward_exp))
        # Show every reward item, then hand everything over.
        for reward in self.reward_item:
            print(reward)
        player.gold += self.reward_gold
        player.exp += self.reward_exp
        for reward in self.reward_item:
            player.item_inventory.append(reward)
        self.complete = True
class NoobQuest(Quest):
    """Introductory quest: kill the rat."""

    def __init__(self):
        self.quest_status = 0
        self.quest_name = "Kill the Rat!"
        self.reward_gold = 250
        self.reward_exp = 500
        self.reward_item = [items.Longsword()]
        self.quest_log = []
        self.complete = False

    def print_quest_log(self):
        """Print the quest title followed by its numbered log entries."""
        print("Quest: {}".format(self.quest_name))
        for entry_no, entry in enumerate(self.quest_log, 1):
            print("{}: {}".format(entry_no, entry))

    def update_quest_log(self, quest_log_text):
        """Append a new entry to this quest's log."""
        self.quest_log.append(quest_log_text)
''' #### Working on new quest architecture ####
class QuestObject:
def __init__(self):
self.quest_status = 0
self.complete_status = 0
self.quest_name = "Quest Name"
self.reward_gold = 0
self.reward_exp = 0
self.reward_item = []
self.quest_logs = []
self.player_log = []
self.complete = False
def __str__(self):
return self.quest_name
def give_reward(self, player):
if self.complete:
print("You tried to get rewards twice! Something broke!")
return
print("You completed the quest: {}".format(self.quest_name))
print("Here is your reward:")
for item in self.reward_item:
print("* {}".format())
player.item_inventory.append(item)
print("* {} Gold\n* {} Exp".format(self.reward_gold, self.reward_exp))
player.gold += self.reward_gold
player.exp += self.reward_exp
self.complete = True
def set_quest_status(self, status):
self.quest_status = status
def update_player_log(self, index):
self.player_log.append(self.quest_logs[index])
def can_be_completed(self):
return self.quest_status == self.complete_status
'''
|
5,511 | 399097ef7cfdc061b307c3cc29615c9f50b1e6bf | from utils.gradient_strategy.dct_generator import DCTGenerator
from utils.gradient_strategy.random_generator import RandomGenerator
from utils.gradient_strategy.upsample_generator import UpSampleGenerator
from utils.gradient_strategy.centerconv_generator import CenterConvGenerator
from utils.attack_setting import *
from utils.construct_model_data import construct_model_and_data
from utils.generate_model import ImageModel
from utils.generate_video import video
from utils.load_data import ImageData, split_data
from utils.show_or_save import *
from utils.gradient_strategy.centerconv_generator import CenterConvGenerator |
5,512 | 117b340b13b9b1c53d3df1646cd5924f0118ab5d | #Small enough? - Beginner
# You will be given an array and a limit value.
# You must check that all values in the array are
# below or equal to the limit value. If they are,
# return true. Else, return false.
def small_enough(array, limit):
    """Return True when every value in *array* is <= *limit*.

    The original built a comma-separated "True,"/"False," string and then
    searched it for "False"; all() expresses the same check directly and,
    like the original, returns True for an empty array.
    """
    return all(value <= limit for value in array)
5,513 | 70b26052d9516fd067ff71074a6dc4c58ace7d80 | # Use a tuple rather than a list so the fixed set of choices cannot be rewritten
# The immutable choices and the caller's (mutable) selected answers.
chose_from_two = ('A', 'B', 'C')
answer = []
answer.append('A')
answer.append('C')
print(chose_from_two)
# ('A', 'B', 'C')
print(answer)
# ['A', 'C'] |
5,514 | 9928eaa32468453f405d8bb650f3e0e85a7933bf | import os
import cv2
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision import transforms
from utils.utils import loadYaml
from .base_datalayer import BaseDataLayer
import albumentations as albu
class Datalayer(BaseDataLayer):
    """Data layer yielding (image, mask) pairs sampled from the 'bg'
    (background) and 'ng' (defect) sub-folders of the training directory."""

    def __init__(self, config, augmentation=None, preprocessing=None):
        super(Datalayer, self).__init__()
        self.config = config
        train_dir = self.config['Dataset']['TrainPath']
        mask_suffix = '_mask.png'
        img_suffix = '.png'

        def collect(sub_dir):
            # Masks are the files ending in _mask.png; each image shares the
            # mask's name minus that suffix.
            folder = os.path.join(train_dir, sub_dir)
            masks = [os.path.join(folder, name)
                     for name in os.listdir(folder)
                     if name.endswith(mask_suffix)]
            images = [path.replace(mask_suffix, img_suffix) for path in masks]
            return images, masks

        self.bg_imgs_path, self.bg_masks_path = collect('bg')
        self.ng_imgs_path, self.ng_masks_path = collect('ng')
        self.augmentation = augmentation
        self.preprocessing = preprocessing

    def __len__(self):
        return len(self.bg_masks_path) + len(self.ng_masks_path)

    def __getitem__(self, item):
        # Draw a background sample half the time (when any exist),
        # otherwise a defect ("ng") sample; *item* is ignored by design.
        if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:
            pick = np.random.randint(0, len(self.bg_imgs_path))
            img_path, mask_path = self.bg_imgs_path[pick], self.bg_masks_path[pick]
        else:
            pick = np.random.randint(0, len(self.ng_imgs_path))
            img_path, mask_path = self.ng_imgs_path[pick], self.ng_masks_path[pick]

        img = cv2.imread(img_path)
        mask = cv2.imread(mask_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Optional augmentation, then optional preprocessing, both expected
        # to follow the albumentations dict-in / dict-out convention.
        if self.augmentation:
            augmented = self.augmentation(image=img, mask=mask)
            img, mask = augmented['image'], augmented['mask']
        if self.preprocessing:
            processed = self.preprocessing(image=img, mask=mask)
            img, mask = processed['image'], processed['mask']
        return img, mask
|
5,515 | 855bfc9420a5d5031cc673231cc7993ac67df076 | import numpy as np
import h5py
def rotate_z(theta, x):
    """Rotate point clouds *x* (batch, n_points, 3) about the z-axis by the
    per-batch angles *theta* (batch, 1), leaving z untouched."""
    angles = np.expand_dims(theta, 1)  # -> broadcastable over the points axis
    cos_a = np.cos(angles)
    sin_a = np.sin(angles)
    px = x[:, :, 0:1]
    py = x[:, :, 1:2]
    pz = x[:, :, 2:3]
    rotated_x = cos_a * px - sin_a * py
    rotated_y = sin_a * px + cos_a * py
    return np.concatenate([rotated_x, rotated_y, pz], axis=2)
def augment(x):
    """Randomly rotate (about z) then anisotropically scale a batch of clouds."""
    batch = x.shape[0]
    # Small rotation: uniform in [-0.1, 0.1) * pi per batch element.
    rot_lo, rot_hi = -0.1, 0.1
    angles = np.random.uniform(rot_lo, rot_hi, [batch, 1]) * np.pi
    rotated = rotate_z(angles, x)
    # Independent per-axis scale factors in [0.8, 1.25).
    scale_lo, scale_hi = 0.8, 1.25
    factors = np.random.rand(batch, 1, 3) * (scale_hi - scale_lo) + scale_lo
    return rotated * factors
def standardize(x):
    """Zero-mean / unit-std normalize each cloud after clipping outliers."""
    scale = np.mean(np.abs(x), (1, 2), keepdims=True)
    # Clip anything beyond 100x the mean absolute value before normalizing.
    clipped = np.clip(x, -100 * scale, 100 * scale)
    mu = np.mean(clipped, (1, 2), keepdims=True)
    sigma = np.std(clipped, (1, 2), keepdims=True)
    return (clipped - mu) / sigma
class ModelFetcher(object):
    """Batch iterator over a point-cloud HDF5 file.

    Loads the whole 'tr_cloud'/'tr_labels'/'test_cloud'/'test_labels'
    datasets into memory, optionally standardizes and augments batches,
    and down-samples the points axis.
    """

    def __init__(
        self,
        fname,
        batch_size,
        down_sample=10,
        do_standardize=True,
        do_augmentation=False,
    ):
        self.fname = fname
        self.batch_size = batch_size
        self.down_sample = down_sample
        with h5py.File(fname, "r") as f:
            self._train_data = np.array(f["tr_cloud"])
            self._train_label = np.array(f["tr_labels"])
            self._test_data = np.array(f["test_cloud"])
            self._test_label = np.array(f["test_labels"])
        # Labels are assumed to be 0-based consecutive integers.
        self.num_classes = np.max(self._train_label) + 1
        self.num_train_batches = len(self._train_data) // self.batch_size
        self.num_test_batches = len(self._test_data) // self.batch_size
        # prep1: per-batch normalization; prep2: normalization + augmentation.
        self.prep1 = standardize if do_standardize else lambda x: x
        self.prep2 = (
            (lambda x: augment(self.prep1(x))) if do_augmentation else self.prep1
        )
        assert (
            len(self._train_data) > self.batch_size
        ), "Batch size larger than number of training examples"
        # select the subset of points to use throughout beforehand
        self.perm = np.random.permutation(self._train_data.shape[1])[
            :: self.down_sample
        ]

    def train_data(self):
        """Shuffle the training set in place and return a batch generator."""
        # Save and restore the RNG state so data and labels are shuffled
        # with the *same* permutation.
        rng_state = np.random.get_state()
        np.random.shuffle(self._train_data)
        np.random.set_state(rng_state)
        np.random.shuffle(self._train_label)
        return self.next_train_batch()

    def next_train_batch(self):
        """Yield (points, cardinalities, labels) training batches.

        A trailing partial batch (end >= N) is dropped.
        """
        start = 0
        end = self.batch_size
        N = len(self._train_data)
        perm = self.perm
        # Every cloud keeps the same fixed subset of points, so all batch
        # entries share one cardinality.
        batch_card = len(perm) * np.ones(self.batch_size, dtype=np.int32)
        while end < N:
            yield self.prep2(
                self._train_data[start:end, perm]
            ), batch_card, self._train_label[start:end]
            start = end
            end += self.batch_size

    def test_data(self):
        """Return a batch generator over the (unshuffled) test set."""
        return self.next_test_batch()

    def next_test_batch(self):
        """Yield (points, cardinalities, labels) test batches.

        NOTE(review): the test split uses a strided slice (1::down_sample)
        rather than the random subset self.perm used for training; the two
        subsets therefore differ -- confirm this asymmetry is intended.
        """
        start = 0
        end = self.batch_size
        N = len(self._test_data)
        batch_card = (self._train_data.shape[1] // self.down_sample) * np.ones(
            self.batch_size, dtype=np.int32
        )
        while end < N:
            yield self.prep1(
                self._test_data[start:end, 1 :: self.down_sample]
            ), batch_card, self._test_label[start:end]
            start = end
            end += self.batch_size
|
5,516 | 22792937415a8ee4cecff2a9683c435abe54bdab | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2020- Spyder Project Contributors
#
# Released under the terms of the MIT License
# ----------------------------------------------------------------------------
"""Tests for the execution of pylint."""
# Standard library imports
from io import open
import os.path as osp
from unittest.mock import Mock, MagicMock
# Third party imports
import pytest
from qtpy.QtCore import Signal, QObject
# Local imports
from spyder.plugins.pylint.plugin import Pylint
from spyder.plugins.pylint.widgets.pylintgui import PylintWidget
from spyder.plugins.pylint.utils import get_pylintrc_path
# pylint: disable=redefined-outer-name
PYLINTRC_FILENAME = ".pylintrc"
# Constants for dir name keys
# In Python 3 and Spyder 5, replace with enum
NO_DIR = "e"
SCRIPT_DIR = "SCRIPT_DIR"
WORKING_DIR = "WORKING_DIR"
PROJECT_DIR = "PROJECT_DIR"
HOME_DIR = "HOME_DIR"
ALL_DIR = "ALL_DIR"
DIR_LIST = [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]
DIR_LIST_ALL = [NO_DIR] + DIR_LIST + [ALL_DIR]
PYLINT_TEST_SCRIPT = "import math\nimport os\nimport sys\n" + "\n".join(
[dir_name + " = " + str(idx) for idx, dir_name in enumerate(DIR_LIST_ALL)])
PYLINT_TEST_SCRIPT = "\"\"\"Docstring.\"\"\"\n" + PYLINT_TEST_SCRIPT + "\n"
PYLINTRC_TEST_CONTENTS = """
[MESSAGES CONTROL]
enable=blacklisted-name
[BASIC]
bad-names={bad_names}
good-names=e
"""
class MainWindowMock(QObject):
    """Minimal stand-in for Spyder's main window used by the Pylint plugin."""

    sig_editor_focus_changed = Signal(str)

    def __init__(self):
        super(MainWindowMock, self).__init__(None)
        self.editor = Mock()
        # Re-expose the real Qt signal so connections made by the plugin work.
        self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed
        self.projects = MagicMock()
@pytest.fixture
def pylintrc_search_paths(tmp_path_factory):
    """Construct temporary .pylintrc search paths."""
    paths = {}
    for dir_name in DIR_LIST:
        paths[dir_name] = str(tmp_path_factory.mktemp(dir_name))
    return paths
@pytest.fixture
def pylint_test_script(pylintrc_search_paths):
    """Write a script for testing Pylint to a temporary directory."""
    target = osp.join(pylintrc_search_paths[SCRIPT_DIR], "test_script.py")
    with open(target, mode="w",
              encoding="utf-8", newline="\n") as handle:
        handle.write(PYLINT_TEST_SCRIPT)
    return target
@pytest.fixture
def pylint_test_scripts(pylintrc_search_paths):
    def _pylint_test_scripts(filenames):
        """Write scripts for testing Pylint to a temporary directory."""
        paths = []
        for name in filenames:
            target = osp.join(
                pylintrc_search_paths[SCRIPT_DIR], name)
            with open(target, mode="w",
                      encoding="utf-8", newline="\n") as handle:
                handle.write(PYLINT_TEST_SCRIPT)
            paths.append(target)
        return paths
    return _pylint_test_scripts
@pytest.fixture(
    params=[
        [], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [HOME_DIR],
        [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR],
        [SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR],
        [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]],
    ids=["None", "Script", "Working", "Project", "Home", "Script & Home",
         "Working & Project", "Script & Working", "Project & Home", "All"])
def pylintrc_files(pylintrc_search_paths, request):
    """Store test .pylintrc files at the paths and determine the result."""
    search_paths = pylintrc_search_paths
    # Determine the bad names that should be reported
    pylintrc_locations = request.param
    bad_names = [ALL_DIR]
    # The first search path (in DIR_LIST order) that holds a pylintrc is the
    # one Pylint should pick up; the for/else handles "none at all".
    for search_path_name, search_path in search_paths.items():
        if search_path_name in pylintrc_locations:
            expected_path = osp.join(search_path, PYLINTRC_FILENAME)
            bad_names += [search_path_name]
            break
    else:
        expected_path = None
        bad_names = [NO_DIR]
    # Store the selected pylintrc files at the designated paths; each one
    # blacklists its own location's name plus ALL_DIR.
    for location in pylintrc_locations:
        pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(
            bad_names=", ".join([location, ALL_DIR]))
        pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)
        with open(pylintrc_path, mode="w",
                  encoding="utf-8", newline="\n") as rc_file:
            rc_file.write(pylintrc_test_contents)
    return search_paths, expected_path, bad_names
def test_get_pylintrc_path(pylintrc_files, mocker):
    """Test that get_pylintrc_path finds the expected one in the hierarchy."""
    search_paths, expected_path, __ = pylintrc_files
    mocker.patch("pylint.config.os.path.expanduser",
                 return_value=search_paths[HOME_DIR])
    result = get_pylintrc_path(
        search_paths=list(search_paths.values()),
        home_path=search_paths[HOME_DIR],
    )
    assert result == expected_path
def test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):
    """Test that pylint works without errors with no project open."""
    window = MainWindowMock()
    window.projects.get_active_project_path = mocker.MagicMock(
        return_value=None)
    plugin = Pylint(parent=window)
    widget = PylintWidget(parent=plugin)
    widget.analyze(filename=pylint_test_script)
    qtbot.waitUntil(
        lambda: widget.get_data(pylint_test_script)[1] is not None,
        timeout=5000)
    results = widget.get_data(filename=pylint_test_script)
    print(results)
    assert results
    assert results[0] is not None
    assert results[1] is not None
def test_pylint_widget_pylintrc(
        pylint_test_script, pylintrc_files, mocker, qtbot):
    """Test that entire pylint widget gets results depending on pylintrc.

    Patches home/cwd lookups so the .pylintrc hierarchy created by the
    pylintrc_files fixture is the one pylint discovers, then checks that
    exactly the expected bad names are reported as conventions.
    """
    search_paths, __, bad_names = pylintrc_files
    # Redirect every path-resolution entry point into the test tree.
    mocker.patch("pylint.config.os.path.expanduser",
                 return_value=search_paths[HOME_DIR])
    mocker.patch("spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home",
                 return_value=search_paths[WORKING_DIR])
    mocker.patch("spyder.plugins.pylint.widgets.pylintgui.osp.expanduser",
                 return_value=search_paths[HOME_DIR])
    main_window = MainWindowMock()
    main_window.projects.get_active_project_path = mocker.MagicMock(
        return_value=search_paths[PROJECT_DIR])
    pylint_sw = Pylint(parent=main_window)
    pylint_widget = PylintWidget(parent=pylint_sw)
    pylint_widget.analyze(filename=pylint_test_script)
    # Results arrive asynchronously; wait until they are populated.
    qtbot.waitUntil(
        lambda: pylint_widget.get_data(pylint_test_script)[1] is not None,
        timeout=5000)
    pylint_data = pylint_widget.get_data(filename=pylint_test_script)
    print(pylint_data)
    assert pylint_data
    # "C:" holds convention-level messages from the analysis payload.
    conventions = pylint_data[1][3]["C:"]
    assert conventions
    assert len(conventions) == len(bad_names)
    # Each expected bad name must appear in exactly one convention message.
    assert all([sum([bad_name in message[2] for message in conventions]) == 1
                for bad_name in bad_names])
def test_pylint_max_history_conf(pylint_test_scripts, mocker):
    """Regression test for checking max_entries configuration.

    For further information see spyder-ide/spyder#12884
    """
    # Create the pylint widget for code analysis
    main_window = MainWindowMock()
    main_window.projects.get_active_project_path = mocker.MagicMock(
        return_value=None)
    pylint_sw = Pylint(parent=main_window)
    pylint_widget = PylintWidget(parent=pylint_sw)
    pylint_widget.filecombo.clear()
    script_0, script_1, script_2 = pylint_test_scripts(
        ["test_script_{}.py".format(n) for n in range(3)])
    # Change the max_entry to 2
    pylint_widget.parent.set_option('max_entries', 2)
    pylint_widget.change_history_limit(2)
    assert pylint_widget.parent.get_option('max_entries') == 2
    # Call to set_filename
    pylint_widget.set_filename(filename=script_0)
    assert pylint_widget.filecombo.count() == 1
    # Add to more filenames
    pylint_widget.set_filename(filename=script_1)
    pylint_widget.set_filename(filename=script_2)
    # History is capped at 2: the oldest entry (script_0) is evicted,
    # newest-first ordering.
    assert pylint_widget.filecombo.count() == 2
    assert 'test_script_2.py' in pylint_widget.curr_filenames[0]
    assert 'test_script_1.py' in pylint_widget.curr_filenames[1]
    # Change the max entry to 1
    pylint_widget.parent.set_option('max_entries', 1)
    pylint_widget.change_history_limit(1)
    assert pylint_widget.filecombo.count() == 1
    assert 'test_script_2.py' in pylint_widget.curr_filenames[0]
# Allow running this test module directly: verbose output, warning summary.
if __name__ == "__main__":
    pytest.main([osp.basename(__file__), '-vv', '-rw'])
|
5,517 | 520b9246c3c617b18ca57f31ff51051cc3ff51ca | from abc import ABC, abstractmethod
class Shape(ABC):  # Shape is a child class of ABC
    """Abstract interface: concrete shapes must implement area and perimeter."""
    @abstractmethod
    def area(self):
        """Return the area of the shape (must be overridden)."""
        pass
    @abstractmethod
    def perimeter(self):
        """Return the perimeter of the shape (must be overridden)."""
        pass
class Square(Shape):
    """Incomplete subclass: deliberately omits area()/perimeter() overrides."""
    def __init__(self, length):
        # length: side length of the square
        self.length = length
square = Square(4)
# This line raises TypeError at runtime (not a compile error): Square is
# still abstract because area() and perimeter() were never overridden.
|
5,518 | c28d7fc45be9a6efa7b7ef00520898c3d238ac63 | a=raw_input("Enter the column\n")
# Python 2 script: draws an ASCII grid of `a` columns by `b` rows using
# raw_input/xrange/print statements.
b=raw_input("Enter the row\n")
i=0
k=0
m=0
c=""   # horizontal border line, e.g. " --- ---"
d=""   # cell-wall line, e.g. "|   |   "
while (m<int(b)):
    # Both inner loops only run on the first outer pass; after that the
    # fully built c and d strings are simply re-printed for each row.
    while(i<int(a)):
        c=c+" "
        for j in xrange(1,4):
            c=c+"-"
        i=i+1
    while(k<int(a)):
        d=d+"|"
        for l in xrange(1,4):
            d=d+" "
        k=k+1
    m=m+1
    print c
    print d+"|"
    print c
|
5,519 | b573db8ea0845fb947636b8d82ed462904c6005d | import boto3
from app.models import *
from app.config import *
from app.lib.log import save_races_to_db, save_laptimes_to_db
from app.utils.utils import get_sec
import pandas as pd
def import_csv_from_aws():
    """Download filtered race/lap-time CSVs from S3 and load them into the DB.

    Fetches ``filtered_laptimes.csv`` and ``filtered_races.csv`` from the
    ``ergast-csv`` bucket, normalizes and merges them, then persists races
    and per-race lap times through the SQLAlchemy session.
    """
    client = boto3.client(
        's3',
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY
    )
    client.download_file('ergast-csv','filtered_laptimes.csv','filtered_laptimes.csv')
    client.download_file('ergast-csv','filtered_races.csv','filtered_races.csv')
    df_lapTimes = pd.read_csv('filtered_laptimes.csv')
    df_races = pd.read_csv('filtered_races.csv')
    df_races['round'] = df_races['round'].astype(int)
    df_races['season'] = df_races['season'].astype(int)
    df_lapTimes.rename(columns={"driverId":"driverRef"}, inplace=True)
    # Attach race metadata (name/season/round) to every lap-time row.
    df_lapTimes = pd.merge(df_lapTimes, df_races[['raceId', 'raceName', 'season', 'round']], on=['raceId'], how='left')
    # Missing lap times become a zero duration before conversion to seconds.
    df_lapTimes.fillna("0:00:00", inplace=True)
    df_lapTimes['time'] = df_lapTimes['time'].map(lambda x: get_sec(x))
    df_lapTimes = df_lapTimes[["driverRef", "season", "raceId", "raceName", "round", "lap", "time", "position"]]
    df_lapTimes.rename(columns={"round":"roundId"}, inplace=True)
    save_races_to_db(df_races, db.session)
    for i, group in df_lapTimes.groupby("raceId"):
        g = group.drop(["raceId"], axis=1)
        # BUG FIX: was `b.session...` (NameError) - use the SQLAlchemy db
        # session, matching save_races_to_db() and the commit below.
        db.session.bulk_insert_mappings(LapTimes, g.to_dict("records"))
    db.session.commit()
|
5,520 | e58dbb4f67c93abf3564dc0f38df8852313338f0 | import time
import jax.numpy as jnp
def tick():
    """Return the current high-resolution wall-clock time in seconds."""
    return time.perf_counter()
def tock(t0, dat=None):
    """Return seconds elapsed since *t0*, first syncing pending JAX work.

    When *dat* is given, block until it is fully computed so JAX's
    asynchronous dispatch does not skew the measurement; plain Python
    values are wrapped in a JAX array to obtain the same barrier.
    """
    if dat is None:
        return time.perf_counter() - t0
    try:
        dat.block_until_ready()
    except AttributeError:
        # Not a JAX array; converting forces evaluation/transfer anyway.
        jnp.array(dat).block_until_ready()
    return time.perf_counter() - t0
|
5,521 | 083a9555f8db586fbb065d59e4e333bb16ee3d2a | import os
import sys
from subprocess import check_output
from charmhelpers.fetch import (
apt_install,
apt_update,
add_source,
)
from charmhelpers.core.templating import render
from charmhelpers.contrib.database.mysql import MySQLHelper
def install_mysql(package='mysql-server', sources=None, keys=None):
    """Install a MySQL package, preseeding the root password via debconf.

    Reads the root password from /var/lib/mysql/mysql.passwd and feeds it
    to debconf-set-selections so apt installs non-interactively.  Extra apt
    sources are added first.  NOTE(review): `keys` is accepted but never
    used; Python 2 code - the pipe writes pass str, not bytes.
    """
    if not sources:
        sources = []
    if not keys:
        keys = []
    from subprocess import (
        Popen,
        PIPE,
    )
    for source in sources:
        add_source(source)
    if sources:
        apt_update()
    with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
        root_pass = rpw.read()
    # Preseed both password prompts so the install never blocks on input.
    dconf = Popen(['debconf-set-selections'], stdin=PIPE)
    dconf.stdin.write("%s %s/root_password password %s\n" % (package, package,
                                                            root_pass))
    dconf.stdin.write("%s %s/root_password_again password %s\n" % (package,
                                                                  package,
                                                                  root_pass))
    dconf.communicate()
    dconf.wait()
    apt_install(package)
def build_mycnf(cfg):
    """Render MySQL configuration files from the charm config dict *cfg*.

    Writes /etc/mysql/my.cnf always, and a binlog config when this unit is
    marked as a replication slave (unless tuning-level is 'fast').
    """
    # Marker file is dropped elsewhere when the unit is set up as a slave.
    i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
    # REFACTOR add to charm helpers
    unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
    if i_am_a_slave and cfg.get('tuning-level') != 'fast':
        # On slaves, this gets overwritten
        render(
            source='mysql/binlog.cnf',
            target='/etc/mysql/conf.d/binlog.cnf',
            context={
                'unit_id': unit_id,
                'format': cfg.get('binlog-format', 'MIXED')
            },
        )
    render(source='mysql/my.cnf', target='/etc/mysql/my.cnf',
           context=cfg)
def human_to_bytes(human):
    """Convert a human-readable size such as '512M' to an integer byte count.

    Plain digit strings are already bytes.  The suffix is case-insensitive
    and one of K, M, G, T.

    Raises:
        ValueError: if the trailing character is not a recognized unit.
    """
    if human.isdigit():
        # BUG FIX: previously returned the original *string*; callers such as
        # mem_limit()/dataset_size() use the result numerically, so return int.
        return int(human)
    factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
    modifier = human[-1]
    if modifier.lower() in factors:
        return int(human[:-1]) * factors[modifier.lower()]
    raise ValueError("Can only convert K, M, G, and T")
def dataset_size(size, page):
    """Resolve a dataset-size setting to bytes, rounded down to *page* bytes.

    *size* is either an absolute value ('512M', '1G', ...) or a percentage
    of system memory ('80%').  Percentages are taken of total RAM, capped
    at the platform limit on 32-bit systems.
    """
    if not size.endswith('%'):
        return human_to_bytes(size)
    total_mem = human_to_bytes(get_memtotal())
    sys_mem_limit = mem_limit()
    # BUG FIX: the capped total used to be assigned to an unused `total_ram`
    # variable while the percentage was always taken of sys_mem_limit - on a
    # 32-bit box with less RAM than the 4G limit that over-allocated.  Cap
    # total_mem and compute the percentage of it.
    if is_32bits() and total_mem > sys_mem_limit:
        total_mem = sys_mem_limit
    factor = int(size[:-1]) * 0.01
    pctram = total_mem * factor
    # Round down to a whole number of pages.
    return int(pctram - (pctram % page))
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2**32.
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
def mem_limit():
    """Return the usable system memory limit in charm size units.

    Defaults to total RAM; armv7l is clamped to an experimentally
    determined 2700M, other 32-bit platforms to the 4G address space.
    NOTE(review): human_to_bytes historically returned a *string* for
    plain-digit input, so the return type here can vary - confirm callers.
    """
    import platform
    SYS_MEM_LIMIT = human_to_bytes(get_memtotal())
    if platform.machine() in ['armv7l']:
        SYS_MEM_LIMIT = human_to_bytes('2700M')  # experimentally determined
    elif is_32bits():
        SYS_MEM_LIMIT = human_to_bytes('4G')
    return SYS_MEM_LIMIT
def get_memtotal():
    """Return total system RAM as a compact string such as '8167848K'.

    Parsed from the MemTotal line of /proc/meminfo (Linux only); returns
    None implicitly if that line is absent.
    """
    with open('/proc/meminfo') as meminfo:
        for entry in meminfo:
            name, _, value = entry.partition(':')
            if name != 'MemTotal':
                continue
            amount, unit = value.strip().split(' ')
            # e.g. '8167848' + 'K' (first letter of 'kB', uppercased)
            return amount + unit[0].upper()
def get_db_helper():
    """Return a MySQLHelper wired to this charm's on-disk password files."""
    return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
                       upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
                       delete_ondisk_passwd_file=False)
# REFACTOR factory/cache
def get_db_cursor():
    """Open a new root connection to the local MySQL and return a cursor.

    The caller owns the cursor; note the underlying connection object is
    not returned, so it cannot be explicitly closed or committed here.
    """
    import MySQLdb
    # Connect to mysql
    db_helper = get_db_helper()
    passwd = db_helper.get_mysql_root_password()
    connection = MySQLdb.connect(user="root", host="localhost", passwd=passwd)
    return connection.cursor()
def create_database(name):
    """Create a UTF-8 MySQL database called *name* if it does not exist.

    Returns *name* in either case.

    NOTE(review): *name* is interpolated directly into SQL (identifiers
    cannot be bound as parameters) - it must come from a trusted source.
    """
    # REFACTOR UTF-8
    cursor = get_db_cursor()
    try:
        cursor.execute("show databases like '%s'" % name)
        if cursor.fetchall():
            # BUG FIX: the early-exit path used to return without closing
            # the cursor; the try/finally now closes it on every path.
            return name
        cursor.execute("create database `%s` character set utf8" % name)
    finally:
        cursor.close()
    return name
def create_user():
    """Generate a random (user, password) pair and grant it replication access.

    Uses the external `pwgen` tool to produce two 15-character strings.
    Python 2 code: check_output returns str, split on newline yields the
    two generated values.
    """
    # REFACTOR pwgen python module? maybe? yeah?
    (user, password) = check_output(['pwgen', '-N 2', '15']).split('\n')[:-1]
    cursor = get_db_cursor()
    grant_sql = "grant replication client on *.* to `%s` identified by '%s'"
    cursor.execute(grant_sql % (user, password))
    cursor.close()
    return (user, password)
def grant_database(database, user, password):
    """Grant *user* (identified by *password*) all privileges on *database*.

    NOTE(review): arguments are interpolated into SQL without escaping;
    they must come from trusted sources.
    """
    cursor = get_db_cursor()
    cursor.execute(
        "grant all on `%s`.* to `%s` identified by '%s'" % (database,
                                                            user, password))
    cursor.close()
#
#relation_id = os.environ.get('JUJU_RELATION_ID')
#change_unit = os.environ.get('JUJU_REMOTE_UNIT')
#
## We'll name the database the same as the service.
#database_name_file = '.%s_database_name' % (relation_id)
## change_unit will be None on broken hooks
#database_name = ''
#if change_unit:
# database_name, _ = change_unit.split("/")
# with open(database_name_file, 'w') as dbnf:
# dbnf.write("%s\n" % database_name)
# dbnf.flush()
#elif os.path.exists(database_name_file):
# with open(database_name_file, 'r') as dbname:
# database_name = dbname.readline().strip()
#else:
# print 'No established database and no REMOTE_UNIT.'
## A user per service unit so we can deny access quickly
#lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user)
#slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name
#slave_configured = os.path.exists(slave_configured_path)
#slave = os.path.exists('/var/lib/juju/i.am.a.slave')
#broken_path = '/var/lib/juju/%s.mysql.broken' % database_name
#broken = os.path.exists(broken_path)
#
#
#
#
#def migrate_to_mount(new_path):
# """Invoked when new mountpoint appears. This function safely migrates
# MySQL data from local disk to persistent storage (only if needed)
# """
# old_path = '/var/lib/mysql'
# if os.path.islink(old_path):
# hookenv.log('{} is already a symlink, skipping migration'.format(
# old_path))
# return True
# # Ensure our new mountpoint is empty. Otherwise error and allow
# # users to investigate and migrate manually
# files = os.listdir(new_path)
# try:
# files.remove('lost+found')
# except ValueError:
# pass
# if files:
# raise RuntimeError('Persistent storage contains old data. '
# 'Please investigate and migrate data manually '
# 'to: {}'.format(new_path))
# os.chmod(new_path, 0o700)
# if os.path.isdir('/etc/apparmor.d/local'):
# render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
# context={'path': os.path.join(new_path, '')})
# host.service_reload('apparmor')
# host.service_stop('mysql')
# host.rsync(os.path.join(old_path, ''), # Ensure we have trailing slashes
# os.path.join(new_path, ''),
# options=['--archive'])
# shutil.rmtree(old_path)
# os.symlink(new_path, old_path)
# host.service_start('mysql')
|
5,522 | b2371f9c774c605a52ff1a4fae2dd44a856076aa | no=int(input("enter no:"))
# Reverse the digits of the integer read above (e.g. 123 -> 321).
rev=0
while no!=0:
    r=no%10        # take the last digit
    no=no//10      # drop it from the number
    rev=rev*10+r   # append it to the reversed value
print("reverse no is:",rev)
|
5,523 | 2da10163a40c9720ca9deecd9afb0e39aa885546 | from tkinter import *
from PIL import ImageTk,Image
import sys, os
# This will display images and icon
# Minimal Tk window that shows an image and a quit button.
root = Tk()
root.title("Expanding GUI")
# On Ubuntu none of these icon-setting variants worked; kept for reference.
#root.iconbitmap('@/home/gxgarciat/Documents/Tkinter/gdrive.ico')
#root.iconphoto(True, PhotoImage(file="@/home/gxgarciat/Documents/Tkinter/gdrive.ico"))
#root.iconbitmap(os.path.join(sys.path[0], "/home/gxgarciat/Documents/Tkinter/gdrive.ico"))
#root.iconbitmap('~home/gxgarciat/Documents/Tkinter/gdrive.ico')
#root.iconphoto(False, Tk.PhotoImage(file='/home/gxgarciat/Documents/Tkinter/gdrive.ico'))
# Importing images is a 3-step process: open with PIL, wrap in a
# PhotoImage, attach to a Label (which keeps a reference alive).
my_img = ImageTk.PhotoImage(Image.open("googledrive.png"))
my_label = Label(image=my_img)
my_label.pack()
# Adding a quit button
buttonquit = Button(root,text="Exit program",command=root.quit)
buttonquit.pack()
root.mainloop()
|
5,524 | d805a1290c107a8d768417a432e338b182b7cd6b | import numpy as np
class LinearRegressor():
    """Ordinary least-squares linear regression fit by batch gradient descent.

    Data layout convention: X is (n x m) - n features by m samples - and
    y / predictions are (1 x m) row vectors.  theta is an (n x 1) column
    vector, so the hypothesis is theta.T @ X.
    """

    def __init__(self, alpha=0.1, epochs=1):
        self.alpha = alpha      # learning rate
        self.epochs = epochs    # number of full gradient-descent passes
        self.costs = []         # cost recorded after each epoch
        self.theta = None       # (n x 1) parameters, set by fit()

    def _cost_function(self, y_pred, y, m):
        """Return the scalar mean-squared-error cost J = sum((p-y)^2)/(2m).

        y_pred: (1 x m) predictions; y: (1 x m) targets; m: sample count.
        """
        # Vectorized replacement of the original per-sample Python loop.
        return (1.0 / (2 * m)) * np.sum((y_pred - y) ** 2)

    def _hypothesis(self, X):
        """Return (1 x m) predictions theta.T @ X for the (n x m) input X."""
        return np.transpose(self.theta) @ X

    def _cost_function_derivative(self, y_pred, y, X, m):
        """Return the gradient-descent *updated* theta, shape (n x 1).

        NOTE: despite its name this does not return the raw gradient; it
        applies the full update rule theta - alpha*(1/m)*X@(y_pred-y).T,
        matching how fit() assigns the result directly to self.theta.
        """
        gradient = (X @ (y_pred - y).T) / m
        return self.theta - self.alpha * gradient

    def fit(self, X, y):
        """Fit theta to the (n x m) inputs X and the (1 x m) targets y."""
        n, m = X.shape
        # Random initialization, one theta per feature/dimension.
        self.theta = np.random.uniform(-10, 10, (n, 1))
        cost = None  # robustness: defined even when epochs == 0
        for _ in range(self.epochs):
            y_pred = self.predict(X)
            cost = self._cost_function(y_pred, y, m)
            # The "derivative" helper already applies the update rule.
            self.theta = self._cost_function_derivative(y_pred, y, X, m)
            self.costs.append(cost)
        print("Final theta is {} (cost: {})".format(self.theta.T, cost))

    def predict(self, X):
        """Return a (1 x m') array of predictions for the (n x m') input X."""
        return self._hypothesis(X)
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Return the index of *target* in the rotated sorted array *nums*.

        Binary search adapted for rotation: at every step one of the two
        halves is sorted (decided by comparing nums[mid] with nums[0]);
        test whether target lies inside that sorted half and recurse into
        the correct side.  Returns -1 when absent.  O(log n), O(1) space.
        Debug prints and the redundant found/res bookkeeping were removed.
        """
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] >= nums[0]:
                # Left half [left, mid] is sorted.
                if nums[0] <= target < nums[mid]:
                    right = mid - 1
                else:
                    left = mid + 1
            else:
                # Right half [mid, right] is sorted.
                if nums[mid] < target <= nums[-1]:
                    left = mid + 1
                else:
                    right = mid - 1
        return -1
"""
https://leetcode.cn/submissions/detail/320442719/
执行用时:
36 ms
, 在所有 Python3 提交中击败了
73.39%
的用户
内存消耗:
15.2 MB
, 在所有 Python3 提交中击败了
62.74%
的用户
通过测试用例:
195 / 195
"""
|
5,526 | 64bbf2e3b961a6e0b5d7e551278bb21990df2ed9 | import uuid
from fastapi import APIRouter, Depends, HTTPException, Form, Body
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from sqlalchemy.orm import Session
# dependency
from configs.config_sqlalchemy import get_db
# schema
from schema import store_schema
# define the url the client will use to access the token
# tokenUrl tells clients (and the OpenAPI docs) where to obtain a token.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth/login")
# router object
# All endpoints below are mounted under the /auth prefix; the responses
# map only enriches the generated OpenAPI documentation.
router = APIRouter(
    prefix="/auth",
    tags=["AUTHORIZATION AND AUTHENTICATION"],
    responses={
        200:{'description':'Ok'},
        201:{'description':'created'},
        400: {"description": "Bad Request"},
        404: {"description": "Not found"}
    }
)
# register a new account
@router.post("/account/register",
    summary='register to create a new store',
    response_model=store_schema.Store,
    status_code=201
)
async def account_register(
    StoreName: str = Body(...),
    OwnerFirstName: str = Body(...),
    OwnerLastName: str = Body(...),
    OwnerEmail: str = Body(...),
):
    """Register a new store account.

    NOTE(review): stub - returns None, so the endpoint currently responds
    with an empty body despite declaring store_schema.Store as its model.
    """
    return
# account login
@router.post('/login',
    summary='login to get access token',
    status_code=200
)
async def login(form_data: OAuth2PasswordRequestForm = Depends(), db:Session=Depends(get_db)):
    """Exchange username/password form credentials for a bearer JWT.

    Returns 401 with a WWW-Authenticate header when authentication fails.
    NOTE(review): authenticate_user, timedelta, settings and
    create_access_token are not in this module's visible imports - confirm
    they are imported elsewhere or this handler raises NameError.
    """
    user = authenticate_user(email=form_data.username, password=form_data.password, db=db)
    if not user:
        raise HTTPException(
            status_code=401,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    # "sub" carries the user id, the conventional JWT subject claim.
    access_token = create_access_token(
        data={"sub": str(user.id)}, expires_delta=access_token_expires
    )
    return {"access_token": access_token, "token_type": "bearer", "user":user}
|
5,527 | 4daab8b8db1e394e3132ab5550fe0236b67074d8 | from helper import *
# Module-level state shared by the merge-tree construction routines below.
tree_type = TREE_TYPE_SPLIT
file_name = ''        # current dataset file name (set by make_tree)
file_path = ''        # current dataset path (set by make_tree)
split_scalars = {}    # node index -> scalar value
visited = {}          # node index -> visited flag for traverse()
adjacency = {}        # node index -> list of neighbor indices
pairs = {}            # node index -> its persistence-pair partner index
index_map = {}        # node index -> Tree object
postorder_map = {}    # postorder rank -> Tree object
preorder_map = {}     # preorder rank -> Tree object
birth = {}            # node index -> birth node index of its pair
death = {}            # node index -> death node index of its pair
string = ''           # accumulator used by stringify_tree()
class Tree(object):
    """Node of a merge tree.

    Attributes are filled in incrementally while the tree is built:
    index (critical-point id), children/parent (structure), label
    (scalar value), pair/birth/death (persistence pairing), and
    postorder/preorder (traversal ranks).
    """
    def __init__(self):
        # Structural links
        self.children = []
        self.parent = None
        # Identity and scalar value
        self.index = None
        self.label = None
        # Persistence pairing, filled in by add_pairs()
        self.pair = None
        self.birth = None
        self.death = None
        # Traversal ranks, assigned by postorder()/preorder()
        self.postorder = None
        self.preorder = None
    def __str__(self):
        # A node prints as its critical-point index.
        return str(self.index)
def initialize_tree(index):
    """Create and return the root Tree node for critical point *index*.

    Reads the module-level split_scalars/pairs dictionaries and records
    the node in index_map.
    """
    root = Tree()
    root.index = index
    root.label = split_scalars[index]
    root.pair = pairs[index]
    # add mapping to dictionary
    index_map[index] = root
    return root
def add_node(index, parent):
    """Create a Tree node for *index*, attach it under *parent*, return it.

    Mirrors initialize_tree but also wires the parent/children links and
    registers the node in index_map.
    """
    node = Tree()
    node.index = index
    parent.children.append(node)
    node.parent = parent
    node.label = split_scalars[index]
    node.pair = pairs[index]
    # add mapping to dictionary
    index_map[index] = node
    return node
def compare_nodes(a, b):
    """Python 2 cmp-style comparator for node indices.

    Orders primarily by scalar value (split_scalars), breaking ties by
    the index itself; used with list.sort(cmp) in traverse().
    """
    # try to sort using the split_scalars
    # if they are equal, sort using index value
    if split_scalars[a] > split_scalars[b]:
        return 1
    elif split_scalars[a] == split_scalars[b]:
        if a > b:
            return 1
        else:
            return -1
    else:
        return -1
def traverse(index, parent):
    """Depth-first walk of the adjacency graph, building the Tree under *parent*.

    Marks nodes visited and visits each node's neighbors in scalar order
    (Python 2: list.sort takes the cmp-style compare_nodes directly).
    """
    #print index, split_scalars[index]
    visited[index] = True
    adjacency[index].sort(compare_nodes)
    for node in adjacency[index]:
        if not visited[node]:
            current = add_node(node, parent)
            traverse(node, current)
def add_pairs(node):
    """Recursively resolve pair/birth/death indices into Tree objects.

    Must run after the whole tree is built so index_map is complete.
    """
    if(node == None):
        return
    else:
        node.pair = index_map[pairs[node.index]]
        node.birth = index_map[birth[node.index]]
        node.death = index_map[death[node.index]]
        for child in node.children:
            add_pairs(child)
def postorder(node):
    """Assign 1-based postorder ranks to every node and fill postorder_map."""
    # python needs a mutable object for updation
    order = {'index': 1}
    def set_order(node):
        if(node == None):
            return
        else:
            # Children first, then the node itself (postorder).
            for child in node.children:
                set_order(child)
            node.postorder = order['index']
            postorder_map[order['index']] = node
            order['index'] += 1
    set_order(node)
def preorder(node):
    """Assign 1-based preorder ranks to every node and fill preorder_map."""
    # python needs a mutable object for updation
    order = {'index': 1}
    def set_order(node):
        if(node == None):
            return
        else:
            # Node first, then its children (preorder).
            node.preorder = order['index']
            preorder_map[order['index']] = node
            order['index'] += 1
            for child in node.children:
                set_order(child)
    set_order(node)
def stringify_tree(node):
    """Serialize the subtree into the module-level `string` accumulator.

    Each node contributes '{postorder|index|label|birth|death ... }' with
    children nested inside the braces; the accumulated string is returned.
    """
    global string
    if(node == None):
        return
    else:
        string += '{'
        string += str(node.postorder) + '|'
        string += str(node.index) + '|'
        string += str(node.label) + '|'
        string += str(node.birth.label) + '|'
        string += str(node.death.label)
        for child in node.children:
            stringify_tree(child)
        string += '}'
    return string
def get_merge_tree():
    """Load the merge-tree edge CSV into the module dicts and return the root.

    Populates split_scalars, visited and adjacency from the tree CSV
    (Python 2: csv reads the file opened in 'rb').  The root is the
    degree-one node whose scalar is below its only neighbor's.
    """
    # Get merge tree path
    tree_file_arguments = [tree_type, TREE_INFIX, file_name, CSV_EXTENSION]
    tree_file_path = get_output_path(file_path, tree_file_arguments, folder_name = TREES_FOLDER)
    # Read merge tree file
    with open(tree_file_path, 'rb') as csvfile:
        csvfile.readline()
        spamreader = csv.reader(csvfile, delimiter=' ')
        for r in spamreader:
            # Each row: node1,node2,scalar1,scalar2 packed in one field.
            row = r[0].split(',')
            node1 = int(row[0])
            node2 = int(row[1])
            split_scalars[node1] = float(row[2])
            split_scalars[node2] = float(row[3])
            visited[node1] = False
            visited[node2] = False
            if node1 not in adjacency.keys():
                adjacency[node1] = []
            if node2 not in adjacency.keys():
                adjacency[node2] = []
            adjacency[node1].append(node2)
            adjacency[node2].append(node1)
    # The root is a leaf (degree 1) with the smaller scalar of its edge.
    for i in adjacency.keys():
        if len(adjacency[i]) == 1:
            if (split_scalars[i] < split_scalars[adjacency[i][0]]):
                root = i
    return root
def get_persistent_pairs():
    """Load persistence pairs, filling the pairs/birth/death module dicts."""
    # Get persistence pairs
    pairs_file_arguments = [tree_type, PAIRS_INFIX, file_name, CSV_EXTENSION]
    pairs_file_path = get_output_path(file_path, pairs_file_arguments, folder_name = PAIRS_FOLDER)
    with open(pairs_file_path, 'rb') as persistence_pairs:
        persistence_pairs.readline()
        spamreader = csv.reader(persistence_pairs, delimiter=' ')
        for r in spamreader:
            row = r[0].split(',')
            node1 = int(row[0])
            node2 = int(row[1])
            #if (node1 in split_scalars.keys()) and (node2 in split_scalars.keys()):
            # there will be pairs that do not exist in the merge tree
            # they will be removed/ignored subsequently
            pairs[node1] = node2
            pairs[node2] = node1
            # add birth and death values of nodes to dictionaries
            birth[node1] = node1
            death[node1] = node2
            birth[node2] = node1
            death[node2] = node2
def write_tree(node):
    """Write the subtree as CSV tuples (timestep, postorder, value, birth, death).

    NOTE(review): the output file handle is never closed explicitly.
    """
    tuple_file_arguments = [file_name, TXT_EXTENSION]
    tuple_file_path = get_output_path(file_path, tuple_file_arguments, folder_name = TUPLES_FOLDER)
    tuple_file = open(tuple_file_path, 'w')
    fieldnames = ['timestep', 'postorder', 'value', 'birth', 'death']
    writer = csv.writer(tuple_file, delimiter=',')
    writer.writerow(fieldnames)
    def pretty_print_tree(node):
        # Preorder emission of one CSV row per node.
        if(node == None):
            return
        else:
            timestep = file_name.split('tv_')[1]
            values = [timestep, node.postorder, node.label, node.birth.label, node.death.label]
            writer.writerow(values)
            for child in node.children:
                pretty_print_tree(child)
    pretty_print_tree(node)
def print_treemap(node):
    """Print a dotted-path treemap representation of the merge tree (Python 2).

    Each persistence pair becomes a container named by the preorder rank of
    its representative node, nested under the closest ancestor whose
    [preorder, pair.preorder] interval encloses it.
    """
    processed_nodes = {}
    treemap_string = {}
    treemap_value = {}
    treemap_parent = {}
    treemap_container = {}
    def find_treemap_parent(node):
        # Resolve the treemap container for this node's persistence pair.
        if node.preorder not in processed_nodes:
            parent_node = node.parent
            paired_node = node.pair
            parent_found = False
            # keep going up the merge tree till you find a parent that itself and its pair within the range
            while((parent_node != None) and (parent_found == False)):
                if parent_node.preorder < node.preorder < parent_node.pair.preorder:
                    parent_found = True
                else:
                    parent_node = parent_node.parent
            if not parent_found:
                # Top-level container (no enclosing pair interval).
                treemap_container[node.preorder] = str(node.preorder)
                treemap_parent[node] = None
                treemap_parent[node.pair] = node
            else:
                treemap_container[node.preorder] = treemap_container[parent_node.preorder] + "." + str(node.preorder)
                treemap_parent[node.pair] = node
                treemap_parent[node] = parent_node
            # Leaf entries: one per endpoint of the pair, under the container.
            treemap_string[node.preorder] = treemap_container[node.preorder] + "." + str(node.preorder)
            treemap_string[node.pair.preorder] = treemap_container[node.preorder] + "." + str(node.pair.preorder)
            treemap_value[node.pair.preorder] = node.pair.label
            treemap_value[node.preorder] = node.label
            processed_nodes[node.preorder] = True
            processed_nodes[node.pair.preorder] = True
    def get_tree_structure(node):
        # Preorder walk applying find_treemap_parent to every node.
        if(node == None):
            return
        else:
            find_treemap_parent(node)
            for child in node.children:
                get_tree_structure(child)
    get_tree_structure(node)
    for key in treemap_container.keys():
        print str(treemap_container[key]) + ","
    for key in treemap_string.keys():
        # Scalar labels scaled/offset into integers for the treemap tool.
        print str(treemap_string[key]) + ","+ str(int((treemap_value[key]+0.05)*1000))
def print_label(node):
    # Emit a Graphviz node statement: preorder id with its pair's id below it.
    print str(node.preorder) + " [label=\""+ str(node.preorder) + " \\n["+ str(node.pair.preorder) + "]"+"\"]"
def print_edge(node):
    # Emit a Graphviz edge from the node's parent to the node (preorder ids).
    print str(node.parent.preorder) + "->" + str(node.preorder)
def print_tree_dot(node):
    """Print the subtree in Graphviz dot syntax (caller wraps in digraph {})."""
    if(node == None):
        return
    else:
        print_label(node)
        for child in node.children:
            print_edge(child)
            print_tree_dot(child)
def make_tree(name, path):
    """Top-level driver: build the merge tree for one dataset and print it.

    Sets the module-level file name/path, loads the tree and persistence
    pairs, constructs the Tree, assigns traversal orders, and emits the
    treemap representation.
    """
    global file_name, file_path
    file_name = name
    file_path = path
    root = get_merge_tree()
    get_persistent_pairs()
    tree = initialize_tree(root)
    traverse(root, tree)
    add_pairs(tree)
    postorder(tree)
    preorder(tree)
    #write_tree(tree)
    print_treemap(tree)
    #print "digraph {"
    #print_tree_dot(tree)
    #print "}"
|
5,528 | 6cc23a3e2fa3b1baddf05b30a1054a7faf0371a6 | # -*- coding: utf-8 -*-
from .base import BaseSchema
from marshmallow import fields
class BaseTickSchema(BaseSchema):
    """Serialization schema for a market tick.

    Fields:
        Time:   quote timestamp
        High:   highest price
        Low:    lowest price
        Volume: traded volume
        Last:   latest price
    """
    Time = fields.String()
    High = fields.String()
    Low = fields.String()
    Volume = fields.String()
    Last = fields.String()
|
5,529 | 530c2c185e57ffd3ac64628fc9f7f7985b0480fe | #!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
# Simulation feature switches (read throughout the simulation class below).
ENABLE_JITTER = False             # add sub-nanosecond random timing jitter
ENABLE_DROPPED_PACKETS = False    # randomly drop packets per packet_error_rate
ENABLE_LOCATION_HISTORY = True    # record every receiver location snapshot
ENABLE_BEACON_DELAY = False       # passed through to the beacon helper
class simulation:
    """Python 2 simulation controller for a beacon/receiver geolocation testbed.

    Generates random receiver positions, simulates reception of a common
    beacon packet at each receiver (with per-receiver time of flight),
    and inserts the resulting binary payloads into a PostgreSQL table.
    """
    def __init__(self):
        """Initialize helper objects and per-run counters."""
        self.geo_utils = geo_utils()
        self.DEBUG = True
        self.rx_number = 4            # default receiver count (overridden by init_sim)
        self.packet_number = 0        # incremented once per receiver_chain() call
        self.iterator = 1             # first-iteration flag for location handling
        self.packet_error_rate = 0.1  # probability a packet is dropped
        self.all_locations = []       # history of location snapshots
    def init_sim(self,n):
        """
        initialize simulation for n receivers.

        Exits the process when n < 3 (too few receivers to geolocate).
        Assigns each receiver a random location, its time of flight from
        the transmitter, and a 1-based team id.
        """
        self.beacon = beacon(ENABLE_BEACON_DELAY)
        self.data = data_utils(n)
        random.seed()
        if n < 3:
            print 'Number of receivers %i is less than three.' %n
            print 'Simulation controller will not run.'
            print 'Now exiting.'
            sys.exit()
        self.data.set_rx_number(n)
        tx_loc = test_coords.get_tx_coords()
        self.data.set_tx_location(tx_loc)
        # self.data.reset_rx_location()
        for i in range(n):
            rx_loc = alex_random.get_random_coord()
            if self.DEBUG:
                print "\n\n\n\n\n\nstore location: ", rx_loc
                print '\n\n\n\n\n\n'
            self.data.set_rx_location(i,rx_loc)
            tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
            self.data.set_rx_time_delay(tof)
            id = i+1
            self.data.set_rx_team_id(id)
            if self.DEBUG:
                print 'tx_loc: ', tx_loc
                print 'rx_loc: ', rx_loc
                print 'time: ', repr(tof)
                print 'id: ', id
    def rx_beacon_packet(self):
        """
        receive a single beacon packet. this will then be copied n times.
        this tries to ensure clock synchronization across receivers.
        """
        self.beacon.make_packet()
        rx_packet = self.beacon.tx_packet()
        # High-precision receive timestamp shared by all simulated receivers.
        rx_time = np.float128('%.20f'%(time.time()))
        if self.DEBUG:
            print 'rx_time: ', repr(rx_time)
        self.data.set_timestamp_base(rx_time)
        self.data.set_beacon_packet(rx_packet)
    def receiver_chain(self,h):
        """
        simulate receiver chain for n repeaters

        For every receiver, builds a binary payload (packet number, team
        id, location, time of arrival, beacon echo), optionally drops it,
        logs the fields to data_in.data, and inserts the payload into the
        blob_table of the sdrc_db PostgreSQL database at host *h*.
        """
        self.host = h
        n = self.data.get_rx_number()
        beacon_packet = self.data.get_beacon_packet()
        time_base = self.data.get_timestamp_base()
        # lists containing data for all current teams
        team_id = self.data.get_rx_team_id()
        location = self.data.get_rx_location()
        if ENABLE_LOCATION_HISTORY:
            self.record_location_history(location)
        tof = self.data.get_rx_time_delay()
        if self.DEBUG:
            print "\n\n\n\n\n\nretrieve location: ", location
            print ''
            print "type(tof): ", type(tof)
        conn = psycopg2.connect(host = self.host,
                                user = "sdrc_user",
                                password = "sdrc_pass",
                                database = "sdrc_db")
        cur = conn.cursor()
        for i in range(n):
            # Append this receiver's fields to the text log as well.
            f = open('data_in.data', 'a')
            (rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
            (beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
            # packet number
            payload1 = struct.pack('!H', self.packet_number & 0xffff)
            f.write(str(self.packet_number) + ';')
            # team id
            ident = team_id[i]
            payload2 = struct.pack('!H', ident & 0xffff)
            f.write(str(ident) + ';')
            # location: reuse the initial one on the first pass, then move.
            if (self.iterator == 1):
                loc = location[i]
            else:
                # old_loc = location[i]
                # loc = alex_random.random_move(old_loc)
                loc = alex_random.get_random_coord()
                self.data.set_rx_location(i,loc)
            f.write(str(loc)+';')
            self.iterator += 1
            payload3 = new_sim_utils.pack_loc(loc)
            # toa: shared beacon timestamp plus this receiver's flight time.
            t = tof[i]
            toa = time_base + t
            # if (ENABLE_JITTER):
            #     jitter = self.random_timing_jitter()
            #     toa = toa+jitter
            # else:
            #     pass
            if self.DEBUG:
                print "t = tof[i]: ", repr(t)
                print "type(t): ", type (t)
                print "toa = time_base + t: ", repr(toa)
                print "type(toa): ", type(toa)
            payload4 = new_sim_utils.pack_time(toa)
            f.write(repr(toa)+';')
            # beacon payload
            payload5 = struct.pack('!H', rx_pktno & 0xffff)
            f.write(str(rx_pktno) + ';')
            payload6 = struct.pack('!H', beacon_ID & 0xffff)
            f.write(str(beacon_ID) + '\n')
            f.close()
            # check if packet dropped
            drop = self.drop_packet()
            # this if evaluates true even if drop == False
            # if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
            #     print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
            #     print 'drop ', drop
            #     print (ENABLE_DROPPED_PACKETS and drop)
            #     print 'packet dropped'
            #     payload = ''
            if ENABLE_DROPPED_PACKETS:
                print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
                print 'drop ', drop
                if drop: # if drop == 'True'
                    print 'drop ', drop
                    print 'packet dropped'
                    payload = ''
                else: # if drop == 'False'
                    payload = (payload1 + payload2 +
                               payload3 + payload4 +
                               payload5 + payload6)
            else: # if drop == 'False'
                payload = (payload1 + payload2 +
                           payload3 + payload4 +
                           payload5 + payload6)
            print "len(payload): ", len(payload)
            cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
            conn.commit()
        cur.close()
        conn.close()
        # One beacon packet per call: same number for all n receiver copies.
        self.packet_number += 1
    def record_location_history(self,loc):
        # Keep a snapshot of every receiver-location list handed to us.
        self.all_locations.append(loc)
        # if self.DEBUG:
        #     print 'all locations:\n', self.all_locations
    # def write_location_history(self):
    #     # f = open('location_history','w+')
    #     for i in self.all_locations:
    #         print repr(i[0][0][0]), repr(i[0][0][1]))
    #     # f.write(repr(i)+'\n')
    #     print '\n\n\n\n\n\n\n'
    #     print len(i)
    #     # f.close()
    #     kml_write = sdr_kml_writer.kml_writer()
    #     for i in range(0,len(x_results)):
    #         coord = str(x_results[i])+','+str(y_results[i])
    #         kml_write.add_placemark('','',coord)
    #     kml_write.write_to_file('geoloc_kml_file.kml')
    def random_timing_jitter(self):
        """Return a random jitter in [0, 1) nanoseconds (seconds as float)."""
        r = random.uniform(0,1)
        jitter = r*1e-9
        if self.DEBUG:
            print 'Random timing jitter %f seconds' %(jitter)
        return jitter
    def drop_packet(self):
        """Return True with probability packet_error_rate (simulated loss)."""
        r = random.uniform(0,1)
        print 'random value: ', r
        print 'error rate: ', self.packet_error_rate
        if (r > self.packet_error_rate):
            drop = False
        else:
            drop = True
        if self.DEBUG:
            print 'Probability of dropped packet: ', self.packet_error_rate
            print 'Packet dropped? ', drop
        return drop
# Command-line entry point: parse options, then run the configured number
# of beacon/receiver-chain iterations against the given database host.
if __name__=='__main__':
    from optparse import OptionParser
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage=usage)
    parser.add_option("", "--host", type="string", default="128.173.90.68",
                      help="database host in dotted decimal form [default=%default]")
    parser.add_option("-r", "--radios", type="int", default="3",
                      help="number of field radios to simulate [default=%default]")
    parser.add_option("-i", "--iterations", type="int", default="10",
                      help="number of times to run simulation [default=%default]")
    # parser.add_option("-d", "--drop", action="store_true", default=False,
    #                   help="simlulate dropped packets [default=%default]")
    # parser.add_option("-j", "--jitter", type="store_true", default=False,
    #                   help="simulate clock jitter, drift... [default=%default]")
    (options, args) = parser.parse_args()
    main = simulation()
    main.init_sim(options.radios)
    # Each iteration: receive one beacon packet, then fan it out to all radios.
    for i in range(options.iterations):
        main.rx_beacon_packet()
        main.receiver_chain(options.host)
    # main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close()
|
5,530 | 25ce31aee44c80ce4a5c1af7d1ca12c73c14df47 | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
    """A blog post authored by a site user."""

    title = models.CharField(max_length=100)
    content = models.TextField()
    # BUG FIX: ``auto_now_add`` expects a boolean; the previous
    # ``auto_now_add=timezone.now`` passed a (truthy) callable, which only
    # worked by accident. Make the intent explicit.
    date_posted = models.DateTimeField(auto_now_add=True)
    # Deleting the user deletes their posts.
    author = models.ForeignKey(to=User, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Used by Django class-based views to redirect after a save.
        return reverse('post-detail', kwargs={'pk': self.pk})
class SurveyHistory(models.Model):
    """One boolean survey response recorded against a post."""

    post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
    record = models.BooleanField()
    # BUG FIX: ``auto_now_add`` takes a boolean, not a callable; the old
    # ``auto_now_add=timezone.now`` was merely truthy.
    recorded_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.post.title
|
5,531 | 4df747b3ff254e0ccc4483acd7be12f3441bbcd8 |
# Learn-Python-the-Hard-Way style exercise: prompt for several values and
# echo them back. Python 2 only (``print`` statement, ``raw_input``).
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weigh?",
weight = raw_input()
print "So, you're %r old, %r tall, and %r heavy." % (age, height, weight)
#raw_input does not exist in Python 3.x while input() does. raw_input() returns a string, and input() tries to run the input as a Python expression.
# Since getting a string was almost always what you wanted, then use int(raw_input()) to get a string back.
# do NOT use eval(raw_input()), because you need to type answers in quotes like "1" and you don't want '"1"' in quotes as the answer. You only want the number 1.
# http://mail.python.org/pipermail/tutor/2001-February/003494.html
print "How many meals do you eat a day?",
eat = int(raw_input())
print "How many times do you brush your teeth a day?",
brush = int(raw_input())
print "So you eat %r meals and brush %r times a day." % (eat, brush)
print "type anything"
# NOTE(review): non-numeric input here raises ValueError — intentional
# in the exercise, but worth knowing.
x = int(raw_input())
|
5,532 | 505689803c8f4490619ab1a7579fde1e2c18c538 | from datetime import datetime
import struct
BEACON_LENGTH = 84
EPS_LENGTH = 20
COM_LENGTH = 10
# reverse engineered
ADCS1_LENGTH = 7
ADCS2_LENGTH = 6
AIS_LENGTH = 20
class EPS(object):
    """Electrical Power System housekeeping section of the beacon.

    Parses the 20-byte big-endian EPS record and scales the raw counts
    into engineering units (mV / mA).
    """

    def __init__(self, eps_data):
        if len(eps_data) != EPS_LENGTH:
            # BUG FIX: previously raised ``InputException``, a name that is
            # never defined anywhere in this module, so a bad length
            # crashed with a NameError instead of a meaningful error.
            raise ValueError(
                "EPS data must be %d bytes, got %d" % (EPS_LENGTH, len(eps_data)))
        self.boot_count, self.uptime, self.rt_clock, self.ping_status, self.subsystem_status,\
            self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,\
            self.temp, self.pa_temp, self.main_voltage = struct.unpack(">HIIBHBbbBbbb", eps_data)
        # Scale raw telemetry counts into physical units.
        self.battery_voltage *= 40
        self.cell_diff *= 4
        self.battery_current *= 10
        self.solar_power *= 20

    def __str__(self):
        eps_str = ("""EPS:
Boot count:\t\t{0}
Up time:\t\t{1} seconds
Real time clock:\t{2}
Battery voltage:\t{3} mV
Cell difference:\t{4:.1f} mV
Battery current:\t{5} mA
Solar power:\t\t{6}
Temperature:\t\t{7} C
PA temperature:\t\t{8} C""".format(
            self.boot_count, self.uptime, datetime.fromtimestamp(self.rt_clock),
            self.battery_voltage, self.cell_diff, self.battery_current, self.solar_power,
            self.temp, self.pa_temp))
        return eps_str
class COM(object):
    """Radio (COM) housekeeping section of the beacon."""

    def __init__(self, com_data):
        unpacked = struct.unpack(">HHHhBB", com_data)
        (self.boot_count,
         self.packets_received,
         self.packets_send,
         self.latest_rssi,
         self.latest_bit_correction,
         self.latest_byte_correction) = unpacked
        # Only the low 13 bits of the boot counter are significant.
        self.boot_count &= 0x1fff

    def __str__(self):
        return ("""COM:
Boot count:\t\t{0}
Packets received:\t{1}
Packets send:\t\t{2}
Latest rssi:\t\t{3}
Latest bit corrections:\t{4}
Latest byte corrections:{5}""".format(
            self.boot_count, self.packets_received, self.packets_send,
            self.latest_rssi, self.latest_bit_correction, self.latest_byte_correction))
# Reverse engineered classes
class ADCS1(object):
    """First ADCS housekeeping section (reverse engineered)."""

    def __init__(self, adcs1_data):
        bx, by, bz, state = struct.unpack(">hhhB", adcs1_data)
        self.bdot = (bx, by, bz)
        self.state = state

    def __str__(self):
        adcs1_str = ("""ADCS1:
State:\t{}
Bdot:\t{}""".format(self.state, self.bdot))
        return adcs1_str
class ADCS2(object):
    """Second ADCS housekeeping section: the raw gyro readings."""

    def __init__(self, adcs2_data):
        gx, gy, gz = struct.unpack(">hhh", adcs2_data)
        self.gyro = (gx, gy, gz)

    def __str__(self):
        adcs2_str = ("""ADCS2:
Gyro:\t{}""".format(self.gyro))
        return adcs2_str
class AIS(object):
    """AIS receiver housekeeping section (reverse engineered).

    Several fields decode to zero in every observed beacon and could not
    be identified, so only the boot count and the unique-MSSI counter
    are kept.
    """

    def __init__(self, ais_data):
        fields = struct.unpack(">HhhH12s", ais_data)
        self.boot_count = fields[0]
        self.unique_mssi = fields[3]

    def __str__(self):
        return ("""AIS:
Boot count:\t{}
Unique MSSI:\t{}""".format(self.boot_count, self.unique_mssi))
## Beacon
# The beacon class takes a string of bytes as input, and parses it to generate
# a representation of the beacon format used by AASUAT4
# The beacon format is as follows:
# [ 1 byte | 20 bytes | 10 bytes | 7 bytes | 6 bytes | 20 bytes  | 20 bytes ]
# [ Valid  | EPS      | COM      | ADCS1   | ADCS2   | AIS1      | AIS2     ]
# (sizes match the *_LENGTH constants above; earlier revisions of this
# comment listed wrong byte counts for EPS and COM)
#
# For each subsystem, which are valid, are the corresponding data bytes passed to another
# class which parses the information.
#
# The __str__ method returns a human readable string with key information from the beacon
class Beacon(object):
    """Parsed AAUSAT4 beacon.

    The raw frame is a leading validity byte followed by six subsystem
    payloads (EPS, COM, ADCS1, ADCS2, AIS1, AIS2). Bit *n* of the
    validity byte marks payload *n* as valid; only valid payloads are
    parsed and stored in ``self.subsystems``.
    """

    # (name, parser) per payload in wire order; bit i of the valid byte
    # corresponds to entry i of this table.
    _SECTIONS = (('EPS', EPS), ('COM', COM), ('ADCS1', ADCS1),
                 ('ADCS2', ADCS2), ('AIS1', AIS), ('AIS2', AIS))

    def __init__(self, raw_data):
        if len(raw_data) != BEACON_LENGTH:
            raise ValueError("Malformed beacon (incorrect length)")
        self.subsystems = {}
        layout = ("B" + "{}s" * 6).format(EPS_LENGTH, COM_LENGTH, ADCS1_LENGTH,
                                          ADCS2_LENGTH, AIS_LENGTH, AIS_LENGTH)
        unpacked = struct.unpack(layout, raw_data)
        valid = unpacked[0]
        for bit, ((name, parser), payload) in enumerate(zip(self._SECTIONS,
                                                            unpacked[1:])):
            if valid & (1 << bit):
                self.subsystems[name] = parser(payload)

    def __str__(self):
        # One human-readable section per valid subsystem, newline-terminated.
        return "".join(str(subsystem) + "\n"
                       for subsystem in self.subsystems.values())
|
5,533 | a99426c0751885f17078e709fd523cf3a26f5286 | '''
sin(x) = x^1/1! - x^3/3! + x^5/5! - x^7/7! + …..
Input : x, n ( No. of terms I want in series )
Input : 3.14, 10
Output : sin(3.14) = sin(180) = 0
Radians vs Degrees
( 0, 30, 60, 90 ….)
2pi = 360
Pi = 180
Pseudo code :
1.Take input variables radians,num
2. sin = 0
3. Indices = 1
4. odd = 1
4. Iterate indices from 1 to num with condition index <= num
If index%2 == 1
sin = sin + exponent(radians,odd)/factorial(odd)
If index%2 == 0
sin = sin - exponent(radians,odd)/factorial(odd)
Index += 1
odd += 2
5. Print the value of the sin
'''
def exponent(base, index):
    """Return base**index for a non-negative integer index.

    Keeps the module's conventions: 0**0 yields -1 (used as an
    "undefined" sentinel), 0**k is 0, and base**0 is 1 otherwise.
    """
    if base == 0 and index == 0:
        return -1
    if index == 0:
        return 1
    if base == 0:
        return 0
    product = 1
    for _ in range(index):
        product *= base
    return product
def factorial(num):
    """Return num! computed iteratively; 0! (and any num < 1) is 1."""
    result = 1
    for k in range(2, num + 1):
        result *= k
    return result
# Evaluate sin(x) at x = 3*pi/2 with a 15-term Taylor series:
# sin(x) = x - x^3/3! + x^5/5! - ...
radians = 3*3.14159/2
num = 15
sin = 0
index = 1
odd = 1
while(index <= num):
    # Odd-numbered terms are added, even-numbered terms subtracted.
    term = exponent(radians,odd)/factorial(odd)
    if(index%2 == 1):
        sin = sin + term
    else:
        sin = sin - term
    index += 1
    odd += 2
print("The value of sin for the given radians is :",sin)
|
5,534 | 04b02931b749ad06a512b78ca5661ae1f5cb8a9c | from random import randint
from Ball import Ball
from Util import Vector, Rectangle
class Player:
    """A square player piece drawn on a tkinter-style canvas.

    The on-screen shape is created once at the canvas origin and then
    moved by deltas, so the canvas position always equals the cumulative
    sum of (pos - old_pos) updates applied through ``set``.
    """
    RADIUS = 10            # half the side length of the square, in pixels
    COLOR1 = "#80d6ff"     # fill colour for team 1
    COLOR2 = "#ff867c"     # fill colour for team 2
    OUTLINE = "#000000"
    @property
    def right(self):
        # Point RADIUS to the right of centre.
        # NOTE(review): computed with ``sub`` — assumes Vector's x axis
        # semantics make this the "right" edge; confirm in Util.Vector.
        return self.pos.sub(Vector(Player.RADIUS, 0))
    @property
    def left(self):
        # Point RADIUS to the left of centre (see note on ``right``).
        return self.pos.add(Vector(Player.RADIUS, 0))
    @property
    def color(self):
        # Team-dependent fill colour; returns None for any other team id.
        if self.team == 1:
            return Player.COLOR1
        elif self.team == 2:
            return Player.COLOR2
    def __init__(self, canvas, team):
        self.canvas = canvas
        self.team = team
        self.pos = Vector(0, 0)      # current centre position
        self.old_pos = Vector(0, 0)  # previous centre, used for delta moves
        self.shape = None            # canvas item id, created lazily in paint()
    def set(self, v):
        # Move to absolute position ``v`` and repaint by delta.
        self.old_pos = self.pos
        self.pos = v
        self.paint()
    def move(self, v: Vector):
        # Relative move by vector ``v``.
        self.set(self.pos.add(v))
    def move_to_point(self, point: Vector):
        # Step towards ``point`` with a random speed in [0.1, 1.0].
        v = randint(1, 10) / 10
        self.move(point.sub(self.pos).norm().mul(Vector(v, v)))
    def get_ball(self, ball):
        # Park the ball on this player's leading edge (team-dependent side).
        if self.team == 1:
            ball.set(self.right)
        elif self.team == 2:
            ball.set(self.left)
    def paint(self):
        # Lazily create the square at the origin, then translate it by the
        # delta since the last position — keeping canvas state in sync.
        if self.shape is None:
            self.shape = self.canvas.create_rectangle(-Player.RADIUS, -Player.RADIUS, Player.RADIUS, Player.RADIUS,
                                                      outline=Player.OUTLINE, fill=self.color)
        delta = self.pos.sub(self.old_pos)
        self.canvas.move(self.shape, delta.x, delta.y)
    def rectangle(self) -> Rectangle:
        # Axis-aligned bounding box centred on the player.
        return self.pos.rect(Player.RADIUS)
    def ball_hit_test(self, ball: Ball) -> bool:
        # True when the ball's centre lies inside this player's rectangle.
        return self.rectangle().hit(ball.pos)
|
5,535 | be5a683309317f1f6ebc20ad3511fd2b2510e806 | from django.http.response import HttpResponse
from django.shortcuts import render , HttpResponse
import requests
from django.conf import settings
from .forms import WeatherForm
# Create your views here.
def get_weather(request):
    """Show the coordinates form and, on a valid POST, fetch the met.no
    location forecast for the submitted latitude/longitude."""
    form = WeatherForm()
    error = ""
    output = {}
    if request.method == 'POST':
        form = WeatherForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            query = "weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s" % (
                cleaned['latitude'], cleaned['longitude'])
            url = settings.WEATHER_URL + query
            # met.no rejects requests that carry no User-Agent header.
            headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64;"}
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                output = response.json()
            else:
                error = response.text
    return render(request=request, template_name="core/weather.html",
                  context={'form': form, 'error': error, "output": output})
5,536 | ba94a69ac356969ab593afc922a2517f4713771f | __title__ = 'FUCKTHEINTRUDERS'
__description__ = 'Checking for Intruders in my locality'
__version__ = '0.0.1'
__author__ = 'Shivam Jalotra'
__email__ = 'shivam_11710495@nitkkr.ac.in'
__license__ = 'MIT 1.0'
|
5,537 | a638504737d0069d4fa40b0fc5026203904563e8 | from decimal import Decimal
from django.conf import settings
from blood.models import Bank, Blood
class Cart(object):
    """Session-backed shopping cart for blood units.

    Stored in the Django session under ``settings.CART_SESSION_ID`` as
    ``{blood_id: {'quantity': int, 'price': str}}`` — string keys/values
    keep the structure JSON-serialisable.
    """
    def __init__(self, request):
        # Bind to (and lazily create) this session's cart dict.
        self.session = request.session
        cart = self.session.get(settings.CART_SESSION_ID)
        if not cart:
            cart = self.session[settings.CART_SESSION_ID] = {}
        self.cart = cart
    def add(self, blood, quantity=1, update_quantity=False):
        # Add ``quantity`` units (or overwrite the stored quantity when
        # ``update_quantity`` is True), capped by the stock on the Blood
        # model. Requests that would exceed stock are silently ignored.
        blood_id = str(blood.id)
        max_quantity = Blood.objects.get(id=blood.id).stock
        if blood_id not in self.cart:
            self.cart[blood_id] = {
                'quantity': 0, 'price': str(blood.price)}
        if update_quantity and self.cart[blood_id]['quantity'] <= max_quantity:
            self.cart[blood_id]['quantity'] = quantity
        elif int(self.cart[blood_id]['quantity']+quantity) <= max_quantity:
            self.cart[blood_id]['quantity'] += quantity
        self.save()
    def save(self):
        # Write back and mark the session dirty so Django persists it.
        self.session[settings.CART_SESSION_ID] = self.cart
        self.session.modified = True
    def remove(self, blood):
        # Drop the item entirely, if present.
        blood_id = str(blood.id)
        if blood_id in self.cart:
            del self.cart[blood_id]
            self.save()
    def __iter__(self):
        # Yield cart items enriched with their Blood instance, a Decimal
        # price and a per-item total. Mutates self.cart in place.
        blood_ids = self.cart.keys()
        bloods = Blood.objects.filter(id__in=blood_ids)
        for blood in bloods:
            self.cart[str(blood.id)]['blood'] = blood
        for item in self.cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item
    def __len__(self):
        # Total number of units (not distinct products) in the cart.
        return sum(item['quantity'] for item in self.cart.values())
    def get_total_price(self):
        return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())
    def clear(self):
        # Remove the whole cart from the session.
        del self.session[settings.CART_SESSION_ID]
        self.session.modified = True
|
5,538 | 3beaea1f2b1b085a60bdc5e53f4e6d9aff7e8b6f | import cv2,os
import sqlite3
# Capture device and Haar cascade used to detect faces for enrollment.
cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('Classifiers/face.xml')
i = 0           # number of face samples captured so far
offset = 50     # pixel margin cropped around each detected face
def create_or_open_db(db_file):
    # Open ``db_file`` with sqlite3, creating the PEOPLES / PICTURES /
    # TRAINER tables on first use. Returns the open connection (caller
    # must close it). Python 2 print statements — this module targets
    # Python 2.
    db_is_new = not os.path.exists(db_file)
    conn = sqlite3.connect(db_file)
    if db_is_new:
        print 'Creating schema'
        sql = '''create table if not exists PEOPLES(
        ID INTEGER PRIMARY KEY,
        Name TEXT);'''
        sql_image = '''create table if not exists PICTURES(
        ID INTEGER PRIMARY KEY AUTOINCREMENT,
        Picture BLOB,
        Type TEXT,
        File_name TEXT);'''
        sql_trainer = '''create table if not exists TRAINER(
        ID INTEGER PRIMARY KEY,
        File BLOB,
        Type TEXT,
        File_name TEXT);'''
        conn.execute(sql) # shortcut for conn.cursor().execute(sql)
        conn.execute(sql_image) # create image table
        conn.execute(sql_trainer) # create trainer table
    else:
        print 'Schema exists\n'
    return conn
def insertOrUpdate(Id, Name):
    """Insert a person into PEOPLES, or update the name if the ID exists.

    SECURITY FIX: the previous implementation built SQL by string
    concatenation of user-supplied values, which is vulnerable to SQL
    injection and broke outright on names containing a quote. All values
    are now passed as bound parameters.
    """
    conn = sqlite3.connect("FaceBase.db")
    try:
        cursor = conn.execute("SELECT * FROM PEOPLES WHERE ID=?", (Id,))
        exists = cursor.fetchone() is not None
        if exists:
            conn.execute("UPDATE PEOPLES SET NAME=? WHERE ID=?", (Name, Id))
        else:
            conn.execute("INSERT INTO PEOPLES(ID,NAME) VALUES(?,?)", (Id, Name))
        conn.commit()
    finally:
        conn.close()
def insert_picture(picture_file):
    """Store the image at ``picture_file`` as a BLOB row in PICTURES.

    The file's extension and base name are stored alongside the bytes.
    FIX: the connection is now always closed (it previously leaked).
    """
    conn = create_or_open_db('FaceBase.db')
    try:
        with open(picture_file, 'rb') as input_file:
            ablob = input_file.read()
        base = os.path.basename(picture_file)
        afile, ext = os.path.splitext(base)
        sql = '''INSERT INTO PICTURES
        (PICTURE, TYPE, FILE_NAME)
        VALUES(?, ?, ?);'''
        conn.execute(sql, [sqlite3.Binary(ablob), ext, afile])
        conn.commit()
    finally:
        conn.close()
# picture_file = "./dataSet/face- 2.1.jpg"
# insert_picture(conn, picture_file)
# conn.close()
# Enrollment script: ask for an id/name, register them, then capture ~70
# cropped face samples from the webcam into dataSet/. Python 2 only
# (``raw_input``); requires a camera and a GUI for cv2.imshow.
id=raw_input('Digite o id ')
name=raw_input('Digite o Nome ')
create_or_open_db('FaceBase.db')
insertOrUpdate(id,name)
while True:
    ret, im =cam.read()
    gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    faces=detector.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(100, 100), flags=cv2.CASCADE_SCALE_IMAGE)
    for(x,y,w,h) in faces:
        # Save a crop padded by ``offset`` pixels around the detection.
        i=i+1
        cv2.imwrite("dataSet/face-"+id +'.'+ str(i) + ".jpg", gray[y-offset:y+h+offset,x-offset:x+w+offset])
        #picture_file = "./dataSet/face-"+id +'.'+ str(i) + ".jpg"
        #insert_picture(picture_file)
        cv2.rectangle(im,(x-50,y-50),(x+w+50,y+h+50),(225,0,0),2)
        cv2.imshow('im',im[y-offset:y+h+offset,x-offset:x+w+offset])
        cv2.waitKey(100)
    # Stop after 70 samples have been written.
    if i>70:
        cam.release()
        cv2.destroyAllWindows()
        break
5,539 | 88542a18d98a215f58333f5dd2bf5c4b0d37f32f | x = 5
print(x , " "*3 , "5")
print("{:20d}".format(x))
|
5,540 | acf409f2e56cd16b7dc07476b49b9c18675f7775 | from PIL import Image
from flask_restplus import Namespace, Resource
from werkzeug.datastructures import FileStorage
from core.models.depthinthewild import DepthInTheWild
from core.utils import serve_pil_image
# Namespace and shared multipart-upload parser for the NYUDepth endpoints.
api = Namespace('nyudepth', description='Models Trained on NYUDepth')
upload_parser = api.parser()
upload_parser.add_argument('image', location='files',
                           type=FileStorage, required=True)
@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):
    """Run the DepthInTheWild model and return the depth map as an image."""

    def post(self):
        parsed = upload_parser.parse_args()
        source_image = Image.open(parsed['image'].stream)
        model = DepthInTheWild()
        _, depth_map_img = model.transform(source_image)
        return serve_pil_image(depth_map_img)
@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):
    """Run the DepthInTheWild model and return the raw depth values."""

    def post(self):
        parsed = upload_parser.parse_args()
        source_image = Image.open(parsed['image'].stream)
        model = DepthInTheWild()
        depth_map, _ = model.transform(source_image)
        return dict(depth_map=depth_map)
|
5,541 | 0295d6ba962d099e76110c7a0e39748e3163e300 | #!/usr/bin/env python
###########################################################################
# 1) connect to the MQTT broker
# 2) subscribe to the available data streams
# 3) log to google sheets
# 4) notify on critical events on the telegram channel
###########################################################################
import time
import datetime
import os
import string
import paho.mqtt.client as mqtt
#import requests
#from googleapiclient import discovery
#from oauth2client import client
#from oauth2client import tools
#from oauth2client.file import Storage
import telepot
import json
from influxdb import InfluxDBClient
import sys
# --- Module configuration -------------------------------------------------
DEBUG = False
UTC_OFFSET = 3 # hours of difference between UTC and local (Jerusalem) time
RECORD_INTERVAL = 5*60 # number of seconds between subsequent records in google sheets and InfluxDB
NOTIFY_INTERVAL = 1*60 # number of seconds between subsequent notifications on telegram
HOME_DIR = '/home/pi' # home directory
localTimeOut = 120 # Local MQTT session timeout
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
last_record = {}  # topic -> time.time() of last DB record
last_notify = {}  # topic -> time.time() of last telegram notification
# Load secrets/endpoints from config.json next to this script.
with open( os.path.join(__location__, 'config.json'), 'r') as f:
    config = json.load(f)
telegramToken = config['telegramToken']
RPi_HOST = config['RPi_HOST']
SPREADSHEET_ID = config['SPREADSHEET_ID']
API_KEY = config['API_KEY']
# Google Sheets constants (currently unused — the Sheets path is commented out).
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
NUM_ENTRIES_CELL = "InputData!E2"
SHEET_ID = 0
localBroker = RPi_HOST # Local MQTT broker
localPort = 1883 # Local MQTT port
# Alerting limits.
MAX_TEMPERATURE = 30
MIN_TEMPERATURE = 15
CARBON_MONOXIDE_ADC_THRESH = 5000
GAS_ALL_ADC_THRESH = 12000
WARM_UP_THRESH = 300 # seconds from start-up before gas sensors are trusted
topicsOfInterest = ["/sensor/Chipa/humidity",
                    "/sensor/Chipa/temperature",
                    "/sensor/Chipa/CO",
                    "/sensor/Chipa/All_Gas",
                    "/sensor/livingRoom/alarm",
                    "/sensor/MotionHUE",
                    "/empty"
                    ]
def getUTC_TIME():
    """Return the current UTC wall-clock time as a naive ``datetime``."""
    now_utc = datetime.datetime.utcnow()
    return now_utc
def pushSample(sample, topic):
    # Publish a single sample (stringified) on the module-global MQTT client.
    global client
    client.publish(topic, str(sample))
#Generic Init — runs at import time, before the MQTT client is created below.
print ("Initializing...")
def on_connect(client, userdata, flags, rc):
    # paho-mqtt connect callback.
    print("Connected with result code "+str(rc))
    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    # NOTE: subscribes to every topic ("#"); on_message filters down to
    # topicsOfInterest.
    client.subscribe("#")
def notifyTelegram(message):
    # Send ``message`` to the Telegram channel via the module-global bot.
    # NOTE(review): the chat id is hard-coded — confirm it is the intended
    # recipient before reuse.
    print("Notifying Telegram: "+message)
    bot.sendMessage(504721552, message)
def isNotifyTime(topic):
    """Rate-limit notifications per topic.

    Returns True when this topic has never fired before, or when the last
    notification is older than NOTIFY_INTERVAL seconds. When True is
    returned, the per-topic timestamp in ``last_notify`` is refreshed as
    a side effect.
    """
    global last_notify
    now = time.time()
    if topic in last_notify:
        due = (now - last_notify[topic]) > NOTIFY_INTERVAL
    else:
        last_notify[topic] = 0  # first occurrence always notifies
        due = True
    if due:
        last_notify[topic] = now
    return due
def limitsExsess(topic, value):
    """ Check the value for limits according to topic.
    If out of limit, notify over telegram.

    Returns True when a limit violation was notified, False otherwise.
    NOTE: the limit checks only run inside the isNotifyTime() window, so
    violations occurring between windows are not detected at all — a
    deliberate (if coarse) rate-limiting choice; confirm before changing.
    Gas thresholds are ignored until the module-global ``warmedUp`` flag
    is set by the main loop."""
    if isNotifyTime(topic):
        if "temperature" in topic:
            val = float(value)
            if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
                notifyTelegram("Temperature out of bounds: "+value+"degC")
                return True
        if "CO" in topic:
            val = float(value)
            if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
                notifyTelegram("Carbon Monoxide level above threshold: "+value)
                return True
        if "All_Gas" in topic:
            val = float(value)
            if warmedUp and val > GAS_ALL_ADC_THRESH:
                notifyTelegram("Poison gas level above threshold: "+value)
                return True
        if "alarm" in topic:
            val = float(value)
            if int(val) == 1:
                notifyTelegram("ALARM in Living room is On!")
                return True
        if "MotionHUE" in topic:
            val = float(value)
            if int(val) == 1:
                notifyTelegram("HUE Motion sensor detected movement!")
                return True
    return False
def on_message(client, userdata, msg):
    # The callback for when a PUBLISH message is received from the server.
    # Filters to topicsOfInterest, then records the sample to InfluxDB when
    # either a limit was exceeded or RECORD_INTERVAL has elapsed for that
    # topic.
    global service
    global last_record
    currTime = getUTC_TIME()
    topic = msg.topic
    print("UTCtime: "+currTime.ctime()+","+msg.topic+" "+str(msg.payload))
    if topic not in topicsOfInterest:
        print("Topic: ",topic," from ",msg," not in the interest list")
        return
    if "empty" in topic:
        # Keep-alive topic, nothing to record.
        return
    timer = time.time()
    if topic not in last_record:
        last_record[topic] = 0 #to assure first time is updated
    # NOTE(review): str(msg.payload) on Python 3 yields "b'...'" — confirm
    # downstream float() parsing expectations against the publisher format.
    value = str(msg.payload)
    if limitsExsess(topic, value) or ((timer-last_record[topic]) > RECORD_INTERVAL):
        print("Updating records")
        update_records(topic, value)
        last_record[topic] = timer
    return
def on_disconnect(client, userdata,rc=0):
    # paho-mqtt disconnect callback: stop the background network loop.
    print("DisConnected result code "+str(rc))
    client.loop_stop()
def on_log(client, userdata, level, buf):
    # paho-mqtt log callback: echo client-library logs with a timestamp.
    print("UTC: ", time.ctime(), "log: ", buf)
def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.

    NOTE(review): ``Storage``, ``client``, ``tools`` and ``flags`` come
    from the oauth2client imports that are commented out at the top of
    this file — calling this function as-is raises NameError. The Sheets
    path appears retired in favour of InfluxDB.
    """
    #home_dir = os.path.expanduser('~')
    home_dir = (HOME_DIR)
    credential_dir = os.path.join(home_dir, '.credentials')
    print("Credentials folder: ",credential_dir)
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'sheets.googleapis.com-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
def create_service():
    # Build the Google Sheets API service from stored credentials.
    # NOTE(review): ``discovery`` comes from the commented-out
    # googleapiclient import — calling this as-is raises NameError.
    credentials = get_credentials()
    service = discovery.build('sheets', 'v4', credentials=credentials)
    return service
def number_of_entries(service):
    # Read the running row count stored in NUM_ENTRIES_CELL of the sheet.
    result = service.spreadsheets().values().get(
        spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
    value = result.get('values', [])
    return int(value[0][0])
def update_records(topic, value):
    # Write one measurement point (topic as measurement name, value as a
    # float field) to the module-global InfluxDB client, stamped with the
    # current UTC time. The former Google Sheets path is preserved below
    # as a dead string literal after the return.
    receiveTime = getUTC_TIME()
    json_body = [
        {
            "measurement": topic,
            "time": receiveTime,
            "fields": {
                "value": float(value)
            }
        }
    ]
    print("Writing to InfluxDB: ", json_body)
    dbclient.write_points(json_body)
    return
    ''' #update Google Sheets
    entries = number_of_entries(service)
    currTime = getUTC_TIME()
    line_num = str(2 + entries)
    range = "InputData!A"+line_num+":D"+line_num
    # How the input data should be interpreted.
    value_input_option = 'USER_ENTERED'
    values = [ [ currTime, topic, value ] ]
    body = {'values': values}
    request = service.spreadsheets().values().update(
        spreadsheetId=SPREADSHEET_ID,
        range=range,
        valueInputOption=value_input_option,
        body=body)
    response = request.execute()
    update_entries(service,entries+1)
    return response '''
def update_entries(service,entries):
    # Write the new row count back to NUM_ENTRIES_CELL in the sheet.
    # Part of the retired Google Sheets path (see update_records).
    range = NUM_ENTRIES_CELL
    value_input_option = 'USER_ENTERED'
    values = [
        [
            entries
        ] ]
    body = {'values': values}
    request = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=range,
                                                     valueInputOption=value_input_option, body=body
                                                     )
    response = request.execute()
    return response
if __name__ == "__main__":
    # Entry point: connect to InfluxDB and Telegram, then run the MQTT
    # client loop forever, flipping ``warmedUp`` once the gas sensors have
    # had WARM_UP_THRESH seconds to stabilise.
    # NOTE: ``global`` at module level is a no-op; kept as-is.
    global service
    connectedGoogle = False
    connectedMQTT = False
    global dbclient
    global warmedUp #indicate WARM UP Threshold passed, and gas filters can be sampled
    warmedUp = False #indicate WARM UP Threshold passed, and gas filters can be sampled
    # NOTE(review): InfluxDB credentials are hard-coded here — consider
    # moving them into config.json alongside the other secrets.
    dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
    startTime = time.time()
    #establish Telegram Bot
    bot = telepot.Bot(telegramToken)
    bot.getMe()
    # while not connectedGoogle:
    #     try:
    #         service = create_service()
    #         connectedGoogle = True
    #     except:
    #         print ("failed to connect to google sheets, retrying")
    #         time.sleep(1)
    client = mqtt.Client("monitor")
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_log = on_log
    # Retry until the broker accepts the connection.
    while not connectedMQTT:
        try:
            client.connect(localBroker, localPort, keepalive = 6000)
            connectedMQTT = True
        except:
            print("Connection to MQTT broker failed")
            print("exception: ",sys.exc_info()[0])
            time.sleep(1)
    client.loop_start()
    while True:
        time.sleep(10)
        #client.publish("/empty","0")
        if not warmedUp:
            warmedUp = (time.time() - startTime) > WARM_UP_THRESH
|
5,542 | 0544c67cb14549e32b6ff8ea3215c6c65c8416ec | from typing import Any
from electionguard.ballot import CiphertextAcceptedBallot
from electionguard.decryption import compute_decryption_share_for_ballot
from electionguard.election import CiphertextElectionContext
from electionguard.scheduler import Scheduler
from electionguard.serializable import write_json_object
from fastapi import APIRouter, Body, Depends
from app.core.scheduler import get_scheduler
from ..models import (
convert_guardian,
DecryptBallotSharesRequest,
DecryptBallotSharesResponse,
)
from ..tags import TALLY
router = APIRouter()
@router.post("/decrypt-shares", tags=[TALLY])
def decrypt_ballot_shares(
    request: DecryptBallotSharesRequest = Body(...),
    scheduler: Scheduler = Depends(get_scheduler),
) -> Any:
    """
    Decrypt this guardian's share of one or more ballots
    """
    # Deserialize all ballots first so malformed input fails before any
    # share computation begins.
    ballots = []
    for serialized_ballot in request.encrypted_ballots:
        ballots.append(CiphertextAcceptedBallot.from_json_object(serialized_ballot))
    context = CiphertextElectionContext.from_json_object(request.context)
    guardian = convert_guardian(request.guardian)
    shares = [
        compute_decryption_share_for_ballot(guardian, ballot, context, scheduler)
        for ballot in ballots
    ]
    return DecryptBallotSharesResponse(
        shares=[write_json_object(share) for share in shares]
    )
|
5,543 | 7e9efb267a5464a6e53f81f63d82c28acba8bc8c | # ToDo:
"""
965. Univalued Binary Tree
Easy
A binary tree is univalued if every node in the tree has the same value.
Return true if and only if the given tree is univalued.
Note:
The number of nodes in the given tree will be in the range [1, 100].
Each node's value will be an integer in the range [0, 99].
"""
# Conditions & Concepts
"""
"""
# Code
## submit part
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # Submission template. FIXES: (1) the dangling ``def`` had no body,
    # which made the whole file unparsable; (2) the ``TreeNode``
    # annotation is quoted because TreeNode is only defined on the judge,
    # so an unquoted annotation raised NameError at definition time.
    def isUnivalTree(self, root: "TreeNode") -> bool:
        ...
## test part
class Solution:
    # Local test template: same signature as the submission, left untyped
    # so it runs without the judge's TreeNode class.
    def isUnivalTree(self, root):
        """
        root: TreeNode -- root of the binary tree to inspect
        rtype: bool -- True when every node carries the same value
        """
        ## code here
"""
Success
Runtime: 28 ms, faster than 72.81% of Python3 online submissions for Univalued Binary Tree.
Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Univalued Binary Tree.
"""
class Solution:
    """Accepted solution: a tree is univalued iff it contains exactly one
    distinct node value."""

    def isUnivalTree(self, root):
        """Walk the tree with an explicit stack, collect every node value,
        and check that only a single distinct value was seen."""
        seen = set()
        stack = [root]
        while stack:
            node = stack.pop()
            if node:
                seen.add(node.val)
                stack.append(node.left)
                stack.append(node.right)
        return len(seen) == 1
# Test
## Functional Test
"""
# Conditions & Concepts
"""
if __name__ == '__main__':
    # Ad-hoc functional-test harness from the problem template.
    # NOTE(review): ``func`` is never defined in this file; these loops
    # only avoid a NameError because ``input1`` is empty — fill in both
    # before relying on this harness.
    input1 = []
    expected_output = []
    for i in range(len(input1)):
        if func(input1[i]) != expected_output[i]:
            print("Wrong!!!", ' Output:', func(input1[i]), '; Expected Output:', expected_output[i])
        else:
            print("Right")
# print(func(input1[-1]))
## Performance Test
import cProfile
# NOTE(review): profiles an empty statement — placeholder to be replaced
# with a real call expression.
cProfile.run('')
## Unit Test
import unittest
class Test(unittest.TestCase):
    # Placeholder test case from the template; no assertions yet.
    def test(self):
        pass
if __name__ == '__main__':
    unittest.main()
5,544 | ddf64ea5ecbd3aa737cd788924035cccb5544fec | ## adapted from https://matplotlib.org/examples/api/radar_chart.html
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
    """Create and register a 'radar' polar projection with ``num_vars`` axes.

    Side effect: registers the RadarAxes projection with matplotlib so
    callers can use ``subplot_kw=dict(projection='radar')``. Returns the
    evenly spaced axis angles (radians). ``frame`` selects the patch shape
    drawn behind the axes: 'circle' or 'polygon'.
    """
    # Evenly space the variable axes, rotated so the first sits at the top.
    theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
    theta += np.pi/2
    def draw_poly_patch(self):
        verts = unit_poly_verts(theta)
        return plt.Polygon(verts, closed=True, edgecolor='k')
    def draw_circle_patch(self):
        # Unit circle centered on (0.5, 0.5) in axes coordinates.
        return plt.Circle((0.5, 0.5), 0.5)
    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if frame not in patch_dict:
        raise ValueError('unknown value for `frame`: %s' % frame)
    class RadarAxes(PolarAxes):
        # Polar axes variant whose plotted lines are closed into polygons.
        name = 'radar'
        # Use 1 line segment to connect specified points (straight edges).
        RESOLUTION = 1
        draw_patch = patch_dict[frame]
        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default"""
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default"""
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)
        def _close_line(self, line):
            # Append the first vertex to the end so the outline closes.
            x, y = line.get_data()
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)
        def set_varlabels(self, labels):
            self.set_thetagrids(np.degrees(theta), labels)
        def _gen_axes_patch(self):
            return self.draw_patch()
        def _gen_axes_spines(self):
            if frame == 'circle':
                return PolarAxes._gen_axes_spines(self)
            # Polygon frame: build a custom spine along the polygon path.
            spine_type = 'circle'
            verts = unit_poly_verts(theta)
            # Close off polygon by repeating first vertex.
            verts.append(verts[0])
            path = Path(verts)
            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}
    register_projection(RadarAxes)
    return theta
def unit_poly_verts(theta):
    """Vertices of a regular polygon of radius 0.5 centered at (0.5, 0.5)."""
    center_x = center_y = radius = 0.5
    return [(radius * np.cos(angle) + center_x,
             radius * np.sin(angle) + center_y)
            for angle in theta]
def labels_to_colors(labels):
    """Assign one viridis color per distinct label; equal labels share a color."""
    cmap = plt.get_cmap('viridis')
    unique_labels = np.unique(labels)
    color_dict = {label: cmap(idx / len(unique_labels))
                  for idx, label in enumerate(unique_labels)}
    return [color_dict[label] for label in labels]
def radar_chart(data, labels, show_axis=False, fill_polygon=False):
    """Draw one radar chart with a polygon per record in `data`.

    data: sequence of records, each with len(data[0]) values (the number
    of spokes is taken from the first record).
    labels: one label per record; equal labels are drawn in one color.
    Returns the (fig, ax) pair.
    """
    theta = radar_factory(len(data[0]), frame='circle')
    colors = labels_to_colors(labels)
    fig, ax = plt.subplots(figsize=(5,5), subplot_kw=dict(projection='radar'), facecolor='white')
    ax.axis('on' if show_axis else 'off')
    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)
    for record, color in zip(data, colors):
        ax.plot(theta, record, color=color)
        if fill_polygon:
            # translucent fill under the outline
            ax.fill(theta, record, facecolor=color, alpha=0.25)
    return fig, ax
|
5,545 | 225687729b64f455bcc841e83105c7444efdfad3 | import math as m
def calcula_elongacao(A, ϕ, ω, t):
    """Elongation of simple harmonic motion: x(t) = A * cos(ϕ + ω * t).

    A: amplitude, ϕ: initial phase, ω: angular frequency, t: time.

    Bug fix: the phase advanced as ϕ * t, leaving ω unused; the correct
    SHM phase is ϕ + ω * t.
    """
    x = A * m.cos(ϕ + ω * t)
    return x
5,546 | 54ed0683d0f8d907c27e2f3809f9533556593392 | import json
from pets.pet import Pet
from store_requests.store import Store
from user_requests.user import User
SUCCESS = 200
NotFound = 404
url_site = 'https://petstore.swagger.io/v2'
new_username = "Khrystyna"
new_id = 12345
invalid_new_id = 1234
error_message = "oops we have a problem!"
store_inventory = {
"1": 1,
"4444": 2,
"teste": 1,
"string": 6738,
"Operated": 4,
"pending": 56,
"Not-Operated": 10,
"available": 4800,
"waiting list": 1,
"Unavailable": 1,
"Shortlisted": 1,
"Sold": 1,
"availasdfsadfasdfble": 1,
"not available": 1,
"Available": 1,
"YAI3424forYAI3373": 1,
"ok": 1,
"KBMAvailable": 3,
"onwork": 1,
"sold": 87,
"ddd": 1,
"Nonavailable": 1,
"Offline": 1,
"straight": 2,
"pendin": 1,
"sts": 1,
"onhold": 3,
"status": 5,
"xavailable": 1
}
Category_Dict = dict(id=36,
name='Rexy')
tag_dict = dict(id=4,
name='Dog')
PetObject = Pet(id=456,
category=Category_Dict,
name="Xixi",
photo_urls=["https://www.what-dog.net/Images/faces2/scroll001.jpg"],
tags=[tag_dict],
status='sold')
DataJsonForPets = json.dumps(PetObject.__dict__)
store = Store(id=12,
petId=12,
quantity=2,
ship_date="2018-09-12T13:52:49.901Z",
status="placed",
complete=True)
data_json_for_store = json.dumps(store.__dict__)
user = User(id=3,
username="Nini",
first_name="Vira",
last_name="Budda",
email="email@gmail.com",
password="1234567",
phone="55455545",
user_status=1)
data_json_for_user = json.dumps(user.__dict__)
|
5,547 | 918358f6e8e3f1c601b18a3c08fc6b7c024721ba | password = '#Garb1122' |
5,548 | 04e57739e6fb98cd237fbe09caecd17c728c1797 | # terrascript/external/__init__.py
import terrascript
class external(terrascript.Provider):
    # Terrascript binding for the Terraform `external` provider; the base
    # class supplies all behavior, so the body is intentionally empty.
    pass
5,549 | 1396509f65d194eeaefa3841e152b7078abf0032 | import sys
class Bus:
    """Seat bookkeeping for a bus: passengers occupy the lowest free seat."""

    def __init__(self):
        self.seats = 0            # total number of seats
        self.dict_seats = {}      # seat number -> passenger name or "Free"
        self.num_passenger = 0    # passengers currently on board

    def conctructor(self, seats):
        """Initialize `seats` numbered seats, all free.

        NOTE: the misspelled public name is kept so existing callers do
        not break.
        """
        self.seats = seats
        for seat in range(1, self.seats + 1):
            self.dict_seats[seat] = "Free"
        return self.dict_seats

    def getOn(self, passenger_name=None):
        """Board one passenger into the lowest-numbered free seat;
        exits the process when the bus is full (original behavior)."""
        self.num_passenger += 1
        if self.num_passenger > self.seats:
            sys.exit(f'Sorry dear {passenger_name}. There is no free seat on the bus')
        free_list = list(self.dict_seats.values())
        free_num_seat = free_list.index("Free") + 1
        self.dict_seats[free_num_seat] = passenger_name

    def getOn_2(self, *names):
        """Board several passengers at once.

        Bug fix: the original stringified the argument tuple and split the
        result on spaces, so names were stored with quotes and parentheses
        attached (e.g. "('Ann',").  Iterating the tuple stores real names.
        """
        for name in names:
            self.getOn(name)

    def getOf(self, passenger_name=None):
        """Remove a passenger and free their seat.

        Bug fix: the passenger count was decremented even when the named
        passenger was not aboard; now it only changes on a real removal.
        """
        close_list = list(self.dict_seats.values())
        if passenger_name in close_list:
            self.num_passenger -= 1
            close_num_seat = close_list.index(passenger_name) + 1
            self.dict_seats[close_num_seat] = "Free"
        else:
            print(f'Passenger {passenger_name} is not on the bus')

    def __str__(self):
        return f'Number of seats on the bus - {self.seats}\nNumber of passenger - {self.num_passenger}'\
               f'\nFree seats - {self.seats-self.num_passenger}'\
               f'\nOther details - {self.dict_seats}'
|
5,550 | e77e0791ddf211807566528e9532eebb54db43b5 | from abc import ABC, abstractmethod
from datetime import datetime, timedelta, date
import os
import housekeeper
import yfinance as yf
import pandas as pd
class DataManager(ABC):
    """Manage ticker configuration (tickers_config.json) and time-series files.

    All disk access goes through the injected `housekeeper` helper; the class
    keeps the loaded configuration plus derived lists (all / active /
    selected tickers, markets).
    """

    def __init__(self):
        self.__myHousekeeper = housekeeper.instance_class()
        self.__config_filename = "tickers_config.json"
        self.__dir_list = ['Data', 'Tickers', 'Dummy1']
        self.__upper_stages = 0
        self.__tickers_config_list = []
        self.__tickers_list = []
        self.__active_tickers_list = []
        self.__selected_tickers_list = []
        self.__timestamp = ''
        self.__markets = []
        self.__last_date_flag = False

    def get_config_filename(self):
        return self.__config_filename

    def set_config_filename(self, config_filename):
        self.__config_filename = config_filename

    def get_dir_list(self):
        return self.__dir_list

    def set_dir_list(self, dir_list):
        self.__dir_list = dir_list

    def get_upper_stages(self):
        return self.__upper_stages

    def set_upper_stages(self, upper_stages):
        # Bug fix: the original assigned the undefined name `dir_list` here,
        # raising NameError and never storing the argument.
        self.__upper_stages = upper_stages

    def get_last_date_flag(self):
        return self.__last_date_flag

    def set_last_date_flag(self, last_date_flag):
        self.__last_date_flag = last_date_flag

    def get_tickers_config(self):
        return self.__tickers_config_list

    def set_tickers_config(self, tickers_config_list):
        self.__tickers_config_list = tickers_config_list

    def get_tickers(self):
        return self.__tickers_list

    def set_tickers(self, tickers_list):
        self.__tickers_list = tickers_list

    def get_active_tickers(self):
        return self.__active_tickers_list

    def set_active_tickers(self, active_tickers_list):
        self.__active_tickers_list = active_tickers_list

    def get_selected_tickers(self):
        return self.__selected_tickers_list

    def set_selected_tickers(self, selected_tickers_list):
        self.__selected_tickers_list = selected_tickers_list

    def get_timestamp(self):
        return self.__timestamp

    def set_timestamp(self, timestamp):
        self.__timestamp = timestamp

    def get_markets(self):
        return self.__markets

    def set_markets(self, markets):
        self.__markets = markets

    def load_tickers_config(self):
        """Load tickers_config.json from disk into this instance."""
        data = self.__myHousekeeper.load_json_to_list(self.__dir_list, self.__config_filename)
        self.set_tickers_config(data)

    def save_tickers_config(self):
        # Do not call without having loaded tickers_config first, otherwise
        # the file on disk is overwritten with an empty configuration.
        self.__myHousekeeper.list_dict_to_json(self.get_dir_list(),
                                               self.get_upper_stages(),
                                               self.get_config_filename(),
                                               self.get_tickers_config())

    def initialize_metadata(self):
        """Load the config and split it into timestamp metadata and ticker data."""
        self.load_tickers_config()
        data = self.get_tickers_config()
        self.set_timestamp(data['metadata'][0]['timestamp'])
        self.set_tickers(data['data'])

    def initialize_config_tickers(self):
        # Derive the market list and the active tickers (every ETF plus each
        # stock flagged active) from the loaded configuration.
        markets = []
        active_tickers_ = []
        self.initialize_metadata()
        data = self.get_tickers()
        for d in data:
            markets.append(d['market'])
            if d['active_type'] == 'stock' and d['active_flag']:
                active_tickers_.append(d)
            elif d['active_type'] == 'ETF':
                active_tickers_.append(d)
        self.set_active_tickers(active_tickers_)
        self.set_markets(list(set(markets)))

    def api_selected_tickers(self):
        # Reload tickers_config for up-to-date ticker info, then choose the
        # tickers whose data should be refreshed (placeholder: first three).
        self.initialize_config_tickers()
        ticker_list = self.get_tickers()
        self.set_selected_tickers(ticker_list[0:3])
        #return self.get_active_tickers() #TODO

    def update_timeseries_download_date(self, selected_tickers_to_update):
        """Stamp today's date on every updated ticker and persist the config."""
        config_ticker_list = self.get_tickers_config()
        today = date.today()
        # Dates are stored in %m-%d-%Y format.
        [t.update({'data_update': today.strftime("%m-%d-%Y")}) for t in config_ticker_list['data'] if t in selected_tickers_to_update]
        self.set_tickers_config(config_ticker_list)
        self.save_tickers_config()

    def load_ticker_data(self, file_name):
        """Read a ticker's CSV time series as a DataFrame."""
        return self.__myHousekeeper.csv_to_df(self.__dir_list,
                                              file_name)

    def save_ticker_data(self, file_name, data):
        """Write a ticker's DataFrame time series to CSV."""
        self.__myHousekeeper.df_to_csv(self.__dir_list,
                                       self.__upper_stages, file_name, data)
class DataManager_YahooFinance(DataManager):
    """DataManager backed by the Yahoo Finance API (yfinance)."""

    def __init__(self):
        super().__init__()

    def download_ticker_data_from_scratch(self, ticker, ticker_key):
        """Download the complete history of `ticker`, save it as CSV, return it."""
        print('Downloading from scratch historic data of: ' + ticker)
        data_csv = yf.download(ticker)
        data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))
        data_csv['Date'] = [time.date() for time in data_csv['Date']]
        data_csv.reset_index(drop=True, inplace=True)
        self.save_ticker_data(ticker_key, data_csv)
        return data_csv

    def download_ticker_data_from_last_date(self, ticker, ticker_key, start_date):
        """Download sessions since `start_date` and append them to the local CSV."""
        print('Updating historic data of: ' + ticker)
        # 1. Download the data since the last stored date.
        data_csv = yf.download(ticker, start=start_date)
        data_csv.insert(loc=0, column='Date', value=pd.to_datetime(data_csv.index, errors='coerce'))
        data_csv['Date'] = [time.date() for time in data_csv['Date']]
        print('Downloaded(sessions)', len(data_csv))
        # 2. Load the local CSV.
        # Bug fix: the original called the module-level instance `DM_YF`
        # instead of `self`, breaking any other instance of this class.
        data_csv_local = self.load_ticker_data(ticker_key)
        # 3. Append the missing rows and reset the index.
        data_csv = pd.concat([data_csv_local, data_csv], ignore_index=True)
        data_csv.reset_index(drop=True, inplace=True)
        data_csv.drop(data_csv.columns[0], axis=1, inplace=True)
        # 4. Save, overwriting the previous file.
        self.save_ticker_data(ticker_key, data_csv)

    def last_date_download(self, ticker_dict):
        """Decide between a full or incremental download for one ticker.

        Three cases: A) no last date -> from scratch; B) last date >= today
        -> nothing to do; C) last date < today -> incremental download.
        """
        last_date_str_ = ticker_dict['data_update']
        ticker_key_ = ticker_dict['tickerKey']
        ticker = ticker_dict['feeds']['ticker']
        if last_date_str_ is None:
            print(ticker + " is not found in database, adding ----")
            self.download_ticker_data_from_scratch(ticker, ticker_key_)
            return
        now = datetime.now()
        last_date = datetime.strptime(last_date_str_, '%m-%d-%Y')
        delta = now - last_date
        start_date = last_date + timedelta(days=+1)
        if delta.days <= 0:
            print('Data of ', ticker_key_, 'is already updated')
            return
        else:
            self.download_ticker_data_from_last_date(ticker, ticker_key_, start_date)
            delta = now - start_date
            print('Downloaded(days): ', delta.days)

    def timeseries_download_manager(self, ticker_dict):
        """Dispatch one ticker download according to the last-date flag."""
        if self.get_last_date_flag():  # incremental, from the last updated date
            print('Download ', ticker_dict['tickerKey'], ' from last updated_date')
            self.last_date_download(ticker_dict)
        else:  # full history
            print('Download', ticker_dict['tickerKey'], ' from scratch')
            self.download_ticker_data_from_scratch(ticker_dict['feeds']['ticker'], ticker_dict['tickerKey'])

    def download_selected_tickers(self):
        """Refresh the user-selected tickers and stamp their update date."""
        # 1. Store the selected tickers (user selection / default choice).
        self.api_selected_tickers()
        # 2. Download type: incremental (True) / from scratch (False, default).
        self.set_last_date_flag(False)
        # 3. Download every selected ticker.
        [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]
        # 4. Record the download date in tickers_config.
        self.update_timeseries_download_date(self.get_selected_tickers())

    def download_market_data(self, markets, _last_date_flag=False):
        """Refresh every active ticker belonging to the given markets."""
        print('Download market ticker')
        # 1. Select the active tickers that belong to `markets`.
        self.initialize_config_tickers()
        active_ticker_list = self.get_active_tickers()
        ticker_list = [t for t in active_ticker_list if t['market'] in markets]
        self.set_selected_tickers(ticker_list)
        # 2. Download type: incremental (True) / from scratch (False, default).
        self.set_last_date_flag(_last_date_flag)
        # 3. Download every selected ticker.
        [self.timeseries_download_manager(t) for t in self.get_selected_tickers()]
        # 4. Record the download date in tickers_config.
        self.update_timeseries_download_date(self.get_selected_tickers())

    def download_all_markets(self):
        """Refresh every market found in the configuration."""
        print('Download ALL MARKETS')
        self.download_market_data(self.get_markets())
|
5,551 | 3e1c2d0c5bb30d093a99f10020af14db5436bf02 | # -*- coding: utf-8 -*-
"""microcms package, minimalistic flatpage enhancement.
THIS SOFTWARE IS UNDER BSD LICENSE.
Copyright (c) 2010-2012 Daniele Tricoli <eriol@mornie.org>
Read LICENSE for more informations.
"""
VERSION = (0, 2, 0)
|
5,552 | 844b8e2d4f05a51282b356c995f2733d6935a5d6 | # We will try to implement add noise to audio file and filter it using Mean and Median Filters.
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.io.wavfile import read
from scipy.io.wavfile import write
rate,audio_original = read('Audio_Original.wav')
audio = audio_original[:,0]
write("Audio_Modified.wav",rate,audio)
print (audio.shape[0])
print (audio.shape[0]/rate) # Time of track
# print (audio.shape[1]) # No.of Channels
def Plot_Audio(audio):
    """Plot the audio samples against their sample index and show the figure."""
    sample_axis = np.arange(audio.shape[0])
    plt.plot(sample_axis, audio)
    plt.show()
def Add_Noise(audio, mu=0, sigma=1):
    """Return `audio` with additive Gaussian noise of mean `mu`, std `sigma`.

    Bug fix: the original hard-coded np.random.normal(0, 1, ...) and
    silently ignored the `mu` and `sigma` parameters; they are now honored.
    """
    gaussian_noise = np.random.normal(mu, sigma, audio.shape[0])
    return audio + gaussian_noise
def Median_Filter(audio, M):
    """Median-filter `audio` using a zero-padded window of width 2*M.

    For output index k the window covers padded samples [k, k + 2*M),
    i.e. M samples of left context, the sample itself and M-1 samples of
    right context. Returns (filtered_signal, sample_indices).
    """
    n = audio.shape[0]
    padded = np.zeros(n + 2 * M)
    padded[M:n + M] = audio
    filtered = np.array([np.median(padded[k:k + 2 * M]) for k in range(n)])
    return filtered, np.arange(n)
def Mean_Filter(audio, M):
    """Mean-filter `audio` using a zero-padded window of width 2*M.

    For output index k the window covers padded samples [k, k + 2*M),
    i.e. M samples of left context, the sample itself and M-1 samples of
    right context. Returns (filtered_signal, sample_indices).
    """
    n = audio.shape[0]
    padded = np.zeros(n + 2 * M)
    padded[M:n + M] = audio
    filtered = np.array([np.mean(padded[k:k + 2 * M]) for k in range(n)])
    return filtered, np.arange(n)
Plot_Audio(audio)
audio = Add_Noise(audio)
Plot_Audio(audio)
write("Audio_with_Noise.wav",rate,audio) # Creating a Audio signal with noise
audio_new_mean,time_new = Mean_Filter(audio,2)
Plot_Audio(audio_new_mean)
write("Audio_with_Noise_Filtered_Mean.wav",rate,audio_new_mean) # Creating filtered audio signal using Mean Filter
audio_new_median,time_new = Median_Filter(audio,2)
Plot_Audio(audio_new_median)
write("Audio_with_Noise_Filtered_Median.wav",rate,audio_new_median) # Creating filtered audio signal using Median Filter
|
5,553 | 9bf8834b12bcace0f6daf64adae1babe78bb04fa | '''
Created on Nov 1, 2013
@author: hanchensu
'''
from numpy import *
import numpy as np
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """Stub of the simplified SMO routine from 'Machine Learning in Action'.

    NOTE(review): only the setup lines (matrix conversion, b, shape) match
    SMO; the remainder is scratch code experimenting with numpy matrix
    element-wise multiplication and never uses C, toler or maxIter.
    Python 2 print syntax throughout.
    """
    dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
    b = 0; m,n = shape(dataMatrix)
    # scratch experiments below: shapes and Hadamard products of small matrices
    matrix = mat([[1,2],[3,4],[5,6]])
    m,n= shape(matrix)
    matA = mat([[1,2],[2,3],[5,6]])
    matB = mat([1,2,3]).transpose()
    print matA
    print matB
    # multiply() is element-wise; the 3x1 matB broadcasts across matA's columns
    print multiply(matA,matB)
    # x1 = np.arange(9.0).reshape((3, 3))
    # x2 = np.arange(3.0)
    # print x1
    # print x2
    # print np.multiply(x1, x2)
5,554 | 92b24fe82929ed4590e5350188673c2245136d03 | from db import do_command, do_command_no_return, do_insert
def get_grocery(upc):
    """Look up a grocery by UPC; returns {'success': bool, 'grocery': row?}."""
    rows = do_command("SELECT name FROM grocery WHERE upc = ?", [upc])
    found = bool(len(rows))
    if rows:
        return {'success': found, 'grocery': rows[0]}
    return {'success': found}
def grocery_input(upc, name):
    """Insert a grocery row; returns the db layer's insert result."""
    insert_sql = "INSERT INTO grocery (name, upc) VALUES (?, ?)"
    return do_insert(insert_sql, [name, upc])
def get_grocery_id(upc):
    """Return the grocery id for `upc`, or -1 when no row matches."""
    rows = do_command("SELECT id FROM grocery WHERE upc = ?", [upc])
    return rows[0]['id'] if rows else -1
def get_grocery_name(upc):
    """Return the first name row matching `upc`.

    Bug fix: the original called do_command((cmd, [upc])) -- a single
    tuple argument instead of (cmd, params) -- which does not match how
    every other helper in this module invokes do_command.
    """
    cmd = "SELECT name FROM grocery WHERE upc = ?"
    rtVal = do_command(cmd, [upc])
    return rtVal[0]
def grocery_exists(upc):
    """True when a grocery row with this UPC exists."""
    matches = do_command("SELECT id FROM grocery WHERE upc = ?", [upc])
    return len(matches) > 0
def remove_grocery(upc):
    """Delete a grocery together with its inventory and changes rows."""
    grocery_id = get_grocery_id(upc)
    if grocery_id == -1:
        return
    # dependent rows first, then the grocery row itself
    for delete_sql in ("DELETE FROM inventory WHERE grocery_id = ?",
                       "DELETE FROM changes where grocery_id = ?",
                       "DELETE FROM grocery where id = ?"):
        do_command_no_return(delete_sql, [grocery_id])
def produce_input(plu, name):
    """Insert a produce row; returns the db layer's insert result."""
    insert_sql = "INSERT INTO produce (name, plu) VALUES (?, ?)"
    return do_insert(insert_sql, [name, plu])
def get_produce(plu):
    """Look up produce by PLU; returns {'success': bool, 'grocery': row?}.

    NOTE(review): the payload key is 'grocery' (apparent copy-paste from
    get_grocery). Callers may rely on it, so it is left unchanged --
    confirm before renaming to 'produce'.
    """
    cmd = "SELECT name FROM produce WHERE plu = ?"
    rtVal = do_command(cmd, [plu])
    length = len(rtVal)
    if length > 0:
        return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
    return {'success': bool(len(rtVal))}
5,555 | b21796a9e10314f80cac3151d1fdbb139966303f | import numpy as np
import scipy.io as sio
import os
import torch
from torchvision.utils import save_image
from tools import *
def test(config, base, loaders, brief):
    """Extract features for all loaders, then evaluate and return the results."""
    compute_and_save_features(base, loaders)
    results = evalutate(config, base, brief)
    return results
def evalutate(config, base, brief=False):
    """Evaluate CMC/mAP for every (mode, number_shot) combination.

    NOTE: the misspelled name is kept because test() calls it by this
    name. When `brief` is True only the first combination is evaluated.
    Returns {'mode,number_shot': [cmc, map]}.
    """
    results = {}
    for mode in config.modes:
        print(mode)
        for number_shot in config.number_shots:
            print(number_shot)
            cmc, map = evaluate_sysymm01(base.save_features_path, mode, number_shot)
            results['{},{}'.format(mode, number_shot)] = [cmc, map]
            if brief: break
        if brief: break
    return results
def compute_and_save_features(base, loaders):
    """Extract embeddings for the RGB and IR loaders and save them per camera.

    RGB images are first translated to IR with base.G_rgb2ir; features are
    averaged over the image and its horizontal flip, L2-normalized, grouped
    by (camera id, person id) and written as feature_cam{N}.mat files.
    """
    def compute_features(images):
        # flip augmentation: sum of features from the image and its mirror
        images_f = fliplr(images)
        images = images.to(base.device)
        images_f = images_f.to(base.device)
        features = base.encoder(base.process_images_4_encoder(images, True, True))
        features_f = base.encoder(base.process_images_4_encoder(images_f, True, True))
        features, _, _, _ = base.embeder(features)
        features_f, _, _, _ = base.embeder(features_f)
        features = features + features_f
        if base.part_num == 1:
            # keep a trailing part axis so the later reshape is uniform
            features = torch.unsqueeze(features, -1)
        return features
    def normalize_and_resize_feature(features):
        # normalize each feature vector to unit L2 norm
        norm = torch.norm(features, dim=1, keepdim=True)
        features = features / norm.repeat([1, features.size(1), 1])
        # flatten parts into one vector per sample
        features = features.view(features.size(0), -1)
        return features
    class XX:
        # accumulates per-person feature rows, concatenated along axis 0
        def __init__(self):
            self.val = {}
        def update(self, key, value):
            if key not in self.val.keys():
                self.val[key] = value
            else:
                self.val[key] = np.concatenate([self.val[key], value], axis=0)
        def get_val(self, key):
            if key in self.val.keys():
                return self.val[key]
            else:
                return np.array([[]])
    print('Time:{}. Start to compute features'.format(time_now()))
    # compute features
    # base._resume_model(test_step)
    base.set_eval()
    features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()
    with torch.no_grad():
        for i, data in enumerate(loaders.rgb_all_loader):
            # load data; RGB images are translated to the IR domain first
            images, pids, cids, _ = data
            images = base.G_rgb2ir(images.to(base.device)).data.cpu()
            # forward
            features = compute_features(images)
            # meter
            features_meter.update(features.data)
            pids_meter.update(pids.data)
            cids_meter.update(cids.data)
        for i, data in enumerate(loaders.ir_all_loader):
            # load data (IR images used as-is)
            images, pids, cids, _ = data
            # forward
            features = compute_features(images)
            # meter
            features_meter.update(features.data)
            pids_meter.update(pids.data)
            cids_meter.update(cids.data)
    print('Time:{}. Start to normalize features.'.format(time_now()))
    # normalize features
    features = features_meter.get_val()
    features = normalize_and_resize_feature(features)
    features = features.data.cpu().numpy()
    pids = pids_meter.get_val_numpy()
    cids = cids_meter.get_val_numpy()
    print('Time: {}. Note: Start to save features as .mat file'.format(time_now()))
    # save features as .mat file, grouped first by camera then by person id
    results = {1: XX(), 2: XX(), 3: XX(), 4: XX(), 5: XX(), 6: XX()}
    for i in range(features.shape[0]):
        feature = features[i, :]
        feature = np.resize(feature, [1, feature.shape[0]])
        cid, pid = cids[i], pids[i]
        results[cid].update(pid, feature)
    # per-camera person counts; presumably the SYSU-MM01 camera layout -- TODO confirm
    pid_num_of_cids = [333, 333, 533, 533, 533, 333]
    cids = [1, 2, 3, 4, 5, 6]
    for cid in cids:
        a_result = results[cid]
        xx = []
        for pid in range(1, 1+ pid_num_of_cids[cid - 1]):
            xx.append([a_result.get_val(pid).astype(np.double)])
        xx = np.array(xx)
        sio.savemat(os.path.join(base.save_features_path, 'feature_cam{}.mat'.format(cid)), {'feature': xx})
def save_images(base, current_step):
    """Save a grid [real RGB | fake IR | real IR] for visual GAN inspection."""
    #base.set_eval()
    with torch.no_grad():
        fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images).detach()
        xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images, base.fixed_real_ir_images], dim=0)
        # map from [-1, 1] back to [0, 1] before writing to disk
        save_image((xxxx.data.cpu() + 1.0) / 2.0,
            os.path.join(base.save_images_path, 'image_{}.jpg'.format(current_step)), nrow=base.fixed_real_rgb_images.size(0), padding=0)
5,556 | 02381f28ef20aa0c2c235ef6563e1810a5931e35 | from qiskit import QuantumCircuit,execute,Aer
from qiskit.visualization import plot_histogram
import matplotlib.pyplot as plt
# Quantum half adder: inputs on qubits 0 (A) and 1 (B), sum on qubit 2,
# carry on qubit 3; two classical bits collect the measurements.
qc_ha=QuantumCircuit(4,2)
# encode the inputs A = 1, B = 1
qc_ha.x(0)
qc_ha.x(1)
qc_ha.barrier()
# sum bit = A XOR B via two CNOTs onto qubit 2
qc_ha.cx(0,2)
qc_ha.cx(1,2)
# carry bit = A AND B via a Toffoli onto qubit 3
qc_ha.ccx(0,1,3)
qc_ha.barrier()
# classical bit 0 <- sum, classical bit 1 <- carry
qc_ha.measure(2,0)
qc_ha.measure(3,1)
#qc_ha.draw(output='mpl')
counts = execute(qc_ha,Aer.get_backend('qasm_simulator')).result().get_counts()
plot_histogram(counts)
plt.show()
5,557 | 539726df0e631c7a8edabf50fd739ee0497e3e97 | from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn import tree
import pickle as pk
X = pk.load(file=open('../data/temp/train.pkl', 'rb'))
y = pk.load(file=open('../data/temp/label.pkl', 'rb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
def train_model(model_name):
    """Fit the model named by `model_name` on the module-level train split
    and print its test-set R^2 score.

    Unknown names are silently ignored, as in the original if-chain.
    """
    constructors = {
        "LinearRegression": lambda: LinearRegression(),
        "Lasso": lambda: Lasso(alpha=1),
        "Ridge": lambda: Ridge(alpha=1),
        "tree": lambda: tree.DecisionTreeRegressor(),
    }
    make_model = constructors.get(model_name)
    if make_model is not None:
        model = make_model()
        model.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print(score)
if __name__ == '__main__':
model_chosen = "Lasso"
train_model(model_chosen)
|
5,558 | ff358136bc96fa7f3eb41d019ddfd10fc4db8f0d | class Person:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
def GetName(self):
return (self.fname + ' ' + self.lname)
|
5,559 | f6fe33e04ccdca1d9714caec412478d0cfc8b363 | from flask import Flask, request
from flask import render_template
import sqlite3
import datetime
app = Flask(__name__)
@app.route('/')
def index(date = ""):
    """Render hourly average temperature/humidity for a 'dd.mm.YYYY' date
    taken from the `date` query parameter (defaults to today).

    Security fix: the user-supplied date was %-interpolated straight into
    the SQL text (SQL injection); it is now bound as a parameter, which
    also removes the need for the doubled %% in the STRFTIME formats.
    Also closes the sqlite connection before returning.
    """
    date = request.args.get('date')
    if not date:
        now = datetime.datetime.now()
        date = "%02d.%02d.%04d" % (now.day, now.month, now.year)
    conn = sqlite3.connect("data.db")
    c = conn.cursor()
    res = c.execute("SELECT STRFTIME('%H', date), AVG(temp), AVG(hum) FROM data "
                    "WHERE STRFTIME('%d.%m.%Y', date)=? "
                    "GROUP BY STRFTIME('%H', date) ", (date,))
    hour = list()
    temp = list()
    hum = list()
    for row in res:
        hour.append(row[0])
        temp.append("%.1f" % row[1])
        hum.append("%.1f" % row[2])
    conn.close()
    return render_template('index.html', date = date, hour = hour, temp = temp, hum = hum)
if __name__ == '__main__':
app.debug = True
app.run(host = "127.0.0.1", port = 8888)
|
5,560 | 89d0d5d13c5106c504c6727c7784f048a30495dc | # Generated by Django 3.2 on 2021-05-22 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the `Recuerdos` model
    # (event title, up to three photos under media/recuerdos, created
    # timestamp). Do not edit the operations by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Recuerdos',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo_evento', models.CharField(blank=True, max_length=100, null=True)),
                ('foto1', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
                ('foto2', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
                ('foto3', models.ImageField(blank=True, null=True, upload_to='recuerdos')),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Recuerdo',
                'verbose_name_plural': 'recurdo',
            },
        ),
    ]
|
5,561 | c418b9b6903ebdad204a3a55f2384a94a3be0d09 | """
Pattern matching problem
Boyer Moore algorithm
First is my attempt, below is the code provided in the book
Idea:
Optimize brute force approach using 2 heuristics:
- Looking-Glass: start searches from last character of the
pattern and work backwards
- Character-Jump: During testing of a pattern P, a mismatch
in T[i] = c with corresponding pattern P[k] is handled:
a) if C is not contained in P, shift P completely past i.
b) if c is contained in P shift P until an occurrence of c
gets aligned with T[i]
"""
def find_boyer_moore(T, P):
    """Return the lowest index of T at which substring P begins, or -1.

    Boyer-Moore matching with the looking-glass (compare right-to-left)
    and character-jump heuristics.

    Bug fix: the original (marked INCORRECT in-file) decremented i and k
    *before* testing k == 0, so on a full match it returned an index one
    position too small and could miss matches at the start of T.  The
    complete-match test must run while k still points at the character
    just compared, exactly as in find_boyer_moore2 below.
    """
    n, m = len(T), len(P)
    if m == 0:
        return 0  # empty pattern matches at index 0
    # last occurrence of each pattern character, for the character-jump shift
    last = {}
    for k in range(m):
        last[P[k]] = k
    i = m - 1  # index into T
    k = m - 1  # index into P
    while i < n:
        if T[i] == P[k]:
            if k == 0:
                return i  # pattern fully matched; i is its start index
            i -= 1
            k -= 1
        else:
            j = last.get(T[i], -1)  # -1 when T[i] does not occur in P
            i += m - min(k, j + 1)  # character-jump shift
            k = m - 1
    return -1
def find_boyer_moore2(T, P):
    """Return the lowest index of T at which substring P begins, or -1.

    Boyer-Moore matching: the pattern is compared right-to-left; on a
    mismatch the pattern is shifted using the last-occurrence table
    (character-jump heuristic).
    """
    n, m = len(T), len(P)
    if m == 0:
        return 0  # empty pattern matches immediately
    # last index in P of each character (later occurrences overwrite earlier)
    last = {ch: idx for idx, ch in enumerate(P)}
    i, k = m - 1, m - 1  # i scans T, k scans P, both right-to-left
    while i < n:
        if T[i] != P[k]:
            # shift fully past i, or align P's last occurrence of T[i] with i
            j = last.get(T[i], -1)
            i += m - min(k, j + 1)
            k = m - 1
        elif k == 0:
            return i  # whole pattern matched
        else:
            i -= 1
            k -= 1
    return -1
# T = "abacaabadcabacabaabb"
T = "ddcbacab"
P = "abacab"
print(find_boyer_moore2(T, P)) |
5,562 | af00c6f443426b1f61e1816d7d14ebc7e6871a82 | import csv
import us
from flask import abort, Flask, request, render_template
app = Flask(__name__) # pylint: disable=invalid-name
# ---- UI routes ----
@app.route('/')
def root():
    return render_template('index.html')
@app.route('/api')
def index():
    return render_template('index.html')
# ---- /api/total/*: latest cumulative numbers ----
@app.route('/api/total/counties')
def total_counties():
    return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
    return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
    return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/total/states')
def total_states():
    return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
    return state_view_total(read_macro('country'), state, get_args())
# county-level endpoints nested under a state (same handlers as above)
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
    return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
    return process_state_counties_total(read_macro('county'), state, county, get_args())
# ---- /api/timeline/*: per-date series keyed by date ----
@app.route('/api/timeline/counties')
def timeline_counties():
    return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
    return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
    return process_state_county(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/states')
def timeline_states():
    return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
    return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
    return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
    return process_state_county(read_macro('county'), state, county, get_args())
def state_view_total(data, state_filter, args):
    """Latest row's totals for one state; ints are stringified for the response."""
    data = filter_country_state(data, state_filter)
    result = process_mode(args, data[-1][3], data[-1][4])
    result = str(result) if isinstance(result, int) else result
    return result
def state_view(data, state_filter, args):
    """Per-date series for one state: {date (row[0]): mode-projected values}."""
    result = {}
    data = filter_country_state(data, state_filter)
    for row in data:
        result[row[0]] = process_mode(args, row[3], row[4])
    return result
def country_view_total(data, args):
    """Latest totals per state key; reversed iteration keeps the newest row.

    `get_key_row` is defined elsewhere in this module.
    """
    dataset = {}
    key_row = get_key_row(args, 'country')
    for row in reversed(data):
        if row[key_row] not in dataset:
            dataset[row[key_row]] = process_mode(args, row[3], row[4])
    return dataset
def country_view(data, args):
    """Per-date series grouped by state key: {state: {date: values}}."""
    dataset = {}
    key_row = get_key_row(args, 'country')
    for row in data:
        if row[key_row] not in dataset:
            dataset[row[key_row]] = {}
        dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
    return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
    """Totals for one state's counties, or for one county when given."""
    data = filter_state(data, state_filter)
    if county_filter:
        result = process_county_data_total(data, county_filter, args)
        if isinstance(result, int):
            result = str(result)  # ints are stringified for the response
        return result
    return process_state_data_total(data, args)
def process_state_data_total(data, args):
    """Latest totals per county key (reversed iteration keeps the newest row)."""
    dataset = {}
    key_row = get_key_row(args, 'state')
    for row in reversed(data):
        if row[key_row] and row[key_row] not in dataset:
            dataset[row[key_row]] = process_mode(args, row[4], row[5])
    return dataset
def process_state_county(data, state_filter, county_filter, args):
    """Timeline for one state's counties, or for one county when given."""
    data = filter_state(data, state_filter)
    if county_filter:
        return process_county_data(data, county_filter, args)
    return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
    """Most recent matching county row's values, or None if no row matches."""
    for row in reversed(data):
        if compare_county(county_filter, row[1], row[3]):
            return process_mode(args, row[4], row[5])
    return None
def process_county_data(data, county_filter, args):
    """Per-date series for one county: {date (row[0]): values}."""
    dataset = {}
    for row in data:
        if compare_county(county_filter, row[1], row[3]):
            dataset[row[0]] = process_mode(args, row[4], row[5])
    return dataset
def process_state_data(data, args):
    """Per-date series grouped by county key: {county: {date: values}}."""
    dataset = {}
    key_row = get_key_row(args, 'state')
    for row in data:
        if row[key_row]:  # skip rows with an empty county key
            if row[key_row] not in dataset:
                dataset[row[key_row]] = {}
            dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
    return dataset
def process_counties_total(data, args):
    """Latest totals for every county, grouped by state."""
    totals = {}
    key_idx = get_key_row(args, 'state')
    # Newest-first walk: the first value seen per county is the latest one.
    for record in reversed(data):
        state_key = get_state_key(args, record[2])
        county_bucket = totals.setdefault(state_key, {})
        if record[key_idx] not in county_bucket:
            county_bucket[record[key_idx]] = process_mode(args, record[4], record[5])
    return totals
def process_country_county(data, args):
    """Full time series for every county, grouped by state then county."""
    series = {}
    key_idx = get_key_row(args, 'state')
    for record in data:
        state_key = get_state_key(args, record[2])
        county_bucket = series.setdefault(state_key, {}).setdefault(record[key_idx], {})
        county_bucket[record[0]] = process_mode(args, record[4], record[5])
    return series
def process_mode(args, cases, deaths):
    """Project a (cases, deaths) pair according to args['mode'].

    'cases' or 'deaths' return that single value as an int; any other mode
    returns both raw (uncast) values in a dict.
    """
    mode = args['mode']
    if mode == 'cases':
        return int(cases)
    if mode == 'deaths':
        return int(deaths)
    return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
    """Keep only rows whose state column (index 2) matches state_filter."""
    return [record for record in data if compare_state(state_filter, record[2])]
def filter_country_state(data, state_filter):
    """Keep only rows matching state_filter in the country-file layout
    (state name lives in column 1 there, not column 2)."""
    return [record for record in data if compare_state(state_filter, record[1])]
def read_macro(macro):
    """Load the CSV backing the given macro level, dropping the header row."""
    with open(get_macro_file(macro), newline='') as data_file:
        rows = list(csv.reader(data_file))
    rows.pop(0)  # discard the CSV header row
    return rows
def get_macro_file(macro):
    """Map a macro level to its backing CSV, aborting with 500 if unknown.

    Both 'county' and 'state' views are served from the county-level file;
    the 'country' view is served from the state-level file.
    """
    files = {
        'county': 'county.csv',
        'state': 'county.csv',
        'country': 'state.csv',
    }
    filename = files.get(macro)
    if not filename:
        abort(500)
    return filename
def get_args():
    """Collect the query-string options shared by every endpoint."""
    return {
        'mode': request.args.get('mode', None),
        'fips': request.args.get('fipsKey', False),
    }
def compare_state(state_filter, entry):
    """True when `entry` names the same state as `state_filter`.

    Matches on normalized spelling first, then via the `us` package lookup
    (which also accepts abbreviations and FIPS codes).
    """
    if str_normalize(entry) == str_normalize(state_filter):
        return True
    looked_up = us.states.lookup(state_filter)
    return bool(looked_up and looked_up.name == entry)
def compare_county(county_filter, entry, fips_entry):
    """True when county_filter matches the county name or its FIPS code."""
    return (str_normalize(entry) == str_normalize(county_filter)
            or county_filter == fips_entry)
def str_normalize(words):
    """Normalize a place name for comparison: drop spaces, then Xxxx-case it."""
    collapsed = words.replace(' ', '')
    return collapsed.lower().capitalize()
def get_key_row(args, locale):
    """Column index of the row key: the FIPS column when requested, else name.

    State-layout rows keep FIPS in column 3, country-layout rows in column 2;
    column 1 is the human-readable name in both layouts.
    """
    if locale == 'state':
        return 3 if args['fips'] else 1
    return 2 if args['fips'] else 1
def get_state_key(args, state):
    """Return the state's FIPS code when requested, otherwise the raw name."""
    return us.states.lookup(state).fips if args['fips'] else state
|
5,563 | ac1d38f550e548dff6ba226dbfc3dd1e5ff876a8 | from django.db import models
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Project(models.Model):
    """A tracked project with its assigned developer and hour bookkeeping."""
    # Django auth user actually doing the work; optional so legacy rows load.
    actual_developer = models.ForeignKey(User,null = True,blank=True, on_delete=models.CASCADE)
    # actual_developer = models.CharField(User,null = True,blank=True, max_length=200)
    projects_name = models.CharField(max_length=100)
    # NOTE(review): hour/cycle fields are free-form strings (e.g. "176 Hr"),
    # not numeric columns — confirm before doing arithmetic on them.
    project_hours = models.CharField(max_length=100)
    developer_name = models.CharField(max_length=255)
    Month_Cycle = models.CharField(max_length = 1000, blank=True, null=True)
    mailing_hrs = models.CharField(max_length=100,null=True,blank=True)
    developer_email = models.EmailField()
    expected_daily_hours = models.CharField(max_length=200, null=True, blank=True)
    expected_cycle_hours = models.CharField(max_length=200, null=True, default = "176 Hr")
    cycle_hour_diff = models.IntegerField(null=True, default=0)
    def get_absolute_url(self):
        # Redirect target used by create/update views.
        return reverse('project')
class Holidays(models.Model):
    """A single holiday date (no extra metadata)."""
    # NOTE(review): model purpose inferred from the name only — confirm usage.
    holidays = models.DateField()
|
5,564 | 65ea40ad1c1bf6bf23aed5316b91862c9cdc353d | __author__ = "Rick Sherman"
__credits__ = "Jeremy Schulman, Nitin Kumar"
import unittest
from nose.plugins.attrib import attr
from jnpr.junos import Device
from jnpr.junos.utils.scp import SCP
from mock import patch
@attr('unit')
class TestScp(unittest.TestCase):
    """Unit tests for jnpr.junos.utils.scp.SCP against a mocked paramiko."""

    def setUp(self):
        self.dev = Device(host='1.1.1.1')

    @patch('paramiko.SSHClient')
    def test_scp_open(self, mock_connect):
        from scp import SCPClient
        self.dev.bind(scp=SCP)
        assert isinstance(self.dev.scp.open(), SCPClient)

    @patch('paramiko.SSHClient')
    def test_scp_close(self, mock_connect):
        self.dev.bind(scp=SCP)
        self.dev.scp.open()
        self.assertEqual(self.dev.scp.close(), None)

    @patch('paramiko.SSHClient')
    def test_scp_context(self, mock_connect):
        with SCP(self.dev) as scp:
            scp.get('addrbook.conf')

    @patch('jnpr.junos.device.os')
    @patch('__builtin__.open')
    @patch('paramiko.config.SSHConfig.lookup')
    @patch('paramiko.SSHClient')
    @patch('paramiko.proxy.ProxyCommand')
    def test_scp_proxycommand(self, mock_proxy, mock_sshclient, mock_lookup,
                              mock_open, mock_os):
        # @patch decorators inject mocks bottom-up: the decorator closest to
        # the function supplies the first mock argument.  The previous
        # parameter names were matched to the decorators top-down, so every
        # name referred to the wrong mock.
        mock_os.path.exists.return_value = True
        self.dev._sshconf_path = '/home/rsherman/.ssh/config'
        with SCP(self.dev) as scp:
            scp.get('addrbook.conf')
        # Mock has no assert_called_any(); calling it just created a child
        # mock and asserted nothing.  Verify the proxy was really built.
        self.assertTrue(mock_proxy.called)
|
5,565 | 75ef5dd2b82cf79819f18045559f9850c74bb55a | from flask import Blueprint, request, make_response
from flask_expects_json import expects_json
from server.validation.schemas import guest_calendar_schema
from tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid
from tools.for_db.work_with_links import get_link
from tools.build_response import build_response
# Blueprint exposing the guest-facing booking endpoint.
guest_calendar_post = Blueprint('guest_calendar_post', __name__)
@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])
@expects_json(guest_calendar_schema)
def booking(link_id):
    """Create a booking against the calendar behind `link_id`.

    Returns the request payload extended with the new booking's uuid, a 401
    for unknown links, or a 409 when the slot is already taken or gone.
    """
    payload = request.get_json()
    link = get_link(link_id)
    if link is None:
        return build_response('link id is invalid', 401)
    admin_id = link.admin_id
    try:
        payload['uuid'] = add_booking_info_and_get_uuid(
            payload['start'],
            payload['end'],
            admin_id,
            payload['guest_name'],
            payload['guest_email'],
            payload.get('topic'),
        )
    except Exception:
        # Conflicting or deleted slots surface as DB-layer failures.
        return build_response('already booked or deleted', 409)
    return make_response(payload, 200)
|
5,566 | 5261346f96e7520b6ef75a292b3d44a6f00d868c | # Imports
from __future__ import print_function

from enum import Enum, IntFlag

import numpy
from numpy.random import randint
__all__ = ["common", "plot"]
class result(IntFlag):
    """Die-face flags; single-bit values so faces combine and test with & and |.

    Plain Enum does not define bitwise operators, so result_str() and
    expressions such as ``result.HIT | result.CRIT`` raised TypeError.
    IntFlag keeps the same member names and values while supporting them.
    """
    CRIT = 16
    HIT = 8
    EVADE = 4
    FOCUS = 2
    BLANK = 1
def result_str(res):
    """Render a (possibly combined) result bitmask as 'A|B|...' text.

    Fixed: the accumulator was named ``str``, shadowing the builtin; the
    repeated separator bookkeeping is replaced by a join.  Output order is
    unchanged: BLANK, FOCUS, HIT, CRIT, EVADE.
    """
    labels = ((result.BLANK, "BLANK"),
              (result.FOCUS, "FOCUS"),
              (result.HIT, "HIT"),
              (result.CRIT, "CRIT"),
              (result.EVADE, "EVADE"))
    return "|".join(label for flag, label in labels if res & flag)
# DICE CLASSES DEFINITIONS
# Eight faces per die: attack = 1 crit / 3 hits / 2 focus / 2 blanks,
# defence = 3 evades / 2 focus / 3 blanks.
__attack_die_faces__ = [result.CRIT, result.HIT, result.HIT, result.HIT, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK]
__evade_die_faces__ = [result.EVADE, result.EVADE, result.EVADE, result.FOCUS, result.FOCUS, result.BLANK, result.BLANK, result.BLANK]
class die:
    """Base class for one eight-sided die; subclasses set `result` by rolling."""
    def __init__ (self):
        # Each die may be rerolled at most once.
        self.rerolled = False
    def __str__(self):
        return result_str(self.result)
    @staticmethod
    def __roll_die__(face_list):
        # numpy randint's upper bound is exclusive: uniform over the 8 faces.
        return face_list[randint(0, 8)]
    def equals(self, result):
        # Bitwise test, so a combined mask (e.g. HIT|CRIT) matches too.
        return self.result & result
    def change(self, to):
        self.result = to
class attack_die(die):
    """A red attack die; rolled immediately on construction."""

    def __init__(self):
        die.__init__(self)
        self.__roll__()

    def __roll__(self):
        self.result = die.__roll_die__(__attack_die_faces__)

    def reroll(self):
        """Reroll once; returns False if this die was already rerolled."""
        if self.rerolled:
            return False
        self.__roll__()
        self.rerolled = True
        return True
class evade_die(die):
    """A green defence die; rolled immediately on construction."""

    def __init__(self):
        die.__init__(self)
        self.__roll__()

    def __roll__(self):
        self.result = die.__roll_die__(__evade_die_faces__)

    def reroll(self):
        """Reroll once; returns False if this die was already rerolled."""
        if self.rerolled:
            return False
        self.__roll__()
        self.rerolled = True
        return True
# DICE LIST METHOD DEFINITIONS
def count_relevant_results(dice_list, relevant_results):
    """Count dice whose face overlaps the relevant_results bitmask."""
    return sum(1 for d in dice_list if d.result & relevant_results)
def roll_attack_dice(number):
    """Roll `number` fresh attack dice."""
    return [attack_die() for _ in range(number)]
def roll_evade_dice(number):
    """Roll `number` fresh evade dice."""
    return [evade_die() for _ in range(number)]
# DICE LIST MODIFICATION DEFINITITONS
class perform(Enum):
    """Scope of a dice modification: every matching die, or only the first."""
    FOR_ALL = 7
    ONCE = 1
class change:
    """A dice modification that turns one face result into another."""

    def __init__(self, rule, from_result, to_result):
        self.rule = rule  # perform.ONCE or perform.FOR_ALL
        self.from_result = from_result
        self.to_result = to_result

    def modify_dice_list(self, dice_list):
        """Apply the change in place; returns the same list."""
        for d in dice_list:
            if not d.equals(self.from_result):
                continue
            d.change(self.to_result)
            if self.rule == perform.ONCE:
                break
        return dice_list
class reroll:
    """A dice modification that rerolls dice showing a given result."""

    def __init__(self, rule, from_result):
        self.rule = rule  # perform.ONCE or perform.FOR_ALL
        self.from_result = from_result

    def modify_dice_list(self, dice_list):
        """Reroll matching dice in place; returns the same list.

        Dice already rerolled are skipped (their reroll() returns False),
        and a ONCE rule only stops after a reroll actually happened.
        """
        for d in dice_list:
            if d.equals(self.from_result) and d.reroll():
                if self.rule == perform.ONCE:
                    break
        return dice_list
# Debug
def __print_dice_list(dice_list):
    """Debug helper: print every die on one line, space-terminated."""
    line = "".join(str(d) + " " for d in dice_list)
    print(line)
def get_dice_chances(number_of_dice, dice_roll_function, relevant_results, enemy_modifications, friendly_modifications):
    """Monte-Carlo estimate of P(exactly k relevant results), k = 0..7.

    Rolls `number_of_dice` dice repeatedly, applies enemy modifications
    before friendly ones, then histograms how many faces match the
    relevant_results bitmask.
    """
    num_iterations = 200000
    relevant_counts = numpy.zeros((8))
    for _ in range(num_iterations):
        dice_list = dice_roll_function(number_of_dice)
        # Enemy modifications resolve first, then our own.
        for mod in enemy_modifications:
            dice_list = mod.modify_dice_list(dice_list)
        for mod in friendly_modifications:
            dice_list = mod.modify_dice_list(dice_list)
        relevant_counts[count_relevant_results(dice_list, relevant_results)] += 1
    return relevant_counts / float(num_iterations)
def get_hit_chances(number_of_dice, enemy_modifications=(), friendly_modifications=()):
    """Chances of k hits+crits on `number_of_dice` attack dice.

    The mutable ``[]`` defaults are replaced with immutable tuples; the
    modification lists are only iterated, so behaviour is unchanged.
    """
    return get_dice_chances(number_of_dice, roll_attack_dice,
                            result.HIT | result.CRIT,
                            enemy_modifications, friendly_modifications)
def get_evade_chances(number_of_dice, enemy_modifications=(), friendly_modifications=()):
    """Chances of k evades on `number_of_dice` defence dice.

    The mutable ``[]`` defaults are replaced with immutable tuples; the
    modification lists are only iterated, so behaviour is unchanged.
    """
    return get_dice_chances(number_of_dice, roll_evade_dice, result.EVADE,
                            enemy_modifications, friendly_modifications)
def get_crit_chances(number_of_dice, enemy_modifications=(), friendly_modifications=()):
    """Chances of k crits on `number_of_dice` attack dice.

    Fixed: the misspelled ``eenemy_modifications`` raised NameError on every
    call.  Mutable ``[]`` defaults are also replaced with tuples.
    """
    return get_dice_chances(number_of_dice, roll_attack_dice, result.CRIT,
                            enemy_modifications, friendly_modifications)
def hits_vs_evade(hit_chances, evade_chances):
    """Combine hit and evade distributions into net-damage chances.

    chances[d] is the probability of exactly d unblocked hits (d >= 1,
    only counted when evades < hits); chances[0] absorbs the remaining
    probability mass.
    """
    chances = [0.0] * 8
    for hits in range(1, len(hit_chances)):
        for evades in range(hits):
            chances[hits - evades] += hit_chances[hits] * evade_chances[evades]
    chances[0] = 1.0 - sum(chances[1:])
    return chances
def average_chance(chance_list):
    """Expected value of the distribution (index weighted by probability)."""
    return sum((i * p for i, p in enumerate(chance_list) if i), 0.0)
5,567 | 137842d50355563b2df6c2fc48864c01a22afa80 | # -*- coding:utf-8 -*-
# pylint: disable=line-too-long
# repr() strings of filter-expression trees, used as parametrised fixtures.
_BASE_REPRESENTATIONS = [
    "Primitive(field='f1', op='eq', value='value')",
    "Primitive(field='f1', op='eq', value=42)",
    "Primitive(field='f1', op='eq', value=3.14)",
    "Primitive(field='f1', op='eq', value=True)",
    "Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
    "Condition(op=Operator.OR, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
    "Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5)])",
    "Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value='aaa'), Primitive(field='f2', op='eq', value=5), Primitive(field='f1', op='eq', value='bbb')])",
    "Condition(op=Operator.OR, values=[Condition(op=Operator.AND, values=[Primitive(field='f1', op='eq', value=50), Primitive(field='f2', op='eq', value='aaa')]), Primitive(field='f2', op='eq', value='bbb')])",
]
# The same trees again with op 'eq' rewritten to 'gt' (f1) / 'match' (f2).
# NOTE(review): the f1 pattern omits the closing quote so the replacement
# keeps the quote in place, while the f2 pattern includes it — confirm the
# asymmetry is intentional.
REPRESENTATIONS = _BASE_REPRESENTATIONS + [
    val.replace(
        "field='f1', op='eq", "field='f1', op='gt"
    ).replace(
        "field='f2', op='eq'", "field='f2', op='match'"
    )
    for val in _BASE_REPRESENTATIONS
]
|
5,568 | 1605396a6edb31dd6fe9238a0506f8cfeb794d07 | def play_43():
n=int(input('Enter n :'))
l=[]
for i in range(n):
l.append(int(input()))
for i in range(n-1):
for j in range(i+1,n):
if l[i]<l[j]:
continue
return "no"
return "Yes"
play_43()
|
5,569 | 2de62c73507acac597d70557adfe8286e2f28a1f | def assert_number(arg):
if not isinstance(arg, (int, float)):
raise TypeError(f"Expected number, got {type(arg)}")
|
5,570 | 788d9fa03c4311a8077d492b1a2b06d1f88826a3 | import numpy as np
import torch
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device("cpu"), fixed_length=None):
    """Pad a singly nested list or a sequence of n-d arrays (torch or numpy)
    into one (n+1)-d array; only the first dimension may vary in length.

    Args:
        sequences: list of n-d tensors/arrays, or a singly nested list.
        dtype: np.dtype or torch.dtype; must match the input flavour.
        device: target device for torch outputs.
        fixed_length: pad to this length instead of the observed maximum
            (every sequence must be no longer than it).

    Returns:
        (padded, mask): padded is zero-filled beyond each sequence; mask is
        (len(sequences), max_len) float with 1 marking valid positions.
    """
    if isinstance(sequences[0], list):
        # Plain python lists: materialise in the requested flavour first.
        if "torch" in str(dtype):
            sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]
        else:
            sequences = [np.asarray(s, dtype=dtype) for s in sequences]

    trailing_shape = sequences[0].shape[1:]  # identical across elements
    lengths = [len(s) for s in sequences]
    max_length = max(lengths) if fixed_length is None else fixed_length

    if isinstance(sequences[0], torch.Tensor):
        assert "torch" in str(dtype), "dtype and input type does not match"
        padded_seqs = torch.zeros((len(sequences), max_length) + trailing_shape, dtype=dtype, device=device)
        mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)
    else:
        assert "numpy" in str(dtype), "dtype and input type does not match"
        padded_seqs = np.zeros((len(sequences), max_length) + trailing_shape, dtype=dtype)
        mask = np.zeros((len(sequences), max_length), dtype=np.float32)

    for row, (seq, length) in enumerate(zip(sequences, lengths)):
        padded_seqs[row, :length] = seq
        mask[row, :length] = 1
    return padded_seqs, mask
def pad_sequences_2d(sequences, dtype=torch.long):
    """Pad a doubly nested list / list of n-d torch tensors into an (n+1)-d
    tensor; only the first two dims may vary in length.

    Args:
        sequences: batch of lists (e.g. sentences) of word sequences, given
            as nested python lists or torch tensors.
        dtype: torch.long for word indices / torch.float for other cases.

    Returns:
        (padded, mask): padded is (bsz, max_para_len, max_sen_len, *extra);
        mask matches its first three dims, 1 at valid positions.

    Examples:
        >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
        >>> pad_sequences_2d(test_data_list, dtype=torch.long)  # torch.Size([2, 3, 5])
    """
    bsz = len(sequences)
    para_lengths = [len(seq) for seq in sequences]
    max_para_len = max(para_lengths)
    sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
    max_sen_len = max([max(e) for e in sen_lengths])

    if isinstance(sequences[0], torch.Tensor):
        extra_dims = sequences[0].shape[2:]
    elif isinstance(sequences[0][0], torch.Tensor):
        extra_dims = sequences[0][0].shape[1:]
    else:
        # Fixed: torch.Tensor(data, dtype=...) is not a valid constructor
        # call and raised TypeError; torch.tensor accepts a dtype.
        sequences = [[torch.tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]
        extra_dims = ()

    padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)
    mask = torch.zeros(bsz, max_para_len, max_sen_len).float()

    for b_i in range(bsz):
        for sen_i, sen_l in enumerate(sen_lengths[b_i]):
            padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
            mask[b_i, sen_i, :sen_l] = 1
    return padded_seqs, mask
def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type="torch"):
    """Top (k1, k2) index pairs with k1 < k2 maximising st_prob[k1] * ed_prob[k2].

    Args:
        st_prob, ed_prob: (N, L) batched start/end probabilities, torch or np.
        top_n: number of pairs kept per batch element.
        prob_thd: optional minimum confidence.
        tensor_type: "torch" or "np"; torch inputs are converted first.

    Returns:
        N * [(st_idx, ed_idx, confidence), ...]
    """
    if tensor_type == "torch":
        st_prob = st_prob.data.numpy()
        ed_prob = ed_prob.data.numpy()
    # Per-batch outer product, then zero the diagonal and lower triangle so
    # only start < end pairs survive.
    pairwise = np.einsum("bm,bn->bmn", st_prob, ed_prob)
    upper = np.triu(pairwise, k=1)
    return find_max_triples_from_upper_triangle_product(upper, top_n=top_n, prob_thd=prob_thd)
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):
    """Top (k1, k2) pairs by value from batched upper-triangular products.

    Args:
        upper_product: (N, L, L) array, zero at and below the diagonal so
            only start < end entries can win.
        top_n: pairs kept per batch element.
        prob_thd: if given, drop pairs whose confidence is below it.

    Returns:
        batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
    """
    batched_sorted_triple = []
    for e in upper_product:
        sorted_triple = top_n_array_2d(e, top_n=top_n)
        if prob_thd is not None:
            # Fixed: filter on the confidence COLUMN.  `sorted_triple[2]`
            # selected the third row, producing a wrong-length boolean mask.
            sorted_triple = sorted_triple[sorted_triple[:, 2] >= prob_thd]
        batched_sorted_triple.append(sorted_triple)
    return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
    """Top-n entries of a 2-d array as (row_idx, col_idx, value) rows,
    ordered from largest value to smallest."""
    ascending = np.argsort(array_2d, axis=None)
    rows, cols = np.unravel_index(ascending, array_2d.shape)
    rows = rows[::-1][:top_n]
    cols = cols[::-1][:top_n]
    values = array_2d[rows, cols]
    return np.stack([rows, cols, values], axis=1)  # (top_n, 3)
|
5,571 | dfc412acc9b69f50396680db1b9f6feafe162996 | import io
import os
from flask import Flask
from werkzeug.datastructures import FileStorage
import pytest
# Sample fixture files shipped next to this test module.
PNG_FILE = os.path.join(os.path.dirname(__file__), 'flask.png')
JPG_FILE = os.path.join(os.path.dirname(__file__), 'flask.jpg')
class TestConfig:
    """Flask/MongoDB settings applied to the test application."""
    TESTING = True
    MONGODB_DB = 'flask-fs-test'
    MONGODB_HOST = 'localhost'
    MONGODB_PORT = 27017
class TestFlask(Flask):
    """Flask subclass with a one-call configure-and-init-storage helper."""

    def configure(self, *storages, **configs):
        import flask_file_system as fs
        # Apply the config overrides before initialising the extension.
        self.config.update(configs)
        fs.init_app(self, *storages)
@pytest.fixture
def app():
    """A fresh TestFlask app configured from TestConfig for each test."""
    app = TestFlask('flaskfs-tests')
    app.config.from_object(TestConfig)
    yield app
@pytest.fixture
def binfile():
    # NOTE(review): returns the PNG sample, identical to pngfile — presumably
    # any binary file will do here; confirm JPG_FILE was not intended.
    return PNG_FILE
@pytest.fixture
def pngfile():
    """Path to the sample PNG fixture file."""
    return PNG_FILE
@pytest.fixture
def jpgfile():
    """Path to the sample JPEG fixture file."""
    return JPG_FILE
class Utils:
    """Helpers for building werkzeug FileStorage objects in tests."""

    def filestorage(self, filename, content, content_type=None):
        """Wrap `content` in a FileStorage with the given name and MIME type."""
        return FileStorage(self.file(content), filename, content_type=content_type)

    def file(self, content):
        """Coerce bytes/str into a BytesIO; file-like objects pass through."""
        if isinstance(content, bytes):
            return io.BytesIO(content)
        if isinstance(content, str):
            return io.BytesIO(content.encode('utf-8'))
        return content
@pytest.fixture
def utils():
    """A Utils helper instance.

    Fixed: the previous ``faker`` parameter declared a fixture dependency
    that was never used (and requires the faker pytest plugin); removed.
    """
    return Utils()
@pytest.fixture
def mock_backend(app, mocker):
    """Patch the storage backend with a Mock and select it via FS_BACKEND."""
    app.config['FS_BACKEND'] = 'mock'
    mock = mocker.patch('flask_file_system.backends.mock.MockBackend')
    yield mock
|
5,572 | c80ecb97c8863b724169715b766024ce824b9225 | import numpy as np
# UM TRABALHO FEITO PELA GRANDE DUPLA PEQUENA Mag e Rud
def main():
    """Build a random (or user-sized) matrix of ints in [0, 15] and print
    several statistics about it.  Prompts/output are in Portuguese."""
    a = int(input("Informe a sua opção (Aleatório = 0, Você escolhe = outro numero): "))
    if(a != 0):
        linhas = int(input("Informe o número de linhas da matriz: "))
        colunas = int(input("Informe o número de colunas da matriz: "))
    else:
        # Random size between 1 and 5 in each dimension.
        linhas = np.random.randint(1, 6)
        colunas = np.random.randint(1, 6)
    lista = np.floor(16 * np.random.random((linhas, colunas)))
    primeiro = lista[0, 0]
    quantVezes = 0
    media = 0
    # Count occurrences of the first element and accumulate the mean.
    for i in range(linhas):
        for j in range(colunas):
            if(lista[i][j] == primeiro):
                quantVezes += 1
            media += lista[i][j] / (linhas * colunas)
    menorDiferenca = lista[0, 0]
    somaPar = 0
    # Find the entry closest to the mean and sum the even entries.
    for i in range(linhas):
        for j in range(colunas):
            if(np.abs(lista[i][j] - media) < np.abs(menorDiferenca - media)):
                menorDiferenca = lista[i][j]
            if(lista[i][j] % 2 == 0):
                somaPar += lista[i][j]
    # Unique values; the repeat count is total cells minus distinct values.
    u, c = np.unique(lista, return_counts = True)
    quantRepetido = linhas * colunas - len(u)
    print(lista)
    print(f"O maior valor da lista é: {np.amax(lista)}")
    print(f"A soma dos elementos é: {np.sum(lista)}")
    print(f"A quantidade de vezes que o primeiro elemento aparece: {quantVezes}")
    print(f"A média é: {media}")
    print(f"O Valor mais próximo da média é: {menorDiferenca}")
    print(f"A soma dos valores múltiplos de 2 é: {somaPar}")
    print(f"A quantidade de números repetidos é: {quantRepetido}")
    print(f"Lista sem números repetidos: {u}")
main()
|
5,573 | 0f4bdaecef356e01cbef527d4886564d9ef840fa | from erlport.erlterms import Atom
from scipy.optimize import basinhopping
import numpy as np
import qsim
class Bounds(object):
    '''Feasible-region test for scipy.optimize.basinhopping.

    A candidate x (transition times) is valid when every component lies in
    [xmin, xmax] and consecutive times leave room for the transition costs.
    '''

    def __init__(self, xmin, xmax, costs):
        self.xmax = xmax
        self.xmin = xmin
        self.costs = costs

    def is_valid(self, x):
        tmax = bool(np.all(x <= self.xmax))
        tmin = bool(np.all(x >= self.xmin))
        in_order = [x[i] + c <= x[i + 1] for i, c in enumerate(self.costs[1:])]
        # NOTE(review): x[0] <= costs[0] looks inverted relative to the other
        # ordering constraints — confirm against the SLSQP formulation.
        in_order.append(x[0] <= self.costs[0])
        return tmax and tmin and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping accept_test protocol: the candidate arrives as x_new.
        x = kwargs["x_new"]
        return self.is_valid(x)

    def SLSQP_constraints(self):
        '''Return inequality constraints for SLSQP and COBYLA; each fun must
        be >= 0 at feasible points (asserts 0 >= x_i - x_i-1 forall i).'''
        # Fixed: bind i and c as default arguments.  The original lambdas
        # captured the loop variables late, so every COBYLA constraint
        # evaluated with the final loop values and they were all identical.
        funs = [lambda x, i=i, c=c: x[i + 1] - x[i] + c
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        funs += [lambda x, i=i: x[i] for i in range(len(self.costs))]
        # Preserves the original Python 2 behaviour, where the trailing
        # lambda saw the comprehension's leaked final index.
        funs += [lambda x, i=len(self.costs) - 1: -x[i]]
        # in matrix form
        n = len(self.costs)
        # -x_i <= 0
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        # tmax constraints
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        # cost constraints
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {"slsqp": {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},
                "cobyla": [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        '''Return bounds as a sequence of (min, max) pairs, one per time.'''
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    '''Custom basinhopping step-taker that only proposes feasible moves.

    Draws uniform perturbations of geometrically shrinking radius until one
    keeps x inside `bounds`, giving up after max_iter attempts.  (range
    replaces the Python-2-only xrange; an unused local was dropped.)
    '''

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.deflate = deflate

    def __call__(self, x):
        for attempt in range(self.max_iter):
            # Shrink the proposal radius geometrically on every attempt.
            radius = self.stepsize * self.deflate ** (attempt + 1)
            step = np.random.uniform(-radius, radius, x.shape)
            if self.bounds.is_valid(x + step):
                x += step  # mutate in place, as basinhopping permits
                return x
        return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
    '''Erlang Entry Point to Optimization Module.

    Decodes the erlport-shipped terms, runs best_path, and returns the
    chosen path plus its transition times offset by `start`.
    NOTE(review): the caller-supplied `maxiter` is ignored — best_path is
    invoked with maxiter=10.  Confirm whether that is intentional.
    '''
    B_table = parse_behaviours(behaviours)
    BTG = parse_edgelist(btg)
    F = parse_prediction(prediction)
    path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)
    return list(path), map(lambda x: int(x) + start, t.x)
def best_path(paths, Behaviour_Table, BTG, F, dt=1.,
              maxiter=20, Acc0=None, method="SLSQP"):
    '''
    Perform the mixed ILP optimization (without queues, or memory), that yields
    the optimal behaviour transition through the BTG.
    :paths -> iterable of path-iterables, path-domain for optimization
              Each path-iterable contains only behaviour_id.
    :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}
                        Must contain all behaviours in btg
    :btg -> Behaviour Transition Graph, nodes are behaviour_ids,
            dictionary of the form {(v_1, v_2): tau_1,2}
    :F -> Prediction matrix, of shape (|b_vec|, n),
          where n is int(T_max/dt)
    :dt -> Prediction time-resolution
    :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.
    :method -> scipy local-minimizer name ("SLSQP" or "COBYLA").
    '''
    # Given a particular path, find the optimal times to transition
    Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0
    Solutions = []
    t_max = int((F.shape[-1] - 1) * dt)
    # Basinhopping "temperature": total predicted flow per path segment.
    initial_T = F.sum() / len(paths[0])
    for path in paths:
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table,
                                               BTG, t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds(),
                            'constraints': bounds.SLSQP_constraints()[method.lower()],
                            }
        result = basinhopping(L, x0.copy(),
                              accept_test=bounds,
                              take_step=step_taker, stepsize=10*dt,
                              niter=maxiter, T=initial_T,
                              interval=20,
                              minimizer_kwargs=minimizer_kwargs)
        Solutions.append(result)
    # Keep the path whose optimised objective value is smallest.
    i, BestPath = min(((i, s) for i, s in enumerate(Solutions)),
                      key=lambda x: x[1].fun)
    return paths[i], BestPath
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0,
               q_acc_model=qsim.integrator, q_acc_model_args=[], q_model_kwargs={},
               q_relief_model=qsim.linear_relief,
               deadtime_penalty=4):
    '''Generates the components necessary to completely specify
    best-path optimization routine. (With a queue model)
    Returns:
    :Lagrangian Objective Function L(x) -> Contains a Barrier Component
    :x0 -> an initial realizeable solution
    :bounds -> a Bounds() object, that defines surrounding hyper-volume for x

    NOTE(review): q_acc_model / q_acc_model_args / q_model_kwargs /
    q_relief_model are accepted but never used below — confirm whether they
    were meant to reach qsim.cascading_relief.
    '''
    B = np.vstack(BTable[bid] for bid in path)  # Behaviour Matrix (d,4)
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0., (F.shape[-1] - 1) * dt, taus)
    def cost(x, p=deadtime_penalty):
        '''Simulate the queue effects, and then evaluate the objective function
        on the simulation result'''
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,
                                       Acc0=Acc0, relief_mode_kwargs={"rate": 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        # NOTE(review): despite the name, this counts time-steps with
        # NON-zero served flow per behaviour (Z == 0 maps to 0, else 1).
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        return (-obj(x, B, cum_Z, taus, dt=dt)
                + 0.25* avg_rates.dot(Deadtimes) ** 2
                - avg_rates.sum()*Acc.sum())  # ????
    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
# Parsers ###############################################################
def parse_edgelist(edges):
    '''[((a, b), tau)] -> {(a, b): tau}'''
    parsed = {}
    for (src, dst), tau in edges:
        parsed[(src, dst)] = tau
    return parsed
def parse_behaviours(behaviours, dtype=np.float32):
    '''[(bid, [bvec])] -> {bid: <bvec>}, each vector summed along axis 1.

    (The dtype parameter is accepted for interface compatibility but is not
    applied by the conversion.)
    '''
    return {bid: np.array(bvec).sum(axis=1) for bid, bvec in behaviours}
def parse_prediction(F):
    '''[[float]] -> np.array(...) of the same shape'''
    return np.asarray(F)
# Optimization ###############################################################
def initial_soln(path, t_max):
    '''Evenly spaced transition times over [0, t_max]; ignores taus.

    (range replaces the Python-2-only xrange; the iteration is identical.)
    '''
    spacing = t_max / len(path)
    return np.array([(i + 1) * spacing for i in range(len(path) - 1)])
def transition_costs(path, btg):
    '''Sequence of transition costs along the prescribed path.

    (zip over consecutive pairs replaces the Python-2-only xrange loop.)
    '''
    return [btg[(a, b)] for a, b in zip(path, path[1:])]
def range_sum(cum_F, a, b, penalty=-1000):
    '''Windowed sum cum_F[..., b] - cum_F[..., a] over cumulative columns.

    Any out-of-range index returns a penalty vector that brutally dominates
    the objective instead of raising.
    '''
    last = cum_F.shape[-1] - 1
    if not (0 <= a <= last and 0 <= b <= last):
        return np.ones(cum_F.shape[0]) * penalty
    return cum_F[..., b] - cum_F[..., a]
def flow_served(cum_F, times, costs, queue_model=None, dt=1.):
    '''Times: [t1, ..., td],
    costs: [t_{b0, b1}, t_{b1, b2}, ...]
    Returns the Fulfillment matrix associated with each behaviour segment.

    NOTE(review): Python-2-only — `[0] + map(...)` needs map() to return a
    list, and xrange does not exist on Python 3.  queue_model is unused.
    '''
    discr_index = lambda x: int(x / dt) - 1  # continuous time -> column index
    t_steps = [0] + map(discr_index, times)
    t_steps.append(cum_F.shape[-1] - 1)  # t_max
    c_steps = [0] + map(discr_index, costs)
    result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])
                        for i in xrange(len(costs) + 1)])
    return result
def obj(times, B, cum_F, costs, dt=1.):
    '''Objective for hillclimbing: total behaviour-weighted flow served.'''
    served = flow_served(cum_F, times, costs, dt=dt)
    return (B * served).sum()
def barrier(times, path, BTG):
    '''Penalty for violated causality constraints between transitions.

    Accrues min(0, t[i+1] - t[i] - tau) per edge, i.e. a negative term only
    when the gap between consecutive transition times is smaller than the
    edge's cost.  (range replaces the Python-2-only xrange; behaviour is
    otherwise unchanged.)
    '''
    t = [0] + list(times)
    S = 0.
    for i in range(len(path) - 1):
        tau = BTG[(path[i], path[i + 1])]
        S += min(0, (t[i + 1] - t[i] - tau))  # only accrue if constraint is violated
    return S
|
5,574 | aa801bc8398cdf69a15d04188dd8429e4624150e | import numpy as np
# Read an adjacency-matrix sample from file: the first line holds the
# vertex count n, the remaining tokens hold the n*n matrix entries.
theFile = open('datapri.txt','r')
temp = []
# n is the number of vertices in the sample matrix.
# NOTE(review): .format() on the line string is a no-op — .strip() was
# probably intended (int() tolerates the trailing newline anyway).
n = int(theFile.readline().format())
for val in theFile.read().split():
    temp.append(int(val))
theFile.close()
arr = np.random.rand(n,n)
k = 0
# Copy the flat values into the n x n matrix (the random init is overwritten).
for i in range(n):
    for j in range(n):
        arr[i,j] = temp[k]
        k = k+1
# print(arr)
# Build one adjacency list per vertex...
ke = []
for i in range(n):
    ke.append([])
trongso = []  # ...plus a parallel list of edge weights
for i in range(n):
    trongso.append([])
# Record neighbours and weights for every non-zero matrix entry.
for i in range(n):
    for j in range(n):
        if(arr[i,j] != 0):
            ke[i].append(j)
            trongso[i].append(arr[i,j])
print(trongso[1])
# available = [False for i in range(n)]
# vertex = [0 for i in range(n)]
#
# def CorlorGraph():
# #khoi tao dinh dau tien duoc to mau dau tien
# vertex[0] = 0
#
# #khoi tao cac dinh con lai chua duoc to mau
# for i in range(1,n):
# vertex[i] = -1
#
# #to mau cac dinh con lai
# for i in range(1,n):
# for j in (ke[i]):
# if(vertex[j] != -1):
# available[vertex[j]] = True
#
# crz = 0
# for k in range(n):
# if (available[k] == False):
# break
# crz = crz + 1
# vertex[i] = crz
# for j in (ke[i]):
# if (vertex[j] != -1):
# available[vertex[j]] = False
# for i in range(n):
# print("ke",i,"-",ke[i])
# CorlorGraph()
# print("Cac dinh da duoc to mau: ")
# for i in range(n):
# print(i,vertex[i])
|
5,575 | 1284de6474e460f0d95f5c76d066b948bce59228 | #!usr/bin/env python3
from argoverse.map_representation.map_api import ArgoverseMap
from frame import Frame
import matplotlib.pyplot as plt
import pickle
import numpy as np
from argo import draw_local_map
# Frames in cluster visualization
def frame_in_pattern_vis(xmin, xmax, ymin, ymax):
    """Animate the frames assigned to each motion pattern of a saved
    mixture model over the local map for the given bounding box.

    The NGSIM branch is legacy: it only shows the single largest pattern
    and uses hard-coded axis limits; the ARGO branch iterates all patterns.
    """
    dataset = 'ARGO'
    if dataset == 'NGSIM':
        with open("data_sample/a_mixture_model_NGSIM_200", "rb") as mix_np:  # load saved mixture model
            mix_model = pickle.load(mix_np)
        with open("data_sample/frame_US_101_200", "rb") as frame_np:  # load saved frames
            load_frames = pickle.load(frame_np)
        print('everything loaded')
        # visualize frames from the same pattern (largest partition only)
        pattern_num = np.argmax(np.array(mix_model.partition))
        pattern_idx = idx = np.where(np.array(mix_model.z) == pattern_num)
        pattern_idx = np.asarray(pattern_idx)
        pattern_idx = pattern_idx[0].astype(int)
        plt.ion()
        for i in range(mix_model.partition[pattern_num]):
            # the on road is much more stable, however the off road ones are quite noisy
            plt.cla()
            frame_temp = load_frames[pattern_idx[i]]
            plt.quiver(frame_temp.x, frame_temp.y, frame_temp.vx, frame_temp.vy)
            plt.xlim([0, 60])
            plt.ylim([1300, 1600])
            plt.show()
            plt.pause(0.05)
        plt.ioff()
    elif dataset == 'ARGO':
        with open("data_sample/argo_MixtureModel_%d_%d_%d_%d" % (xmin, xmax, ymin, ymax),
                  "rb") as mix_np:  # load saved mixture model
            mix_model = pickle.load(mix_np)
        with open("data_sample/argo_%d_%d_%d_%d" % (xmin, xmax, ymin, ymax), "rb") as frame_np:  # load saved frames
            load_frames = pickle.load(frame_np)
        print('everything loaded')
        # visualize frames from the same pattern, once per pattern
        for i in range(mix_model.K):
            # pattern_num = np.argmax(np.array(mix_model.partition))
            pattern_num = i
            # NOTE(review): `idx` is an unused alias of pattern_idx.
            pattern_idx = idx = np.where(np.array(mix_model.z) == pattern_num)
            pattern_idx = np.asarray(pattern_idx)
            pattern_idx = pattern_idx[0].astype(int)
            fig = plt.figure(figsize=(10, 10))
            ax = fig.add_subplot(111)
            avm = ArgoverseMap()
            city_name = 'PIT'
            # Share of all frames belonging to this pattern (for the title).
            rate = np.array(mix_model.partition)[pattern_num] / mix_model.n
            plt.ion()
            # NOTE(review): the inner loop reuses `i`, shadowing the outer
            # pattern index — harmless (pattern_num was captured) but fragile.
            for i in range(mix_model.partition[pattern_num]):
                # the on road is much more stable, however the off road ones are quite noisy
                plt.cla()
                frame_temp = load_frames[pattern_idx[i]]
                draw_local_map(avm, city_name, xmin, xmax, ymin, ymax, ax)
                plt.quiver(frame_temp.x, frame_temp.y, frame_temp.vx, frame_temp.vy, color='#ED5107')
                plt.xlim([xmin, xmax])
                plt.ylim([ymin, ymax])
                name = 'PIT_%d_%d_%d_%d_' % (xmin, xmax, ymin, ymax) + str(pattern_num) + '_' + str(round(rate, 3))
                plt.title(name)
                plt.show()
                plt.pause(0.05)
            plt.ioff()
# velocity field visualization
def velocity_field_visualization(xmin, xmax, ymin, ymax):
    """Visualize the posterior mean velocity field of one motion pattern.

    Loads the pickled mixture model and frames for the map window
    (xmin, xmax, ymin, ymax), evaluates the GP posterior of the dominant
    pattern on a 31x31 mesh, renders a quiver plot over the Argoverse map,
    and saves it under fig/.

    Returns:
        (WX, WY, ux_pos, uy_pos): mesh coordinates (column vectors) and the
        posterior mean velocity components at each mesh point.
    """
    with open("data_sample/argo_MixtureModel_%d_%d_%d_%d" % (xmin, xmax, ymin, ymax),
              "rb") as mix_np:  # load saved mixture model
        mix_model = pickle.load(mix_np)
    with open("data_sample/argo_%d_%d_%d_%d" % (xmin, xmax, ymin, ymax), "rb") as frame_np:  # load saved frames
        load_frames = pickle.load(frame_np)
    print('everything loaded')
    # visualize frames from the same pattern
    # for i in range(mix_model.K):
    for i in range(1):
        pattern_num = i
        # NOTE(review): pattern_num is immediately overwritten with the most
        # populated pattern, so the loop variable only affects `rate` below --
        # confirm whether iterating all K patterns was intended.
        pattern_num = np.argmax(np.array(mix_model.partition))
        rate = np.array(mix_model.partition)[i]/mix_model.n
        frame_pattern_ink = mix_model.frame_ink(pattern_num, 0, True)
        # construct mesh frame
        x = np.linspace(xmin, xmax, 31)
        y = np.linspace(ymin, ymax, 31)
        [WX,WY] = np.meshgrid(x, y)
        WX = np.reshape(WX, (-1, 1))
        WY = np.reshape(WY, (-1, 1))
        # Mesh points packed as a Frame with zero velocities (query points only).
        frame_field = Frame(WX.ravel(), WY.ravel(), np.zeros(len(WX)), np.zeros(len(WX)))
        # get posterior mean/covariance of the velocity field at the mesh points
        ux_pos, uy_pos, covx_pos, covy_pos = mix_model.b[pattern_num].GP_posterior(frame_field, frame_pattern_ink, True)
        print('now start plotting')
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
        avm = ArgoverseMap()
        city_name = 'PIT'
        plt.quiver(WX, WY, ux_pos, uy_pos, width=0.002, color='#ED5107')
        draw_local_map(avm, city_name, xmin, xmax, ymin, ymax, ax)
        plt.xlabel('x_map_coordinate')
        plt.ylabel('y_map_coordinate')
        name = 'PIT_%d_%d_%d_%d_' % (xmin, xmax, ymin, ymax) +str(pattern_num) + '_' + str(round(rate, 3))
        print(name)
        plt.title(name)
        plt.savefig('fig/'+name + '.png')
        # NOTE(review): the figure is closed before plt.show(), so nothing is
        # displayed interactively -- presumably only the saved PNG matters.
        plt.close()
        plt.show()
        # Note that it doesn't have to show the
        return WX, WY, ux_pos, uy_pos
# dataset = 'ARGO'
# vis_vel_field = True
#
# if vis_vel_field:
# WX, WY, ux_pos, uy_pos = velocity_field_visualization(2570, 2600, 1180, 1210)
# else:
# frame_in_pattern_vis(dataset)
#
# print('Visualization Finished') |
5,576 | 1b49cb59ebdb548cfc7567cd5cb4affe30f33aac | import pytest
@pytest.mark.usefixtures("driver")
class BaseClass:
    """Base test class; pytest injects the `driver` fixture into every subclass."""
|
5,577 | bc7a7b9ba4b3277c862aadb57b56661c24efc6e5 | from django.db import models
# Create your models here.
class Orders(models.Model):
    """A customer payment order and its processing state."""
    customer_name = models.CharField(max_length=80)
    customer_email = models.CharField(max_length=120)
    customer_mobile = models.CharField(max_length=40)
    # Free-form order status string -- TODO confirm the set of values used.
    status = models.CharField(max_length=20)
    # Payment-gateway fields; nullable until the gateway session exists.
    process_url = models.CharField(max_length=150, null=True)
    session_id = models.CharField(max_length=100, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
5,578 | 9e37b728d8045726aef7625fccc14111ecb0e1c8 | # -*- coding: utf-8 -*-
# @Author: Lich_Amnesia
# @Email: alwaysxiaop@gmail.com
# @Date: 2016-11-17 11:00:33
# @Last Modified time: 2016-11-17 11:00:34
# @FileName: 346.py
class MovingAverage(object):
    """Moving average over the most recent `size` values of a stream."""

    def __init__(self, size):
        """
        Initialize your data structure here.
        :type size: int
        """
        self.size = size
        self.q = collections.deque()
        self.sum_ = 0

    def next(self, val):
        """
        :type val: int
        :rtype: float
        """
        # Push first, then evict the oldest value once the window overflows;
        # the running sum is maintained incrementally so each call is O(1).
        window = self.q
        window.append(val)
        self.sum_ += val
        if len(window) > self.size:
            self.sum_ -= window.popleft()
        return float(self.sum_) / len(window)


# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val)
5,579 | 888a5847beca2470f4063da474da1f05079abca9 | from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth import get_user_model
from tweets.models import Tweet
from ..models import UserProfile
User = get_user_model()
class TestAccountsViews(TestCase):
    """Smoke tests for the accounts app views: status codes and templates only."""
    def setUp(self):
        # One superuser and one of their tweets, shared by every test; a
        # fresh anonymous Client is created per test.
        self.username = 'masterbdx'
        self.email = 'masterbdx@gmail.com'
        self.password = '123456789'
        self.user = User.objects.create_superuser(email=self.email,
                                                  username=self.username,
                                                  password=self.password,
                                                  subscribed=True
                                                  )
        self.tweet = Tweet.objects.create(
            user=self.user,
            content='hello world',)
        self.client = Client()
    def test_login_view(self):
        response = self.client.get(reverse('accounts:login'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'accounts/login.html')
    def test_logout_view(self):
        # Logout redirects (302) even for anonymous users.
        response = self.client.get(reverse('accounts:logout'))
        self.assertEqual(response.status_code, 302)
    def test_profile_view(self):
        url = reverse('accounts:profile', kwargs={'user_slug': self.user.slug})
        response = self.client.get(url)
        self.assertTemplateUsed(response, 'accounts/profile.html')
        self.assertEqual(response.status_code, 200)
    def test_register_view(self):
        url = reverse('accounts:register')
        response = self.client.get(url)
        self.assertTemplateUsed(response, 'accounts/register.html')
        self.assertEqual(response.status_code, 200)
    def test_userfollow_view(self):
        # Anonymous access redirects (302) to login.
        url = reverse('accounts:follow', kwargs={'user_slug': self.user.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
        self.client.login(
            email=self.email, password=self.password)
        # NOTE(review): this re-asserts the OLD (pre-login) response; no new
        # request is made after login -- confirm whether a second GET was intended.
        self.assertEqual(response.status_code, 302)
    def test_follow_manager_view(self):
        url = reverse('accounts:follow_manage', kwargs={
            'user_slug': self.user.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
        self.client.login(
            email=self.email, password=self.password)
        # NOTE(review): same pattern as above -- asserts the pre-login response.
        self.assertEqual(response.status_code, 302)
    def test_profile_update_view(self):
        url = reverse('accounts:profile_update', kwargs={
            'pk': self.user.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
    def test_followers_view(self):
        url = reverse('accounts:followers', kwargs={
            'user_slug': self.user.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'accounts/followers.html')
    def test_following_view(self):
        url = reverse('accounts:following', kwargs={
            'user_slug': self.user.slug})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'accounts/following.html')
    def test_user_like_view(self):
        # Liking requires authentication: 403 anonymously, 200 when logged in.
        url = reverse('accounts:user-like', kwargs={
            'slug': self.user.slug, 'pk': self.tweet.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
        self.client.login(
            email=self.email, password=self.password)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
    def test_theme_view(self):
        # Theme toggle redirects (302) both anonymously and when logged in.
        url = reverse('accounts:theme')
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
        self.client.login(
            email=self.email, password=self.password)
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)
5,580 | 67e06b6dddbd3f26295eaff921d1ad4a8b0e5487 | import importlib
class Scrapper:
    """Namespace for provider-specific scraping helpers.

    Both methods were previously plain functions inside the class body
    (no `self`, no decorator), so calling them on an *instance* raised
    TypeError; they are now proper staticmethods, which keeps the existing
    `Scrapper.get_pos(...)` call style working unchanged.
    """

    @staticmethod
    def get_pos(str_lf, str_rg, text):
        """Return (left, right): the first index of `str_lf` and the last
        index of `str_rg` in `text` (-1 for a marker that is absent)."""
        left = text.find(str_lf)
        right = text.rfind(str_rg)
        return left, right

    @staticmethod
    def scrapper(prov):
        """Dynamically import scrappers.<prov> and run its scrape()."""
        # Renamed the local from `scrapper` to avoid shadowing the method name.
        module = importlib.import_module('scrappers.{}'.format(prov))
        return module.scrape()
|
5,581 | fa825846c54ed32c2ede94128ac08f9d5e172c0f | # -*- coding: utf-8 -*-
import urllib2, json, traceback
from django.conf import settings
from django.db import models
from TkManager.order.models import User
from TkManager.juxinli.models import *
from TkManager.juxinli.error_no import *
from TkManager.common.tk_log import TkLog
from datetime import datetime
from django_gearman_commands import GearmanWorkerBaseCommand
from django.db import transaction
import objgraph
class JuxinliBaseCommand(GearmanWorkerBaseCommand):
    """
    Fetch JSON data from the Juxinli service and persist it into the database.

    init_config       configures how the fetched data is stored; subclasses
                      must implement it (see its docstring for the format).
    get_juxinli_data  runs the fetch -> parse -> store pipeline.
    """
    def __init__(self):
        super(JuxinliBaseCommand, self).__init__()
        # API endpoints and credentials come from Django settings.
        self._org_name = settings.JUXINLI_CONF['org_name']
        self._client_secret = settings.JUXINLI_CONF['client_secret']
        self._access_report_data_api = settings.JUXINLI_CONF['access_report_data_api']
        self._access_raw_data_api = settings.JUXINLI_CONF['access_raw_data_api']
        self._access_report_token_api = settings.JUXINLI_CONF['access_report_token_api']
        self._access_e_business_raw_data_api = settings.JUXINLI_CONF['access_e_business_raw_data_api']
        self._options = {
            'update_days' : 21,
            'force_update' : False,
        }
        self.init_config()
    # NOTE(review): defined without `self`, yet __init__ calls
    # self.init_config(); the base version raises TypeError unless a subclass
    # overrides it -- confirm every concrete command does.
    def init_config():
        '''
        Configure how fetched data is stored (subclasses must implement this
        and set self._transformer).  Reference format:
        self._transformer = {
            'basic_transformer' : {
                'name' : 'PhoneBasic', # name of the Django model class
                'path' : 'raw_data/members/transactions:0/basic', # path of the data inside the JSON
                'data_type' : 'map', # 'map' for a single record, 'list' for a list of records
                'version' : True, # True: each fetch bumps the version number; False: always version 1
                'trans' : { # field mapping: source_field (json) -> dest_field (db model)
                    "cell_phone": "cell_phone",
                    "idcard": "idcard",
                    "real_name": "real_name",
                    "reg_time": "reg_time",
                    "update_time": "update_time",
                    "receiver" : { # foreign keys use a nested block like this
                                   # (nested blocks need no 'path'; the current path is implied)
                        "name" : "Receiver"
                        "req_call_cnt/data_type" : "list"
                        "version" : True,
                        "trans": {
                            "name" : "name",
                            "phone_num_list" : "phone_num_list",
                            "amount" : "amount",
                            "count" : "count",
                        },
                    },
                },
            },
        }
        '''
        pass
    def test(self,user,data):
        # Persist pre-fetched `data` for `user`, skipping the HTTP round-trip.
        if not data:
            return ERR_GET_RAW_DATA_FAILED
        ret_code = self._save_raw_data(data, user, self._options)
        return ret_code
    def get_juxinli_data(self, uid, url):
        """Full pipeline for one user: resolve the User, obtain a token,
        download raw data from `url` and store it.  Returns an error_no code
        (RETURN_SUCCESS on success); never raises."""
        try:
            user = User.objects.get(pk=uid)
            token = self._get_token()
            if not token:
                return ERR_CREATE_TOKEN_FAILED
            data = self._get_juxinli_data(token, user, url)
            if not data:
                return ERR_GET_RAW_DATA_FAILED
            ret_code = self._save_raw_data(data, user, self._options)
            if ret_code != 0:
                return ret_code
            #data = self._get_report_data(token, user)
            #print data
            #print "@@ print ret", ret_code
            return RETURN_SUCCESS
        except Exception, e:
            traceback.print_exc()
            TkLog().error("get juxinli call failed %s" % str(e))
            return ERR_OTHER_EXCEPTION
    def _open_url(self, url):
        '''
        Perform an HTTP GET on `url` and return the parsed JSON body.
        '''
        req1 = urllib2.Request(url=url)
        html = urllib2.urlopen(req1).read().decode('utf-8')
        return json.loads(html.encode("utf-8"))
    def _get_token(self):
        '''
        Create a fresh access token for data fetching; returns None on failure.
        '''
        url = u"%s?client_secret=%s&hours=24&org_name=%s" % (self._access_report_token_api, self._client_secret, self._org_name)
        html = self._open_url(url)
        #if
        try:
            res = html['access_token']
            return res
        except KeyError, e:
            return None
    def _get_juxinli_data(self, access_token, user, url):
        '''
        Fetch Juxinli data for `user`; returns the parsed JSON, or None when
        the response lacks expected keys or reports success != "true".
        '''
        raw_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (url, self._client_secret, access_token, user.name, user.id_no, user.phone_no)
        #print raw_url
        try:
            res = self._open_url(raw_url.encode('utf-8'))
            # print res
            # print res['raw_data']['members']['error_msg']
            success = res["success"]
            if success != "true":
                return None
            return res
        except KeyError, e:
            return None
    #def _get_report_data(self, access_token, user):
    #    report_url = u'%s?client_secret=%s&access_token=%s&name=%s&idcard=%s&phone=%s' % (self._access_report_token_api, self._client_secret, access_token, user.name, user.id_no, user.phone_no)
    #    print report_url
    #    res = self._open_url(report_url.encode('utf-8'))
    #    #print res
    #    #print res['raw_data']['members']['error_msg']
    #    return res
    def _allow_overwrite_data(self, user, options):
        # NOTE(review): always allows overwriting; the update_days throttle
        # described in _save_raw_data's docstring is not implemented -- confirm.
        return True
    def _get_data_from_path(self, data, path):
        '''
        Path syntax: '/' separates keys; ':' selects a list index, e.g.
        'transactions:0' -> data['transactions'][0].  Returns None on any
        lookup failure.
        '''
        try:
            fields = path.split("/")
            #print fields
            res = data
            for field in fields:
                if field.find(":") != -1:
                    parts = field.split(":")
                    if len(parts) != 2:
                        TkLog().error("field format error %s" % (field))
                        return None
                    res = res[parts[0]][int(parts[1])]
                else:
                    res = res[field]
            return res
        except Exception, e:
            print e
            traceback.print_exc()
            TkLog().error("get data from path failed %s" % str(e))
            return None
    def _save_raw_data(self, data, user, options):
        """
        Re-entrant: if a user's data was refreshed fewer than
        options['update_days'] days ago the DB should not be updated;
        otherwise new records are added.
        """
        if not self._allow_overwrite_data(user, options):
            return RETURN_CAN_NOT_OVERWRITE
        for transtype in self._transformer.keys():
            adaptor = self._transformer[transtype]
            # adaptor['name'] holds the model class name as a string.
            cls = eval(adaptor["name"])
            version = 0
            objs = cls.objects.filter(owner=user).order_by('-id')[:1]
            if len(objs) == 1:
                version = objs[0].version
            TkLog().info("update %s version %d" % (adaptor["name"], version))
            data_list = self._get_data_from_path(data, adaptor["path"])
            if not data_list:
                TkLog().warn("data not found %s:%s" % (adaptor["name"], adaptor["path"]))
                #return -4 #just skip
            ret_code = self._save_obj(data_list, cls, user, adaptor, version)
            if ret_code != 0:
                return ret_code
        return RETURN_SUCCESS
    @transaction.commit_manually
    def _save_obj(self, data_list, cls, user, adaptor, version=0, parent=None):
        '''
        Write one object (or a list of objects) to the database.
        adaptor['data_type'] decides whether data_list is a single record
        ('map') or a list of records ('list').
        '''
        # NOTE(review): under commit_manually the early `return ret_code`
        # paths leave the transaction uncommitted -- confirm intended.
        if adaptor["data_type"] == "list": #data_list is a list of records
            for record in data_list:
                ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent)
                if ret_code != 0:
                    return ret_code
        elif adaptor["data_type"] == "map": #data_list is a single record
            record = data_list
            ret_code = self._save_single_obj(record, cls, user, adaptor, version, parent)
            if ret_code != 0:
                return ret_code
        transaction.commit()
        return 0
    def _save_single_obj(self, record, cls, user, adaptor, version = 0, parent=None):
        '''
        Write a single entry to the database; if `parent` is given, nested
        foreign-key sub-records are saved with this object as their owner.
        record : a single JSON entry
        cls    : the Django model class
        '''
        obj = cls()
        # First pass: plain (string-valued) field mappings.
        for source_field, dest_field in adaptor['trans'].items():
            if isinstance(dest_field,str):
                field_type = obj._meta.get_field(dest_field)
                if "/" in source_field:
                    record[source_field] = self._get_data_from_path(record,source_field)
                if isinstance(field_type, models.CharField):
                    try:
                        if isinstance(record[source_field],list):
                            #setattr(obj, dest_field, "#".join(record[source_field]))
                            setattr(obj, dest_field, record[source_field][0])
                        else:
                            setattr(obj, dest_field, record[source_field])
                    except Exception, e:
                        TkLog().warn("set char field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.IntegerField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, 0)
                        else:
                            setattr(obj, dest_field, int(record[source_field]))
                    except Exception, e:
                        TkLog().warn("set int field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.BigIntegerField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, 0)
                        else:
                            setattr(obj, dest_field, long(record[source_field]))
                    except Exception, e:
                        TkLog().warn("set bigint field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.FloatField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, float(0))
                        else:
                            setattr(obj, dest_field, float(record[source_field]))
                    # NOTE(review): caught as `en` but logged as `e` below --
                    # a NameError will mask the original error if this fires.
                    except Exception, en:
                        TkLog().warn("set float field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.DateTimeField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, None)
                        else:
                            setattr(obj, dest_field, datetime.strptime(record[source_field], "%Y-%m-%d %H:%M:%S"))
                    except Exception, e:
                        TkLog().warn("set datetime field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                elif isinstance(field_type, models.NullBooleanField):
                    try:
                        if not record[source_field]:
                            setattr(obj, dest_field, None)
                        else:
                            setattr(obj, dest_field, record[source_field])
                    except Exception, e:
                        TkLog().warn("set boolean field failed %s %s" % (str(e), record[source_field]))
                        return ERR_SETATTR_FAILED
                else:
                    TkLog().error("unsupported type field:%s" % dest_field)
                    return ERR_UNSUPPORTED_FILED_TYPE
        try:
            if adaptor['version']:
                obj.version = version + 1
            else:
                obj.version = 0
            #if parent:
                #setattr(obj, parent["field"], parent["parent_obj"])
            obj.owner = user
            obj.save()
        except Exception, e:
            print "save error %s" % str(e)
            return ERR_SAVE_OBJECT
        # Second pass: nested (dict-valued) mappings -> foreign-key sub-records.
        for source_field, dest_field in adaptor['trans'].items():
            if isinstance(dest_field,dict):
                try:
                    sub_cls = eval(dest_field["name"])
                    self._save_obj(record[source_field], sub_cls, obj, dest_field, version, {"parent_obj":obj, "field":"owner"})
                except Exception, e:
                    TkLog().warn("set foreignkey field failed %s %s" % (str(e), record[source_field]))
        # Debug aid: dump the most common live object types (leak hunting).
        objgraph.show_most_common_types()
        return 0
|
5,582 | a372289d15b55f43887a37bb78a9fc308ddd0371 | # Generated by Django 3.0.4 on 2020-03-24 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the nullable `parent_mobile` column
    # to the Student model.
    dependencies = [
        ('students', '0002_auto_20200324_1635'),
    ]
    operations = [
        migrations.AddField(
            model_name='student',
            name='parent_mobile',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
5,583 | 9da6bfa614d64956a302abbfeeea30c0339e9db3 | import tensorflow.contrib.slim as slim
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import init_ops
import numpy as np
WEIGHT_DECAY = 0.0005
class ScaledVarianceUniform(init_ops.Initializer):
    """Initializer drawing from a Uniform distribution whose bound is scaled
    as per https://github.com/torch/nn/blob/master/Linear.lua.

    Args:
      factor: python scalar multiplier applied to the variance.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
        self.factor = factor
        self.seed = seed
        self.dtype = dtypes.as_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # NOTE(review): the bound is derived from the *last* dimension only
        # (output channels); torch's Linear scales by fan-in -- confirm intent.
        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        # Uniform on [-b, b] has variance b^2/3, so b = sqrt(3*factor/n)
        # gives variance factor/n.  Stored on self for inspection.
        self.stddev = np.sqrt(self.factor * 3.0 / n)
        return random_ops.random_uniform(shape, minval=-self.stddev, maxval=self.stddev,
                                         dtype=dtype, seed=self.seed)

    def get_config(self):
        # Bug fix: the previous version returned self.mean and self.stddev;
        # self.mean never exists and self.stddev only exists after __call__,
        # so get_config always raised AttributeError.  Serialize the actual
        # constructor arguments instead.
        return {"factor": self.factor,
                "seed": self.seed,
                "dtype": self.dtype.name}
class ScaledVarianceRandomNormal(init_ops.Initializer):
    """Initializer that generates tensors with a normal distribution scaled
    as per https://arxiv.org/pdf/1502.01852.pdf (He et al.).

    Args:
      mean: a python scalar or a scalar tensor. Mean of the random values
        to generate.
      factor: python scalar multiplier applied to the variance.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):
        self.mean = mean
        self.factor = factor
        self.seed = seed
        self.dtype = dtypes.as_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # n = product of kernel dims times the last dimension.
        # NOTE(review): He init uses fan-in (kernel dims * input channels);
        # this multiplies by shape[-1] (output channels) instead -- confirm.
        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)
        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed)

    def get_config(self):
        # Bug fix: self.stddev is only computed inside __call__; guard it so
        # get_config works on a freshly constructed initializer (previously
        # it raised AttributeError in that case).
        return {"mean": self.mean,
                "factor": self.factor,
                "stddev": getattr(self, "stddev", None),
                "seed": self.seed,
                "dtype": self.dtype.name}
class ConvLayer(object):
    """Declarative configuration wrapper around slim.conv2d.

    `normalizer_fn` and `activation_fn` use `False` as a sentinel meaning
    "do not pass the argument -- let slim.conv2d use its own default"; any
    other value (including None, which disables the op) is forwarded
    explicitly.  A truthy `dropout` applies slim.dropout to the *input*
    tensor first.
    """

    def __init__(self, scope, num_outputs, kernel_size, padding='SAME', dropout=False, stride=1, normalizer_fn=False,
                 activation_fn=False, weights_initializer=tf.contrib.layers.xavier_initializer(), weights_regularizer=None):
        self.scope = scope
        self.dropout = dropout
        self.padding = padding
        self.num_outputs = num_outputs
        self.kernel_size = kernel_size
        self.stride = stride
        self.normalizer_fn = normalizer_fn
        self.activation_fn = activation_fn
        self.weights_initializer = weights_initializer
        self.weights_regularizer = weights_regularizer

    def apply(self, h):
        """Build the configured conv op on tensor `h`; returns its output.

        Replaces the previous eight-way duplicated if/else ladder over
        (dropout, normalizer_fn, activation_fn) with a single call; the
        `== False` comparisons preserve the original sentinel semantics.
        """
        if self.dropout == False:
            inputs = h
        else:
            inputs = slim.dropout(h, scope=self.scope + '-dropout')
        # Forward only the explicitly overridden kwargs; an omitted key lets
        # slim.conv2d fall back to its default (batch-norm off, relu on).
        overrides = {}
        if self.activation_fn != False:
            overrides['activation_fn'] = self.activation_fn
        if self.normalizer_fn != False:
            overrides['normalizer_fn'] = self.normalizer_fn
        return slim.conv2d(inputs,
                           num_outputs=self.num_outputs,
                           kernel_size=self.kernel_size,
                           stride=self.stride,
                           scope=self.scope,
                           padding=self.padding,
                           weights_initializer=self.weights_initializer,
                           weights_regularizer=self.weights_regularizer,
                           **overrides)
class ProjectionAdaptor(object):
    """Two-layer 1x1-conv adapter: project down to `projection_width`
    channels, then up to `num_outputs` with no activation/normalization
    (suitable as a logits head)."""
    def __init__(self, scope, projection_width, num_outputs, dropout=False):
        self.dim_reduction_layer = ConvLayer(num_outputs=projection_width, kernel_size=1, stride=1, padding='SAME',
                                             weights_initializer=ScaledVarianceRandomNormal(),
                                             weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY),
                                             dropout=dropout, scope=scope + '/adapter/dim_reduction')
        # Output layer is linear: normalizer_fn/activation_fn explicitly None.
        self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=1, stride=1, padding='SAME',
                                      weights_initializer=ScaledVarianceRandomNormal(),
                                      weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY),
                                      dropout=dropout, scope=scope + '/adapter/output', normalizer_fn=None, activation_fn=None)
    def apply(self, h):
        """Apply both 1x1 convs to tensor `h` and return the result."""
        reduced_space = self.dim_reduction_layer.apply(h)
        return self.output_layer.apply(reduced_space)
def split(input_layer, stride, bottleneck_depth):
    '''
    One branch ("split") of the ResNeXt bottleneck (Figure 3b of the paper):
    a 1x1 conv down to `bottleneck_depth` channels followed by a 3x3 conv at
    the same depth (strided when downsampling).
    :param input_layer: 4D tensor [batch_size, input_height, input_width,
    input_channel]
    :param stride: int. 1 or 2. If want to shrink the image size, then stride = 2
    :param bottleneck_depth: channel width of both convs in this branch
    :return: 4D tensor [batch_size, out_height, out_width, bottleneck_depth]
    '''
    input_depth = input_layer.get_shape().as_list()[-1]
    with tf.variable_scope('bneck_%d_1x1_%dd' %(input_depth, bottleneck_depth)):
        bneck_1x1 = slim.conv2d(input_layer, num_outputs=bottleneck_depth, kernel_size=1, stride=1,
                                padding='SAME',
                                weights_initializer=ScaledVarianceRandomNormal(),
                                weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
    with tf.variable_scope('bneck_%d_3x3_%dd' %(bottleneck_depth, bottleneck_depth)):
        bneck_3x3 = slim.conv2d(bneck_1x1, num_outputs=bottleneck_depth, kernel_size=3, stride=stride,
                                padding='SAME',
                                weights_initializer=ScaledVarianceRandomNormal(),
                                weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
    return bneck_3x3
def bottleneck_b(input_layer, stride, cardinality, bottleneck_depth):
    '''
    The bottleneck structure in Figure 3b: builds `cardinality` parallel
    splits and concatenates them along the channel axis.
    :param input_layer: 4D tensor in shape of [batch_size, input_height, input_width,
    input_channel]
    :param stride: int. 1 or 2. If want to shrink the image size, then stride = 2
    :return: 4D tensor [batch_size, out_height, out_width,
    cardinality * bottleneck_depth]
    '''
    nInputPlane = input_layer.get_shape().as_list()[-1]  # NOTE(review): unused
    split_list = []
    for i in range(cardinality):
        with tf.variable_scope('split_%i'%i):
            splits = split(input_layer=input_layer, stride=stride, bottleneck_depth=bottleneck_depth)
            split_list.append(splits)
    # Concatenate splits along the channel dimension
    concat_bottleneck = tf.concat(values=split_list, axis=3, name='concat_splits')
    return concat_bottleneck
class ResNextAdaptor(object):
    '''
    ResNeXt-style adapter block (Figure 3b): splits/concatenates the input,
    restores the depth to `output_depth` with a linear 1x1 conv, and adds a
    (possibly projected) shortcut.  Unlike ResNextBlock, the output depth is
    given explicitly instead of being derived from num_filters.
    '''
    def __init__(self, scope, cardinality, output_depth, num_filters, stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.output_depth = output_depth
        self.cardinality = cardinality
        self.stride = stride
    def apply(self, input_layer):
        """Build the adapter on `input_layer`; returns the residual sum
        (no final ReLU is applied)."""
        input_depth = input_layer.get_shape().as_list()[-1]
        with tf.variable_scope(self.scope):
            bottleneck_out = bottleneck_b(input_layer, stride=self.stride, bottleneck_depth=self.num_filters,
                                          cardinality=self.cardinality)
            # Linear 1x1 conv restores the channel count to output_depth.
            restored = slim.conv2d(bottleneck_out, num_outputs=self.output_depth, kernel_size=1, stride=1,
                                   scope='restore_num_outputs', padding='SAME', activation_fn=None,
                                   weights_initializer=ScaledVarianceRandomNormal(),
                                   weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                # Project the shortcut only when depth (or stride) changes.
                if input_depth != self.output_depth:
                    padded_input = slim.conv2d(input_layer, num_outputs=self.output_depth, kernel_size=1, stride=self.stride,
                                               padding='SAME', activation_fn=None,
                                               weights_initializer=ScaledVarianceRandomNormal(),
                                               weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
                else:
                    padded_input = input_layer
            residual = tf.add(restored, padded_input, name='residual')
        return residual
class ResNextBlock(object):
    '''
    The block structure in Figure 3b. Takes a 4D tensor as input layer and
    splits, concatenates the tensor and restores the depth to 4*num_filters.
    Finally adds the (possibly projected) identity shortcut; no final ReLU.
    '''
    def __init__(self, scope, cardinality, bottleneck_depth, num_filters, stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.bottleneck_depth = bottleneck_depth
        self.cardinality = cardinality
        self.stride = stride
    def apply(self, input_layer):
        """Build the block on `input_layer`; returns the residual sum."""
        input_depth = input_layer.get_shape().as_list()[-1]
        # output width 4*self.num_filters as per line 96 in
        # https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua, commit 833a384
        output_depth = 4*self.num_filters
        with tf.variable_scope(self.scope):
            bottleneck_out = bottleneck_b(input_layer, stride=self.stride, bottleneck_depth = self.num_filters, cardinality=self.cardinality)
            # Linear 1x1 conv restores the channel count to output_depth.
            restored = slim.conv2d(bottleneck_out, num_outputs=output_depth, kernel_size=1, stride=1,
                                   scope='restore_num_outputs', padding='SAME', activation_fn=None,
                                   weights_initializer=ScaledVarianceRandomNormal(),
                                   weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                # Project the shortcut only when depth (or stride) changes.
                if input_depth != output_depth:
                    padded_input = slim.conv2d(input_layer, num_outputs=output_depth, kernel_size=1, stride=self.stride,
                                               padding='SAME', activation_fn=None,
                                               weights_initializer=ScaledVarianceRandomNormal(),
                                               weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
                else:
                    padded_input = input_layer
            residual = tf.add(restored, padded_input, name='residual')
            # output = tf.nn.relu(residual, name='residual_relu')
            output = residual
        return output
class ResNextGroup(object):
    """A stage of `num_blocks` chained ResNeXt blocks.

    Only the first block may downsample (it uses `stride`); every
    subsequent block uses stride 1.
    """

    def __init__(self, scope, num_blocks, num_filters, bottleneck_depth, cardinality, stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.cardinality = cardinality
        self.num_blocks = num_blocks
        self.bottleneck_depth = bottleneck_depth
        self.stride = stride

    def apply(self, h):
        """Chain the blocks over `h` inside this group's variable scope."""
        out = h
        with tf.variable_scope(self.scope):
            for block_idx in range(self.num_blocks):
                block_stride = self.stride if block_idx == 0 else 1
                block = ResNextBlock(num_filters=self.num_filters,
                                     cardinality=self.cardinality,
                                     bottleneck_depth=self.bottleneck_depth,
                                     stride=block_stride,
                                     dropout=self.dropout,
                                     scope='block%d' % block_idx)
                out = block.apply(out)
        return out
class AveragePoolLayer(object):
    """Global average pooling expressed as a mean-reduction over `axis`."""
    def __init__(self, scope, axis, keep_dims):
        self.scope = scope
        self.axis=axis
        self.keep_dims=keep_dims
    def apply(self, h):
        """Reduce `h` by its mean over self.axis (keep_dims per config)."""
        with tf.variable_scope(self.scope):
            # keep_dims is the TF1 spelling (renamed keepdims in TF2).
            average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self.keep_dims)
            return average_pool
# The ResNext architecture is based on the following code:
# https://github.com/wenxinxu/ResNeXt-in-tensorflow/blob/master/resNeXt.py
# commit 8a00577495fb01cf98bf77562422390b652e1a4e
# ResNeXt. total layers = 1 + 3n + 3n + 3n +1 = 9n + 2
# Layer list applied in order: stem conv, 3 groups of 3 ResNeXt blocks
# (downsampling at the start of groups 2 and 3), global average pool,
# and a 1x1-conv classifier head producing 10 logits.
ArchitectureResNext = [
    ConvLayer(num_outputs=64, kernel_size=3, stride=1, scope='input/conv0',
              activation_fn=None,
              weights_initializer=ScaledVarianceRandomNormal(),
              weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY)),
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block1'),
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block2'),
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block3'),
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group2/block1'),
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block2'),
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block3'),
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group3/block1'),
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block2'),
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block3'),
    AveragePoolLayer(scope='avg_pool', axis=[1,2], keep_dims=True),
    ConvLayer(num_outputs=10, kernel_size=1, stride=1, normalizer_fn=None, activation_fn=None,
              weights_initializer=ScaledVarianceUniform(),
              weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY), scope='logits/fc'),
]
# Per-layer adapters aligned with ArchitectureResNext (None = no adapter).
# Bug fix: num_filters/projection_width used `/`, which yields a *float*
# under Python 3 (e.g. 64/4 == 16.0) and is rejected as a channel count;
# `//` gives the same integer value on both Python 2 and 3.
AdaptorStack = [
    None,
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64 // 4, stride=1, dropout=False, scope='bottleneck_group1/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64 // 4, stride=1, dropout=False, scope='bottleneck_group1/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64 // 4, stride=1, dropout=False, scope='bottleneck_group1/adaptor3'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128 // 4, stride=2, dropout=False, scope='bottleneck_group2/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128 // 4, stride=1, dropout=False, scope='bottleneck_group2/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128 // 4, stride=1, dropout=False, scope='bottleneck_group2/adaptor3'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256 // 4, stride=2, dropout=False, scope='bottleneck_group3/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256 // 4, stride=1, dropout=False, scope='bottleneck_group3/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256 // 4, stride=1, dropout=False, scope='bottleneck_group3/adaptor3'),
    None,
    ProjectionAdaptor(projection_width=1024 // 8, num_outputs=10, dropout=False, scope='logits/adaptor')
]
5,584 | e364a4e6e1c4e0fd6805515a1149adaf92e9c8fb | n = input()
n = list(n)
n.sort()
alph = []
num = []
for i in range(n) :
if i.isalpha() :
alpa.append(i)
else :
num.append(i)
result.append(str(alpa))
result.append(str(num))
print(n)
|
5,585 | 8a6c9fa67c02d69444c9c3a2e6811b982c49eb4e | """Contains functionality for tokenizing, parsing, embedding language."""
from . import parsing
from . import cleaning
from .config import NATURAL_EMB_DIM
|
5,586 | 5068336ca1a180e09a7efd41eea596cdcebb33ae | from flask import Blueprint, request, jsonify
from to_dict import *
from validacao import *
import sqlite3
from migration import conectar, create_database
from contextlib import closing
aluno = Blueprint("aluno", __name__)
@aluno.route("/hello")
def hello():
    """GET /hello -- smoke-test endpoint for the aluno blueprint."""
    return "Hello, aluno"
@aluno.route("/reseta", methods = ["POST"])
def reseta():
    """POST /reseta -- delete every row from aluno, disciplina and professor."""
    sqlaluno = """DELETE FROM aluno"""
    sqldisciplina = """DELETE FROM disciplina"""
    sqlprofessor = """DELETE FROM professor"""
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sqlaluno)
        cursor.execute(sqldisciplina)
        cursor.execute(sqlprofessor)
        conn.commit()
    # NOTE(review): the response text is misspelled ('sucess'/'suceso') but is
    # part of the public API -- changing it could break existing consumers.
    return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route("/alunos", methods = ["GET"])
def alunos_retorna_lista():
    """GET /alunos -- return all alunos as a JSON list of {id, nome}."""
    sql = """SELECT * FROM aluno"""
    resultados = []
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sql)
        linhas = cursor.fetchall()
        # Rows come back as (id, nome) tuples; reshape into dicts for JSON.
        for id, nome in linhas:
            resultados.append({"id": id, "nome": nome})
    return jsonify(resultados), 200
    #return jsonify(alunos), 200
@aluno.route('/alunos/<int:id>', methods = ["GET"])
def aluno_por_id(id):
    """GET /alunos/<id> -- return one aluno as JSON, or a 404 JSON error.

    The original returned bare ``None`` for a missing row, which is not a
    valid Flask view return value (it raises a 500); it also compared with
    ``== None`` instead of ``is None``.
    """
    sql = "SELECT id, nome FROM aluno WHERE id = ?"
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sql, (id, ))
        r = cursor.fetchone()
    if r is None:
        return jsonify({'erro': 'aluno não encontrado'}), 404
    return jsonify({"id": r[0], "nome": r[1]}), 200
@aluno.route("/alunos", methods = ["POST"])
def adiciona_alunos():
    """POST /alunos -- insert a new aluno; JSON body {"nome": ...}.

    Returns the id of the inserted row.
    """
    dados = request.get_json()
    params = (dados['nome'],)
    sql = "INSERT INTO aluno (nome) VALUES (?)"
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sql, (params))
        conn.commit()
        return jsonify(cursor.lastrowid)
@aluno.route("/alunos/<int:id>", methods = ["PUT"])
def editar_aluno(id):
    """PUT /alunos/<id> -- rename an aluno; JSON body {"nome": ...}.

    NOTE(review): returns 200 with the new name even when the id does not
    exist (no rowcount check) -- confirm whether a 404 is wanted here.
    """
    dados = request.get_json()
    params = (dados['nome'], id)
    sql = "UPDATE aluno SET nome = ? WHERE id = ?"
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sql, (params))
        conn.commit()
    return jsonify(dados['nome']), 200
    # for aluno in alunos:
    #     if aluno['id'] == id:
    #         aluno['nome'] = request.get_json().get('nome')
    #         return jsonify(aluno), 200
    # return jsonify({'erro': 'aluno não encontrado'}), 404
@aluno.route("/alunos/<int:id>", methods = ["DELETE"])
def deletar_aluno(id):
    """DELETE /alunos/<id> -- remove an aluno; echoes the id back.

    NOTE(review): responds 200 even when the id did not exist (no rowcount
    check) -- confirm whether a 404 is wanted.
    """
    params = (id,)
    sql = "DELETE FROM aluno WHERE id = ?"
    with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
        cursor.execute(sql, (params))
        conn.commit()
    return jsonify(id), 200
5,587 | 4bc9896847e4ab92a01dfcf674362140cc31ef4f | import ga.ga as ga
import os
import datetime
def ga_optimise(synth, param_count, target, output_dir, iterations = 10, pop_size = 500):
    """Run the GA search for synth parameters matching *target* MFCCs.

    Thin wrapper around ga.ga_optimise with project defaults; progress is
    logged into *output_dir*.
    """
    # NOTE(review): the __main__ section of this file uses Python 2 print
    # statements, where param_count / 5 is integer division; under Python 3 it
    # becomes a float -- confirm the intended interpreter (param_count // 5?).
    fs = ga.ga_optimise(compute_population_fitnesses = ga.compute_population_fitnesses,
                        target = target,
                        synth = synth,
                        param_count = param_count,
                        iterations = iterations,
                        pop_size = pop_size,
                        crossovers = param_count / 5,
                        mutation_rate = 0.5,
                        log = True,
                        data_folder = output_dir)
    return fs
if __name__ == '__main__':
vst_synth = "../mda DX10.vst"
vst_param_count = 15
target_dir = "../runs/" + datetime.datetime.now().strftime("%Y%m%d%H%M%s") + "/"
os.mkdir(target_dir)
print "Generating set of target sounds from 32 presets on "+vst_synth
# first generate the target sounds
# which are the 32 presets from the synth
for i in range(0, 32):
filename = target_dir + "preset_"+str(i)+".wav"
print "Target "+str(i)+": "+filename
ga.render_preset(vst_synth, i, filename)
for i in range(0, 32):
filename = target_dir + "preset_"+str(i)+".wav"
print "Looking for target: "+filename
target_mfccs = ga.wav_to_mfcc(filename)
data_folder = target_dir + "_preset_"+str(i) + "/"
try:
os.mkdir(data_folder)
except:
print "data folder already there."
ga.string_to_file("synth: "+vst_synth + "\npreset: "+str(i), data_folder + "details.txt")
ga_optimise(vst_synth, vst_param_count, target_mfccs, data_folder)
# targets = ga.get_files_in_dir(test_dir, filter = "wav")
# for target in targets:
# print "Looking for "+target
# target_mfccs = ga.wav_to_mfcc("test.wav")
# data_folder = "data/data_"+target+"/"
# try:
# os.mkdir(data_folder)
# except:
# print "data folder already there."
# ga_optimise(vst_synth, vst_param_count, target_mfccs, data_folder)
|
5,588 | a352768c2928cb7a33b9f1a31a0b3d8e56a8376a | # -*- coding: utf-8 -*-
# Scrapy settings for reddit_scraper project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'reddit_scraper'
SPIDER_MODULES = ['reddit_scraper.spiders']
NEWSPIDER_MODULE = 'reddit_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'reddit_scraper (+http://www.yourdomain.com)'
|
5,589 | 012ab947f7a2c9d44f54464b3e477582ffcf3d77 | # -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Human Nav1.7 gating ODEs (Vasylyev & Waxman).

    Y = [m, h] (activation, inactivation). Returns [dm/dt, dh/dt] at the
    clamp voltage produced by voltage_clamp_func(t, voltage_clamp_params).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation gate: closed->open / open->closed rates, then steady
    # state and time constant.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    tau_m = 1 / (a_m + b_m)

    # Inactivation gate.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    tau_h = 1 / (a_h + b_h)

    return [(m_inf - m) / tau_m, (h_inf - h) / tau_h]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Rat? Nav 1.7 from Choi Waxman 2011 "
    # State: m activation, h fast inactivation, s slow inactivation.
    v = voltage_clamp_func(t,voltage_clamp_params)
    m = Y[0]
    h = Y[1]
    s = Y[2]
    alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))
    beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))
    minf = alpha_m/(alpha_m + beta_m)
    mtau = 1/(alpha_m + beta_m)
    alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))
    beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?
    hinf = alpha_h/(alpha_h + beta_h)
    htau = 1/(alpha_h + beta_h)
    # Slow inactivation: very small rates, so stau is large.
    alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))
    beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))
    sinf = alpha_s/(alpha_s + beta_s)
    stau = 1/(alpha_s + beta_s)
    dm = (minf-m)/mtau
    dh = (hinf-h)/htau
    ds = (sinf-s)/stau
    return [dm, dh, ds]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Human Nav1.8 gating ODEs (Huang & Waxman); Y = [m, h]."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation rates -> steady state / time constant.
    a_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))
    b_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))
    m_inf = a_m / (a_m + b_m)
    tau_m = 1 / (a_m + b_m)

    # Inactivation rates.
    a_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))
    b_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))
    h_inf = a_h / (a_h + b_h)
    tau_h = 1 / (a_h + b_h)

    return [(m_inf - m) / tau_m, (h_inf - h) / tau_h]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Rat? Nav 1.8 used in Tigerholm model "
    # State: m activation, h fast inactivation, s and u slow inactivation.
    v = voltage_clamp_func(t,voltage_clamp_params)
    m = Y[0]
    h = Y[1]
    s = Y[2]
    u = Y[3]
    alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
    beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
    minf = alpha_m/(alpha_m + beta_m)
    mtau = 1/(alpha_m + beta_m)
    # Fast inactivation fitted directly: Boltzmann steady state + gaussian tau.
    hinf = 1/(1+np.exp((v+32.2)/4))
    htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
    # Slow gates: time constants from rates, steady states their own Boltzmanns.
    alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
    beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
    sinf = 1/(1+np.exp((v+45.0)/8))
    stau = 1/(alpha_s + beta_s)
    alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
    beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
    uinf = 1/(1+np.exp((v+51.0)/8))
    utau = 1.0/(alpha_u + beta_u)
    dm = (minf-m)/mtau
    dh = (hinf-h)/htau
    ds = (sinf-s)/stau
    du = (uinf-u)/utau
    return [dm, dh, ds, du]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.8 gating ODEs used in Choi & Waxman 2011; Y = [m, h]."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    tau_m = 1 / (a_m + b_m)

    # Inactivation fitted directly: Boltzmann steady state, gaussian tau.
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    tau_h = 1.218 + 42.043 * np.exp(-((v + 38.1) ** 2) / (2 * 15.19 ** 2))

    return [(m_inf - m) / tau_m, (h_inf - h) / tau_h]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Nav 1.9 model from Huang Waxman 2014"
    # State: m activation, h fast inactivation, s slow inactivation.
    m = Y[0]
    h = Y[1]
    s = Y[2]
    v = voltage_clamp_func(t,voltage_clamp_params)
    alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))
    beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))
    minf = alpha_m/(alpha_m + beta_m)
    mtau = 1/(alpha_m + beta_m)
    alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))
    beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))
    hinf = alpha_h/(alpha_h + beta_h)
    htau = 1/(alpha_h + beta_h)
    alpha_s = 0.019/(1 + np.exp((v+154.51)/11.46))
    beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))
    sinf = alpha_s/(alpha_s + beta_s)
    stau = 1/(alpha_s + beta_s)
    dm = (minf-m)/mtau
    dh = (hinf-h)/htau
    ds = (sinf-s)/stau
    return [dm, dh, ds]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Nav 1.9 model from Maingret 2008"
    # The rate equations were never filled in: the original body returned
    # [dm, dh, ds] without ever defining them, so every call raised a
    # confusing NameError. Fail explicitly until the Maingret 2008 kinetics
    # are implemented.
    raise NotImplementedError(
        "nav19md: Maingret 2008 Nav1.9 kinetics not implemented yet")
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Nav 1.6 model from Zach Mainen 1994 "
    # State: m activation, h inactivation.
    m = Y[0]
    h = Y[1]
    v = voltage_clamp_func(t,voltage_clamp_params)
    vhalf = -43.0
    a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))
    b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))
    m_inf = a_m/(a_m + b_m)
    m_tau = 1./(a_m + b_m)
    # Inactivation parameters: separate half-voltages for the two rates,
    # plus an independent Boltzmann (vhalf_inf, qinf) for the steady state.
    vhalf_ha = -50.0
    vhalf_hb = -75.0
    q_h = 5.0
    vhalf_inf = -72.0
    qinf = 6.2
    rate_ha = 0.0091
    rate_hb = 0.024
    a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))
    b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))
    # Note: only h_tau comes from the rates; h_inf uses its own Boltzmann.
    h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))
    h_tau = 1./(a_h + b_h)
    dm = (m_inf-m)/m_tau
    dh = (h_inf-h)/h_tau
    return [dm, dh]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Tigerholm variant of the Sheets et al. delayed-rectifier (IKdr).

    Y = [n]; returns [dn/dt]. Data were recorded at 21 C; the q10 factor
    is kept at 1.0 (temperature correction disabled, as in the original).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n = Y[0]
    q10 = 1.0  # temperature factor disabled (would be 3.3)

    # Piecewise time constant fitted around -31 mV.
    if v > -31.0:
        n_tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
    else:
        n_tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v - 131.5) / (34.8))))

    n_inf = 1 / (1 + np.exp(-(v + 45) / 15.4))
    return [(n_inf - n) / (n_tau / q10)]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """ Tigerholm version of the IM current. Current is from multiple sources:
    The voltage dependence of steady-state activation for the KM current is from
    Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
    time constant as described by Passmore et al. (2003). To account for the
    two time constants, we implemented one fast (nf) and one slow (ns) gate,
    combined as follows.
    """
    # g = gbar * (0.25*ns + 0.75*nf)
    v = voltage_clamp_func(t,voltage_clamp_params)
    ns = Y[0]
    nf = Y[1]
    q10 = 1.0#3.3 # Preserved in case it is useful but disabled
    # NOTE(review): in the v >= -60 branch only the constant term is scaled
    # by q10 (13*v + 1000*q10), unlike the other branch (219*q10) -- confirm
    # against the Tigerholm source whether the whole expression should scale.
    if v < -60.0:
        nstau = 219.0*q10
    else:
        nstau = 13.0*v + 1000.0*q10
    nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)
    nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10
    nftau = 1.0/(nftau_alpha + nftau_beta)
    ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV
    dns = (ninf-ns)/nstau
    dnf = (ninf-nf)/nftau
    return [dns,dnf]
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Tigerholm A-type potassium current (IA) gating; Y = [n, h].

    Conductance elsewhere is g = gbar * n * h.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n, h = Y[0], Y[1]
    q10 = 1.0  # temperature correction disabled (would be 3.3)

    n_inf = (1.0 / (1.0 + np.exp(-(v + 5.4 + 15) / 16.4))) ** 4
    n_tau = 0.25 + 10.04 * np.exp((-(v + 24.67) ** 2) / (2 * 34.8 ** 2)) * q10

    h_inf = 1.0 / (1.0 + np.exp((v + 49.9 + 15.0) / 4.6))
    h_tau = 20.0 + 50.0 * np.exp((-(v + 40.0) ** 2) / (2.0 * 40.0 ** 2)) * q10
    # Floor htau at 5 ms, following Sheets / Choi-Waxman / Tigerholm.
    h_tau = max(h_tau, 5.0)

    return [(n_inf - n) / n_tau, (h_inf - h) / h_tau]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
    """
    Jaffe et al. 1994 ICaL model.
    """
    # Only the activation gate m is integrated here; the current itself
    # (GHK form) is sketched in the commented block at the bottom.
    v = voltage_clamp_func(t,voltage_clamp_params)
    m = Y[0]
    tfa = 1.
    ki = 0.001 # (mM)
    cao = 2.5 # Davidson (mM)
    " To do: make cai variable as an input like voltage "
    cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007
    celsius = 37.
    def alpha(v):
        return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)
    def beta(v):
        return 0.29*np.exp(-v/10.86)
    def KTF(celsius):
        # Thermal voltage-style factor (mV) at the given temperature.
        return ((25./293.15)*(celsius + 273.15))
    def efun(z):
        # NOTE(review): this iterates over z, so it requires an array-like
        # argument; a scalar clamp voltage would raise TypeError -- confirm
        # that voltage_clamp_func returns arrays before using calc_ghk.
        return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])
    def calc_ghk(v, cai, cao):
        f = KTF(celsius)/2
        nu = v/f
        return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)
    a = alpha(v)
    b = beta(v)
    tau = 1./(tfa*(a + b))
    minf = a/(a+b)
    dm = (minf - m)/tau
    """ Calculating the current
    # h gate
    h2 = ki/(ki+cai)
    gcalbar = 0.003
    ghk = calc_ghk(v,cai,cao)
    ical = gcalbar*m*m*h2*ghk
    """
    return [dm]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95
    """
    # Placeholder -- not implemented yet (listed in the module's To-do).
    pass
" HCN models "
def hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Kouranova Ih gating (slow + fast gates); Y = [n_slow, n_fast].

    When computing the current, the nonspecific reversal potential should
    be set at about -30 mV.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    ns, nf = Y[0], Y[1]

    # Both gates share one steady-state activation curve.
    n_inf = 1 / (1 + np.exp((v + 87.2) / 9.7))

    # Time constants are fitted piecewise around -70 mV.
    if v > -70.0:
        tau_s = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_f = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        tau_s = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_f = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)

    return [(n_inf - ns) / tau_s, (n_inf - nf) / tau_f]
def hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """
    Tigerholm version of the Kouranova Ih model which is identical except
    that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
    """
    # Gating below is line-for-line the same as hcn_kn above; only the
    # current calculation convention (not done here) differs.
    v = voltage_clamp_func(t,voltage_clamp_params)
    n_s = Y[0]
    n_f = Y[1]
    ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
    ninf_f = ninf_s
    if v > -70.0:
        tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
        tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
    else:
        tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
        tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
    dns = (ninf_s - n_s)/tau_ns
    dnf = (ninf_f - n_f)/tau_nf
    return [dns, dnf]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
    " Human Nav 1.7 from Vasylyev Waxman "
    # NOTE(review): byte-for-byte duplicate of nav17vw above, kept under the
    # "Test models" section -- consider delegating to nav17vw to avoid drift.
    v = voltage_clamp_func(t,voltage_clamp_params)
    m = Y[0]
    h = Y[1]
    alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
    beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
    """
    Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
    """
    minf = alpha_m/(alpha_m + beta_m)
    mtau = 1/(alpha_m + beta_m)
    alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
    beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
    hinf = alpha_h/(alpha_h + beta_h)
    htau = 1/(alpha_h + beta_h)
    dm = (minf-m)/mtau
    dh = (hinf-h)/htau
    return [dm, dh]
|
5,590 | c005ae9dc8b50e24d72dbc99329bb5585d617081 | from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import ReadOnlyField
from rest_framework.serializers import SlugField
from rest_framework.validators import UniqueValidator
from django.db import models
from illumidesk.teams.util import get_next_unique_team_slug
from illumidesk.users.models import IllumiDeskUser
from .models import Invitation
from .models import Membership
from .models import Team
class IllumiDeskUserSerializer(ModelSerializer):
    """Minimal user representation: names plus the computed display name.

    NOTE(review): ``abstract`` is a Django *model* Meta option; DRF serializer
    Meta does not honor it, so this serializer is still instantiable --
    confirm the intent.
    """
    class Meta:
        model = IllumiDeskUser
        fields = ('first_name', 'last_name', 'get_display_name')
        abstract = True
class MembershipSerializer(ModelSerializer):
    """Team membership row with the related user's names flattened in."""
    # Read-only fields pulled through the membership's user relation.
    first_name = ReadOnlyField(source='user.first_name')
    last_name = ReadOnlyField(source='user.last_name')
    display_name = ReadOnlyField(source='user.get_display_name')

    class Meta:
        model = Membership
        fields = ('id', 'first_name', 'last_name', 'display_name', 'role')
class InvitationSerializer(ModelSerializer):
    """Team invitation; id and inviter display name are read-only."""
    id = ReadOnlyField()
    invited_by = ReadOnlyField(source='invited_by.get_display_name')

    class Meta:
        model = Invitation
        fields = ('id', 'team', 'email', 'role', 'invited_by', 'is_accepted')
class TeamSerializer(ModelSerializer):
    """Team with nested members and pending invitations (both read-only)."""
    slug = SlugField(
        required=False,
        validators=[UniqueValidator(queryset=Team.objects.all())],
    )
    members = MembershipSerializer(source='membership_set', many=True, read_only=True)
    invitations = InvitationSerializer(many=True, read_only=True, source='pending_invitations')
    dashboard_url = ReadOnlyField()

    class Meta:
        model = Team
        fields = ('id', 'name', 'slug', 'members', 'invitations', 'dashboard_url')

    def create(self, validated_data):
        """Create the team, deriving a unique slug only when none was given.

        The original passed ``get_next_unique_team_slug(...)`` as dict.get()'s
        default, which evaluated it (database queries included) on every
        create, even when the client already supplied a slug.
        """
        if validated_data.get('slug') is None:
            team_name = validated_data.get('name', None)
            validated_data['slug'] = get_next_unique_team_slug(team_name)
        return super().create(validated_data)
|
5,591 | ebd510bcd0caded03c5bcc36a11945710d5e644b | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import math
import re
import copy
from lcs import lcs
# Create your models here.
keywords=["int","long","for","while","if","else","break","continue","return","true","false","double","do","signed","unsigned"]
symbol=["[","]","{","}","(",")","&","|","^","%","+","-","*","/",":",";","?","!",".","\"","\'",",","=","#","<",">","_","\\","\n","\r"]
mapKeyword={}
mapSymbol={}
treeMem=[]
def init():
    """Populate the keyword->char and char->index code tables.

    The original referenced the undefined globals KEYNUM and SYMNUM, so every
    call raised NameError; the counts are now derived from the tables
    themselves.
    """
    # One lowercase letter code per reserved keyword.
    for i in range(len(keywords)):
        mapKeyword[keywords[i]] = chr(i + ord('a'))
    # a-z -> 0..25, A-Z -> 26..51, 0-9 -> 52..61.
    for i in range(0, 26):
        tmp = chr(ord('a') + i)
        tmp2 = chr(ord('A') + i)
        mapSymbol[tmp] = i
        mapSymbol[tmp2] = i + 26
        if i < 10:
            tmp3 = chr(ord('0') + i)
            mapSymbol[tmp3] = i + 52
    # Punctuation / operators -> 62 onwards.
    for i in range(len(symbol)):
        mapSymbol[symbol[i]] = 62 + i
def solve2(Str,len1,len2):
code0 = copy.deepcopy(Str[0])
code1 = copy.deepcopy(Str[1])
tlen = len(code0)
plen = len(code1)
ans = 0
MML = 4
MaxLen=MML+1
whileTime = 0
td = 0
while MaxLen>MML:
whileTime += 1
MaxLen = MML
j = 1
now_i=0
# t1 = datetime.now()
dp=[65536*[0],65536*[0]]
k,s,t = lcs(Str[0],Str[1])
# t2 = datetime.now()
# print t2-t1
# print k
# print code0[s:s+k]+" XXX "+code1[t:t+k]+" OOO "
if k<MaxLen:
continue
#print code0[s:s+k]
#print code1[t:t+k]
if s+k<tlen and code0[s+k]!='$':
code0 = code0[0:s]+"$"+code0[s+k:tlen]
else:
code0 = code0[0:s]+code0[s+k:tlen]
tlen=len(code0)
if t+k<plen and code1[t+k]!='$':
code1 = code1[0:t]+"$"+code1[t+k:plen]
else:
code1 = code1[0:t]+code1[t+k:plen]
plen=len(code1)
ans+=k
MaxLen = k
#Set=[]
# print k
# print Str
print whileTime
print td
return ans
def probably2(s,len1,len2):
    """Similarity ratio: shared length *s* over the mean of the two lengths."""
    return float(2 * s) / float(len1 + len2)
"""
def probably1(cls,s,len1,len2):
return float(s)/(float(len1+len2)*0.5)
"""
def test2(codes):
length=[0]*2
similar=0
Str=['','']
for j in range(0,2):
code=open(codes[j])
try:
all_code=code.read()
finally:
code.close()
lent=len(all_code)
for i in range(0,lent):
if i<lent-1 and all_code[i]=='/' and all_code[i+1]=='/':
while i<lent and all_code[i]!='\n':
i+=1
continue
if i<lent-1 and all_code[i]!='/' and all_code[i+1]=='*':
while i<lent-1 and not (all_code[i]=='*' and all_code[i+1]=='/'):
i+=1
continue
if all_code[i]!=' ' and all_code[i]!='\r' and all_code[i]!='\n':
Str[j]+=str(all_code[i])#.append(query_info.codes[j][i])
length[j]+=1
# print "str2="
# print Str
t1 = datetime.now()
print 't1=',t1
similar=solve2(Str,length[0],length[1])
t2 = datetime.now()
print 't2=',t2
tdelta = t2-t1
print 'td=',tdelta.total_seconds()
# print similar
return probably2(similar,length[0],length[1])*100
codes = ['./1.cpp', './2.cpp',]
test2(codes)
|
5,592 | 6e2fb9d498294a580426ff408183f7beec135329 | # function to add two numbers
def add2nums(a, b):
    """Return the sum of *a* and *b*."""
    return a + b
|
5,593 | 1bd1769f94b93e0bb674adfd1bb96c778708f6d8 | from django.urls import re_path
from .consumers import ChatConsumer, ChatLobbyConsumer
# WebSocket routes: per-room chat plus a shared lobby.
# NOTE(review): Channels 3+ requires ChatConsumer.as_asgi() here; passing the
# consumer class directly only works on Channels 2 -- confirm the installed
# channels version.
websocket_urlpatterns = [
    re_path(r'ws/chat/(?P<room_id>\w+)/$', ChatConsumer),
    re_path(r'ws/lobby/$', ChatLobbyConsumer),
]
5,594 | cfba55505f3290a14b98d594bc871a74812c7c57 | """
Note: names of methods in this module, if seem weird, are the same as in Hunspell's ``suggest.cxx``
to keep track of them.
"""
from typing import Iterator, Union, List, Set
from spylls.hunspell.data import aff
MAX_CHAR_DISTANCE = 4
def replchars(word: str, reptable: List[aff.RepPattern]) -> Iterator[Union[str, List[str]]]:
    """
    Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If the pattern's replacement contains "_", it means replacing to " " and
    yielding _two_ different hypotheses: it was one (dictionary) word "foo bar" (and should be
    checked as such) or it was words ["foo", "bar"] and should be checked separately.
    """
    if len(word) < 2 or not reptable:
        return

    for pattern in reptable:
        # TODO: compiled at aff loading
        for match in pattern.regexp.finditer(word):
            # '_' in a REP replacement encodes a space.
            suggestion = word[:match.start()] + pattern.replacement.replace('_', ' ') + word[match.end():]
            yield suggestion
            if ' ' in suggestion:
                # Also propose treating the suggestion as two separate words.
                yield suggestion.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) -> Iterator[str]:
    """Yield spellings produced by recursively substituting related chars.

    ``maptable`` (the aff MAP directive) lists groups of characters
    considered similar (e.g. ``aáã``). For the first occurrence (from each
    start position) of any group member, every other member is substituted,
    and the substitution recurses on the remainder of the word, so all
    combinations are eventually produced.
    """
    if len(word) < 2 or not maptable:
        return

    def substitute(current, start=0):
        if start >= len(current):
            return
        for group in maptable:
            for member in group:
                found_at = current.find(member, start)
                if found_at == -1:
                    continue
                for replacement in group:
                    if replacement == member:
                        continue
                    candidate = current[:found_at] + replacement + current[found_at + len(member):]
                    yield candidate
                    # Recurse past the substitution point only.
                    yield from substitute(candidate, found_at + 1)

    yield from substitute(word)
def swapchar(word: str) -> Iterator[str]:
    """
    Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces
    also doubleswaps: ahev -> have.
    """
    if len(word) < 2:
        return

    for i in range(0, len(word) - 1):
        # BUG FIX: the original yielded word[i+1] twice (dropping word[i]),
        # which duplicated a letter instead of swapping the adjacent pair.
        yield word[:i] + word[i+1] + word[i] + word[i+2:]

    # try double swaps for short words
    # ahev -> have, owudl -> would
    if len(word) in [4, 5]:
        yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]
        if len(word) == 5:
            yield word[0] + word[2] + word[1] + word[-1] + word[-2]
def longswapchar(word: str) -> Iterator[str]:
    """
    Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
    """
    for left in range(0, len(word) - 2):
        # Partner positions: at least 2 away, at most MAX_CHAR_DISTANCE - 1.
        for right in range(left + 2, min(left + MAX_CHAR_DISTANCE, len(word))):
            swapped = (word[:left] + word[right] +
                       word[left + 1:right] + word[left] + word[right + 1:])
            yield swapped
def badcharkey(word: str, layout: str) -> Iterator[str]:
    """
    Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
    or downcased (if it was accidental uppercase).
    Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
    """
    for idx, char in enumerate(word):
        prefix, suffix = word[:idx], word[idx + 1:]
        # Maybe the letter was an accidental lowercase of a capital.
        if char != char.upper():
            yield prefix + char.upper() + suffix
        if not layout:
            continue
        # Every occurrence of the char in the layout contributes its left and
        # right keyboard neighbours; '|' separates keyboard rows.
        found = layout.find(char)
        while found != -1:
            if found > 0 and layout[found - 1] != '|':
                yield prefix + layout[found - 1] + suffix
            if found + 1 < len(layout) and layout[found + 1] != '|':
                yield prefix + layout[found + 1] + suffix
            found = layout.find(char, found + 1)
def extrachar(word: str) -> Iterator[str]:
    """
    Produces permutations with one char removed in all possible positions
    """
    if len(word) < 2:
        return
    # Drop each position in turn, left to right.
    yield from (word[:cut] + word[cut + 1:] for cut in range(len(word)))
def forgotchar(word: str, trystring: str) -> Iterator[str]:
    """
    Produces permutations with one char inserted in all possible possitions.
    List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
    doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
    (most used characters first).
    """
    if not trystring:
        return

    for c in trystring:
        # range() runs to len(word) *inclusive* so the char can also be
        # appended after the last letter ("worl" -> "world"); the original
        # stopped one short and never tried the final position.
        for i in range(0, len(word) + 1):
            yield word[:i] + c + word[i:]
def movechar(word: str) -> Iterator[str]:
    """
    Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,
    because it is already handled by :meth:`swapchar`)
    """
    if len(word) < 2:
        return

    # Forward moves: lift word[frompos] and reinsert it before word[topos].
    for frompos, char in enumerate(word):
        for topos in range(frompos + 3, min(len(word), frompos + MAX_CHAR_DISTANCE + 1)):
            yield word[:frompos] + word[frompos+1:topos] + char + word[topos:]

    # Backward moves, scanning positions right-to-left.
    for frompos in reversed(range(0, len(word))):
        for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1), frompos - 1)):
            yield word[:topos] + word[frompos] + word[topos:frompos] + word[frompos+1:]
def badchar(word: str, trystring: str) -> Iterator[str]:
    """
    Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
    set.
    """
    if not trystring:
        return
    for candidate in trystring:
        # Positions are tried right-to-left; no-op replacements are skipped.
        for pos in reversed(range(len(word))):
            if word[pos] != candidate:
                yield word[:pos] + candidate + word[pos + 1:]
def doubletwochars(word: str) -> Iterator[str]:
    """
    Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)
    """
    if len(word) < 5:
        return
    # A doubled pair ending at i means word[i-3:i-1] == word[i-1:i+1];
    # dropping one copy removes word[i-1] and word[i].
    for i in range(2, len(word)):
        if word[i] == word[i - 2] and word[i - 1] == word[i - 3]:
            yield word[:i - 1] + word[i + 1:]
def twowords(word: str) -> Iterator[List[str]]:
    """
    Produces permutation of splitting in two words in all possible positions.
    """
    yield from ([word[:cut], word[cut:]] for cut in range(1, len(word)))
|
5,595 | 53841ba56589955e09b03018af1d0ae79b3756c4 | #!/usr/bin/python
# coding=utf-8
import time
import atexit
# for signal handling
import signal
import sys
# ----------------------
# Encoder stuff
# ----------------------
import RPi.GPIO as GPIO
# init
GPIO.setmode(GPIO.BCM) # use the GPIO names, _not_ the pin numbers on the board
# Raspberry Pi pin configuration:
# pins BCM BOARD
leftEncoderGPIO = 27 # pin
rightEncoderGPIO = 22 # pin
# setup
print("setup...")
GPIO.setup(leftEncoderGPIO, GPIO.IN)
GPIO.setup(rightEncoderGPIO, GPIO.IN)
# for counting encoder steps
leftSteps = 0
rightSteps = 0
# driven distance in cm
leftDistance = 0
rightDistance = 0
# encoder pulse detection by interrupt
def leftEncoderCallback(answer):
    """GPIO falling-edge callback: count one left-wheel encoder step."""
    global leftSteps
    leftSteps = leftSteps +1
    # measure distance
    # 0.24 cm of travel per encoder step -- wheel/encoder specific; TODO confirm.
    global leftDistance
    leftDistance = leftDistance + 0.24
    print("Left Encoder.")
def rightEncoderCallback(answer):
    """GPIO falling-edge callback: count one right-wheel encoder step."""
    global rightSteps
    rightSteps = rightSteps +1
    # 0.24 cm of travel per encoder step -- wheel/encoder specific; TODO confirm.
    global rightDistance
    rightDistance = rightDistance + 0.24
    print("Right Encoder.")
# add GPIO event detectors
print("registering event handlers...")
# enabling event handlers (if needed only)
def enableEncoderTracking():
    """Attach the falling-edge encoder callbacks (start odometry counting)."""
    GPIO.add_event_detect(leftEncoderGPIO, GPIO.FALLING, callback=leftEncoderCallback)
    GPIO.add_event_detect(rightEncoderGPIO, GPIO.FALLING, callback=rightEncoderCallback)
# disabling event handlers
def disableEncoderTracking():
    """Detach the encoder callbacks (stop odometry counting)."""
    GPIO.remove_event_detect(leftEncoderGPIO)
    GPIO.remove_event_detect(rightEncoderGPIO)
# ----------------------
# Motor stuff
# ----------------------
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
# create a default motor object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT(addr=0x60)
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
    """Release both motors so they stop driving (free-run)."""
    mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
# user motor 1 and 2 on RasPi hat
myMotor1 = mh.getMotor(1)
myMotor2 = mh.getMotor(2)
# turn off motors
myMotor1.run(Adafruit_MotorHAT.RELEASE);
myMotor2.run(Adafruit_MotorHAT.RELEASE);
# set the speed (from 0 (off) to 255 (max speed))
startSpeed = 100
maxSpeed = 255 # max is 255!
# test switch
fullSpeedDuration = 0 # default 0
myMotor1.setSpeed(startSpeed)
myMotor2.setSpeed(startSpeed)
# ------------------
# my signal handler
# ------------------
def sig_handler(_signo, _stack_frame):
    """Signal handler: stop motors, detach GPIO events, report odometry, exit."""
    turnOffMotors();
    ## GPIO cleanup
    GPIO.remove_event_detect(leftEncoderGPIO)
    GPIO.remove_event_detect(rightEncoderGPIO)
    GPIO.cleanup()
    # Final odometry report before exiting.
    print("\n")
    print(str(leftSteps) + " left steps are " + str(leftDistance) + " cm driven.")
    print(str(rightSteps) + " right steps are " + str(rightDistance) + " cm driven.\n")
    sys.exit(0)
# signals to be handled
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGHUP, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
#
# Cowntdown
#
print('Starting in 3...')
time.sleep(1)
print('Starting in 2...')
time.sleep(1)
print('Starting in 1...')
time.sleep(1)
print('GO!\n')
######
###### forever - or until ctrl+c :)
######
while (True):
### drive
# drive
print("Forward! ")
# enable Odometrie
enableEncoderTracking()
myMotor1.run(Adafruit_MotorHAT.FORWARD)
myMotor2.run(Adafruit_MotorHAT.FORWARD)
print("\tSpeed up...")
for i in range(startSpeed, maxSpeed):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
# full speed for n seconds
print("+++ full speed for " + str(fullSpeedDuration) + " seconds +++")
time.sleep(fullSpeedDuration)
print("\tSlow down...")
for i in range(maxSpeed, startSpeed, -1):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
# disable Odometrie
disableEncoderTracking()
# wait one second
time.sleep(1)
""" print("Backward! ")
myMotor1.run(Adafruit_MotorHAT.BACKWARD)
myMotor2.run(Adafruit_MotorHAT.BACKWARD)
print("\tSpeed up...")
for i in range(startSpeed, maxSpeed):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
print("\tSlow down...")
for i in range(maxSpeed, startSpeed, -1):
myMotor1.setSpeed(i)
myMotor2.setSpeed(i)
time.sleep(0.01)
print("Release")
myMotor1.run(Adafruit_MotorHAT.RELEASE)
myMotor2.run(Adafruit_MotorHAT.RELEASE)
"""
# wait some time
time.sleep(0.25)
|
5,596 | b739c1de6c008158ee3806bed9fa2865eb484b4f | import sys
sys.stdin = open("sample_input_17.txt","r")
T = int(input())
def code(N): # find the row holding the barcode
    """Return the last grid row (of the first N-4) with a '1' in columns 49-52.

    NOTE(review): only columns 49..52 are probed and 'code' keeps being
    overwritten, so the *last* matching row wins -- confirm this matches the
    puzzle's input format.
    """
    code = []
    for i in range(N-4):
        for j in range(49,53):
            if S[i][j] == "1" :
                code = S[i]
    return code
def code_s(code): # slice the code out of its row
    """Return the 56-character window ending at the row's rightmost '1'."""
    for x in range(M-1,0,-1):
        if code[x] == "1" :
            return code[x-55:x+1]
def code_c(code_s) : # split into eight 7-bit chunks and decode each digit
    """Map each 7-bit chunk of the 56-char code onto its digit via 'numbers'."""
    lists = []
    for n in range(8):
        for m in range(10):
            if code_s[n*7:(n+1)*7] == numbers[m] :
                lists.append(m)
    return lists # the decoded digits
for tc in range(T):
    N,M = map(int,input().split())
    S = [input() for _ in range(N)]
    # 7-bit patterns for digits 0..9.
    numbers = ["0001101","0011001","0010011","0111101","0100011",
               "0110001","0101111","0111011","0110111","0001011"]
    print(f"#{tc+1}",end=" ")
    # Decode once instead of three times: the original re-ran the whole
    # grid scan for the odd-position sum, the even-position sum and the total.
    digits = code_c(code_s(code(N)))
    # UPC-style check: 3 * (odd-position digits) + (even-position digits) must
    # be a multiple of 10; if so print the digit sum, else 0.
    if not (sum(digits[0:7:2])*3 + sum(digits[1:8:2])) % 10 :
        print(sum(digits))
    else :
        print(0)
|
5,597 | 430dccf1001af43c2a713b08dc05d8f04818aa1f | #Script to retrieve relevant files and paths, supply to cx_Freeze to compile into executeable
import os
import cx_Freeze  # was commented out, but cx_Freeze.Executable/setup are used below

files_list = []
dir_path = os.path.dirname(os.path.realpath(__file__)) + str('/')
print(dir_path)
for root, directories, filenames in os.walk(str(dir_path)):
    for file in filenames:
        path = os.path.join(root, file)
        # Keep only paths that are not hidden ('/.') and not __pycache__.
        if path.find('/.') == -1 and path.find('__pycache__') == -1:
            files_list.append(path)
print(files_list)

executables = [cx_Freeze.Executable(dir_path + "scripts/main.py")]
cx_Freeze.setup(
    name="try1",
    description='A game and a presentation',
    author='J. Carter',
    options={"build_exe": {"packages": ["pygame", "sys", "itertools"],
                           "include_files": files_list}},
    executables = executables
)
5,598 | f6f0dcb806fbc1e14c0907dd500fdc6a609a19f7 | # -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import ctypes
from py_dss_interface.models.Base import Base
class MonitorsS(Base):
    """
    String-valued half of the Monitors COM/DLL interface.

    Every public method forwards to the native entry point

        CStr MonitorsS(int32_t Parameter, CStr Argument);

    where ``Parameter`` selects the property (0-4 below) and the reply is an
    ASCII string.  Read-style calls pass a dummy ``c_int32(0)`` argument;
    write-style calls pass the new value encoded as ASCII bytes.
    """

    def _string_call(self, parameter, argument):
        # Single dispatch point: invoke the native MonitorsS entry and decode
        # the returned C string to a Python str.
        result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(parameter), argument))
        return result.value.decode('ascii')

    def monitors_file_name(self) -> str:
        """Returns the name of the CSV file associated with active monitor."""
        return self._string_call(0, ctypes.c_int32(0))

    def monitors_read_name(self) -> str:
        """Returns the active Monitor object by name."""
        return self._string_call(1, ctypes.c_int32(0))

    def monitors_write_name(self, argument) -> str:
        """Sets the active Monitor object by name."""
        return self._string_call(2, argument.encode('ascii'))

    def monitors_read_element(self) -> str:
        """Returns the full name of element being monitored by the active Monitor."""
        return self._string_call(3, ctypes.c_int32(0))

    def monitors_write_element(self, argument) -> str:
        """Sets the full name of element being monitored by the active Monitor."""
        return self._string_call(4, argument.encode('ascii'))
|
5,599 | 85a3682f144f02aa412d45c901f76c65de2e816d | import graphics
from graphics import *
class Renderer():
    """Draws a 7x6 Connect-Four style board and pieces in a graphics window."""

    def __init__(self, engine, width=700, height=600):
        self.width = width
        self.height = height
        self.engine = engine
        self.win = GraphWin("Game Board", width, height)
        self.win.setBackground("blue")

    def update(self):
        """Flush pending drawing operations to the window."""
        self.win.update()

    def get_window(self):
        """Return the underlying GraphWin instance."""
        return self.win

    def get_width(self):
        """Return the window width in pixels."""
        return self.width

    def draw_board(self):
        """Draw the grid lines and the empty (white) piece slots."""
        cell_w = self.width / 7
        cell_h = self.height / 6
        for row in range(6):  # horizontal rulings
            ruling = Line(Point(0, row * cell_h), Point(self.width, row * cell_h))
            ruling.setOutline('black')
            ruling.draw(self.win)
        for col in range(7):  # vertical rulings
            ruling = Line(Point(col * cell_w, 0), Point(col * cell_w, self.height))
            ruling.setOutline('black')
            ruling.draw(self.win)
        # NOTE(review): the +50 center offset and 37.5 radius assume 100x100
        # cells (i.e. the default 700x600 window) -- confirm before resizing.
        for row in range(6):
            for col in range(7):
                slot = Circle(Point(col * cell_w + 50, row * cell_h + 50), 37.5)
                slot.setFill("white")
                slot.draw(self.win)

    def update_pieces(self, x, y, color):
        """Draw a piece at board cell (x, y); 'r' fills red, anything else black."""
        self.engine.get_board()  # kept from the original; its result was unused
        px = x * self.width / 7
        py = y * self.height / 6
        piece = Circle(Point(px + 50, py + 50), 37.5)
        piece.setFill("red" if color == 'r' else "black")
        piece.draw(self.win)

    def end(self):
        """Close the rendering window."""
        self.get_window().close()
class Menu():
    """Pop-up in-game menu with SAVE / LOAD / QUIT buttons.

    NOTE(review): geometry is hard-coded against a 500x500 frame while the
    Renderer window defaults to 700x600 (the original carried a
    'CHANGE TO SELF.WIDTH AND HEIGHT' reminder) -- confirm intended.
    """

    def __init__(self, window):
        self.window = window
        sky_blue = color_rgb(135, 206, 250)
        royal_blue = color_rgb(65, 105, 225)
        # Backdrop panel.
        self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500, 0.8 * 500))
        self.menu.setFill(sky_blue)
        self.menu.setOutline(sky_blue)
        # Three identical buttons stacked at tops 0.2, 0.4, 0.6 of the frame.
        self.save, self.saveTxt = self._button("SAVE", 0.2, royal_blue)
        self.load, self.loadTxt = self._button("LOAD", 0.4, royal_blue)
        self.quit, self.quitTxt = self._button("QUIT", 0.6, royal_blue)

    def _button(self, label, top, fill):
        """Build one menu button: a filled rectangle plus its bold caption."""
        box = Rectangle(Point(0.25 * 500, top * 500),
                        Point(0.75 * 500, (top + 0.15) * 500))
        box.setOutline(fill)
        box.setFill(fill)
        caption = Text(Point(0.50 * 500, (top + 0.075) * 500), label)
        caption.setSize(30)
        caption.setFace("helvetica")
        caption.setStyle("bold")
        return box, caption

    def _widgets(self):
        """All drawable parts, in the original draw order."""
        return (self.menu, self.save, self.saveTxt,
                self.load, self.loadTxt, self.quit, self.quitTxt)

    def openMenu(self):
        """Draw every menu element onto the window."""
        for widget in self._widgets():
            widget.draw(self.window)

    def closeMenu(self):
        """Remove every menu element from the window."""
        for widget in self._widgets():
            widget.undraw()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.