seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
13102906744 | # A program that takes two integer inputs a and b, and prints a + b
caseNum = int(input())
sumArr = []
for i in range(caseNum):
temp = input().split()
a = int(temp[0])
b = int(temp[1])
sumArr.append(a + b)
for i in range(caseNum):
print(sumArr[i])
| olwooz/algorithm-practice | 2020_March/200318_10950_A+B-3.py | 200318_10950_A+B-3.py | py | 290 | python | en | code | 0 | github-code | 13 |
3381176707 | import numpy as np
from multiclassLRTrain import multiclassLRTrain
def trainModel(x, y):
param = {}
param['lambda'] = 0.008 # Regularization term
param['maxiter'] = 1000 # Number of iterations
param['eta'] = 0.01 # Learning rate
return multiclassLRTrain(x, y, param)
""" Testset=2
Digits-normal.mat: LBP Features sqrt
lambda=0.006, eta=0.15 Accuracy=50.40
HOG Features: sqrt
lambda=0.0075, eta=0.18 accuracy=97.60
Pixel Features: Square root normalisation
lambda=0.0035, eta=0.25, accuracy=84.80
Mean normalisation:
l=0.0030, eta=0.275, acc=86
""" | pavitradangati/mini-project-5 | code/trainModel.py | trainModel.py | py | 578 | python | en | code | 0 | github-code | 13 |
71202880979 | def calculator():
correct = False
while not correct:
try:
number1 = int(input("Please enter the first operand:"))
correct = True
except ValueError:
print("Please enter a number")
while correct:
try:
number2 = int(input("Please enter the second operand:"))
correct = False
except ValueError:
print("Please enter a number")
while not correct:
try:
operators = input("Please enter an operator (+,-,*,/):")
if operators == "+" or operators == "-" or operators == "*" or operators == "/":
number1 = int(number1)
number2 = int(number2)
if operators == "+":
answer_plus = number1 + number2
print("Result of", number1, operators, number2, "is", answer_plus)
if operators == "-":
answer_minus = number1 - number2
print("Result of", number1, operators, number2, "is", answer_minus)
if operators == "*":
answer_divide = number1 * number2
print("Result of", number1, operators, number2, "is", answer_divide)
if operators == "/":
answer_multiply = number1 / number2
print("Result of", number1, operators, number2, "is", answer_multiply)
break
else:
print("Unknown operator")
except ZeroDivisionError:
print("Please don't divide by zero")
calculator()
calculator() | jaruwitteng/6230401856-oop-labs | jaruwit-6230401856-lab4/Problem2.py | Problem2.py | py | 1,672 | python | en | code | 0 | github-code | 13 |
28672395305 | """
配置文件
"""
configs = {
'path': {
'monitor': r'E:\data\webspider', # 监测新文件产生的路径
'temp_out': r'E:\data\webspider_temp_out',
'out': r'E:\data\webspider_out', # 结果文件的保存路径
'word_config': r'E:\项目\移动在线本部\词条匹配\rules\test_config.json', # 词条配置文件的位置
'word_config_txt': r'E:\项目\移动在线本部\词条匹配\rules\client_config_entry.txt', # 前端系统同步过来的文件路径
},
'date': {
'start_date': '2018-7-12', # 程序启动日期
'sleep_time': 2, # 程序在没有新文件产生时的休眠时间 秒
'monitor_days': 2 # 监控多少天内的爬虫文件
},
'type': {
'news_type_set': {'1', '6'} # 程序要处理的新闻类型,主要用来筛选对应的词条。1:新闻,2:微博,3:微信,4:app,5:论坛,6:报刊,7:视频,8:博客
},
'db_conn': {
'host': '192.168.100.181',
'database': 'netpomdb_dev',
'user': 'netpomadmin',
'password': '8rQSql27odWWG0K',
'port': 41000,
'charset': 'utf8'
},
'db_table': {
'word_config_table': 'client_config_entry'
},
'debug': {
'word_config_debug': 0 # 0:采用写死的json测试文件,作为词条配置。 1: 采用生产环境中的mysql读取此调配配置
}
}
| SnailDM/rule_matching | config.py | config.py | py | 1,437 | python | zh | code | 4 | github-code | 13 |
73910584016 | import random
import time
try:
only = []
i = j = 0
sTime = eTime = 0.0
number = int(input("请输入要随机几位:"))
xiao = int(input("请输入最小随机数:"))
da = int(input("请输入最大随机数:"))
isOnly = int(input("请输入是否唯一(1/0)"))
sTime = time.perf_counter()
if isOnly == 1:
tempCount = da-xiao+1
if number > tempCount:
print("按唯一,数目不能少于da-xiao")
pass
else:
while i < number:
rand = random.randint(xiao, da)
appear = only.count(rand) # 元素出现次数
if appear == 1:
i = i
j += 1
else:
only.append(rand)
i += 1
elif isOnly == 0:
while i < number:
rand = random.randint(xiao, da)
only.append(rand)
i += 1
else:
print("outputError")
print("\n随机名单如下:")
print("-"*30)
print("", end="\n")
for i in range(len(only)):
print("第%d位,随机生成数是:%d" % (i+1, only[i]))
print("\n")
print("-"*30)
print("\n多余重复次数:%d"%j,end="\n")
eTime = time.perf_counter()
print("此次随机花费您Running time:%0.3f Seconds"%(eTime-sTime))
except Exception as result:
print("error:", result)
| learnemt/py | random.py | random.py | py | 1,473 | python | en | code | 0 | github-code | 13 |
24550131706 | from django import views
from django.urls import path
from .import views
urlpatterns = [
path('', views.home),
path('logup', views.logup),
path('login', views.login),
path('search', views.search),
path('taskes', views.taskes),
]
| saramoh20/ToDo-project | task/urls.py | urls.py | py | 249 | python | en | code | 0 | github-code | 13 |
20336012866 | # complete
# 3
# 1 8 -> 44
# 2 1 -> 2
# 3 10 -> 65
numberOfDataSets = int(input())
dataSets = []
for i in range(0, numberOfDataSets):
currentInput = input().split(" ")
dataSets.append(currentInput)
for i in dataSets:
print(i[0], end=" ")
print(int(int(i[1]) * ((int(i[1]) + 1) / 2) + int(i[1])))
| LukeDul/kattis | chanukah.py | chanukah.py | py | 325 | python | en | code | 0 | github-code | 13 |
5884326715 | # ------------------------------------------
#
# Program created by Maksim Kumundzhiev
#
#
# email: kumundzhievmaxim@gmail.com
# github: https://github.com/KumundzhievMaxim
# -------------------------------------------
"""
A left rotation operation on an array shifts each of the array's elements 1 unit to the left.
For example, if 2 left rotations are performed on array [1,2,3,4,5], then the array would become [3,4,5,1,2].
Given an array a of n integers and a number,d , perform d left rotations on the array.
Return the updated array to be printed as a single line of space-separated integers.
"""
a = [1, 2, 3, 4, 5]
d = 4
def rotate(a, d):
for _ in range(d):
if d == 0:
return a
else:
item = a.pop(0)
a.append(item)
return a
print(rotate(a, d)) | MaxKumundzhiev/Practices-for-Engineers | Algorithms/left_rotation.py | left_rotation.py | py | 821 | python | en | code | 3 | github-code | 13 |
18562250878 | from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import numpy as np
writer = SummaryWriter("../logs")
image_path = "../dataset/hymenoptera_data/train/bees/95238259_98470c5b10.jpg"
img_PIL = Image.open(image_path)
print(type(img_PIL))
img_array = np.array(img_PIL)
print(type(img_array))
print(img_array.shape)
writer.add_image("test", img_array, 2, dataformats='HWC')
# y = 2x
for i in range(100):
writer.add_scalar("y = 2x", 2 * i, i)
writer.close()
| sun1f/code_learning | learn_pytorch/src/test_tb.py | test_tb.py | py | 483 | python | en | code | 1 | github-code | 13 |
8885180322 |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import seaborn as sns
import missingno as msno
import matplotlib.pyplot as plt
import warnings
plt.style.use('ggplot') # other options are 'classic', 'grayscale', 'fivethirtyeight', 'ggplot',
# seaborn-whitegrid', 'seaborn-white', 'seaborn-pastel'
# use "print(plt.style.available)" for more options
warnings.filterwarnings("ignore")
get_ipython().magic('matplotlib inline')
# In[2]:
print(sns.get_dataset_names())
titanic = sns.load_dataset('titanic')
# # Missing Value Analysis
#
# We want to understand the sparsity of our dataset.
# One sound approach is to understand the sparsity of the features that are not complete (i.e. with 1 or more NA)
# In[3]:
# Get the dimensions of the titanic data set
titanic.shape
# In[4]:
# Code attributed to Vivek Srinivasan https://www.kaggle.com/viveksrinivasan/kernels
missingValueColumns = titanic.columns[titanic.isnull().any()].tolist()
msno.bar(titanic[missingValueColumns], figsize=(20,8),color="#34495e",fontsize=20,labels=True,)
# So out of the 15 features we get 4 features that have some NAs. Let's see how they are distributed among the samples.
# In[5]:
# Code attributed to Vivek Srinivasan https://www.kaggle.com/viveksrinivasan/kernels
msno.matrix(titanic[missingValueColumns], width_ratios=(10,1), figsize=(20,8),color=(0.204, 0.286, 0.369), fontsize=20, sparkline=True, labels=True)
# In[15]:
msno.heatmap(titanic[missingValueColumns])
# # Some General Plots
# ## A 2 X 1 plot
# In[6]:
# Code attributed to Jake Vanderplas https://github.com/jakevdp/PythonDataScienceHandbook
plt.figure() # create a plot figure, use figsize argument to change size e.g. figsize = (10, 8)
x = np.linspace(0, 10, 100)
# create the first of two panels and set current axis
plt.subplot(2, 1, 1) # (rows, columns, panel number)
plt.plot(x, np.sin(x))
plt.title('Sin')
# create the second panel and set current axis
plt.subplot(2, 1, 2)
plt.plot(x, np.cos(x))
plt.title('Cos');
# Here's another way of plotting using an object oriented interface.
# In[7]:
# Code attributed to Jake Vanderplas https://github.com/jakevdp/PythonDataScienceHandbook
# First create a grid of plots
# ax will be an array of two Axes objects
fig, ax = plt.subplots(2)
# Call plot() method on the appropriate object
ax[0].plot(x, np.sin(x))
ax[0].set_title('Sin')
ax[1].plot(x, np.cos(x))
ax[1].set_title('Cos');
# Here's a shorter way of writing the code.
# In[8]:
# Code attributed to Jake Vanderplas https://github.com/jakevdp/PythonDataScienceHandbook
# First create a grid of plots
# ax will be an array of two Axes objects
fig, axes = plt.subplots(2) # use figsize argument to change size e.g. figsize = (10, 8)
for lab, func, ax in zip(['Sin', 'Cos'], [np.sin, np.cos], axes):
# Call plot() method on the appropriate object
ax.plot(x, func(x))
ax.set_title(lab)
# Here's another way of plotting using itertools and the gridspec module from matplotlib.
# In[9]:
# Code attributed to Sebastian Raschka https://sebastianraschka.com/books.html
import matplotlib.gridspec as gridspec
import itertools
gs = gridspec.GridSpec(2, 1)
fig = plt.figure() # use figsize argument to change size e.g. figsize = (10, 8)
for lab, func, grd in zip(['Sin', 'Cos'],
[np.sin, np.cos],
itertools.product([0, 1], repeat=1)):
ax = plt.subplot(gs[grd[0]])
plt.plot(x, func(x))
plt.title(lab)
# In[10]:
list(itertools.product([0, 1], repeat=3))
# In[11]:
fig = plt.figure()
ax = plt.axes()
x = np.linspace(0, 10, 1000)
ax.plot(x, np.sin(x));
# In[12]:
plt.plot(x, np.sin(x))
plt.axis('tight');
# In[13]:
plt.plot(x, np.sin(x))
plt.axis('equal');
# In[14]:
plt.axis
# In[ ]:
| michaelbasca/jupyter-reference-guide | scripts/Quick_reference.py | Quick_reference.py | py | 3,856 | python | en | code | 0 | github-code | 13 |
17055323834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LinkMallCallBackInfo(object):
def __init__(self):
self._action = None
self._bizid = None
self._extinfo = None
self._lmuserid = None
self._promotionid = None
self._promotioninstanceid = None
self._taobaoid = None
self._time_stamp = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def bizid(self):
return self._bizid
@bizid.setter
def bizid(self, value):
self._bizid = value
@property
def extinfo(self):
return self._extinfo
@extinfo.setter
def extinfo(self, value):
self._extinfo = value
@property
def lmuserid(self):
return self._lmuserid
@lmuserid.setter
def lmuserid(self, value):
self._lmuserid = value
@property
def promotionid(self):
return self._promotionid
@promotionid.setter
def promotionid(self, value):
self._promotionid = value
@property
def promotioninstanceid(self):
return self._promotioninstanceid
@promotioninstanceid.setter
def promotioninstanceid(self, value):
self._promotioninstanceid = value
@property
def taobaoid(self):
return self._taobaoid
@taobaoid.setter
def taobaoid(self, value):
self._taobaoid = value
@property
def time_stamp(self):
return self._time_stamp
@time_stamp.setter
def time_stamp(self, value):
self._time_stamp = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.bizid:
if hasattr(self.bizid, 'to_alipay_dict'):
params['bizid'] = self.bizid.to_alipay_dict()
else:
params['bizid'] = self.bizid
if self.extinfo:
if hasattr(self.extinfo, 'to_alipay_dict'):
params['extinfo'] = self.extinfo.to_alipay_dict()
else:
params['extinfo'] = self.extinfo
if self.lmuserid:
if hasattr(self.lmuserid, 'to_alipay_dict'):
params['lmuserid'] = self.lmuserid.to_alipay_dict()
else:
params['lmuserid'] = self.lmuserid
if self.promotionid:
if hasattr(self.promotionid, 'to_alipay_dict'):
params['promotionid'] = self.promotionid.to_alipay_dict()
else:
params['promotionid'] = self.promotionid
if self.promotioninstanceid:
if hasattr(self.promotioninstanceid, 'to_alipay_dict'):
params['promotioninstanceid'] = self.promotioninstanceid.to_alipay_dict()
else:
params['promotioninstanceid'] = self.promotioninstanceid
if self.taobaoid:
if hasattr(self.taobaoid, 'to_alipay_dict'):
params['taobaoid'] = self.taobaoid.to_alipay_dict()
else:
params['taobaoid'] = self.taobaoid
if self.time_stamp:
if hasattr(self.time_stamp, 'to_alipay_dict'):
params['time_stamp'] = self.time_stamp.to_alipay_dict()
else:
params['time_stamp'] = self.time_stamp
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = LinkMallCallBackInfo()
if 'action' in d:
o.action = d['action']
if 'bizid' in d:
o.bizid = d['bizid']
if 'extinfo' in d:
o.extinfo = d['extinfo']
if 'lmuserid' in d:
o.lmuserid = d['lmuserid']
if 'promotionid' in d:
o.promotionid = d['promotionid']
if 'promotioninstanceid' in d:
o.promotioninstanceid = d['promotioninstanceid']
if 'taobaoid' in d:
o.taobaoid = d['taobaoid']
if 'time_stamp' in d:
o.time_stamp = d['time_stamp']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LinkMallCallBackInfo.py | LinkMallCallBackInfo.py | py | 4,300 | python | en | code | 241 | github-code | 13 |
44023969943 | #!/usr/bin python2.7
#acquery.py
#Retrieval and audio playback of Audio Commons sounds using OSC server
#listening to text-based queries
#
#The example retrieves sounds from Freesound using the Audio Commons API.
#
#Dependencies:
#pyosc: https://github.com/ptone/pyosc/
#
#Usage:
#python acquery.py
#(uses default OSC address /query and port 7777)
#
#python acquery.py <OSC address> <OSC port>
#(uses specified OSC address and port)
#example: python freesoundquery.py /play 8888
#
#Useful links:
#Audio Commons API:
#http://m2.audiocommons.org
#http://m2.audiocommons.org/#/search/searchAudioClips
#
#Here, HTTP GET requests are handled with the Python requests package
#(http://docs.python-requests.org/en/master/). Help can be found here:
#https://flask-restless.readthedocs.io/en/stable/searchformat.html
#
# Copyright (c) 2017-2018, Mathieu Barthet, Some Rights Reserved
from __future__ import print_function
import freesound,sys,os
from os.path import expanduser
import argparse
import math
from OSC import OSCServer
import subprocess
from multiprocessing import Process
from time import sleep
from threading import Thread
from Queue import Queue, Empty
import signal
from cStringIO import StringIO
import random
import requests
import json
import urllib
import types
home = expanduser("~")
sounddir = home + "/Documents/sounds"
PLAYERPATH = "/usr/bin/afplay"
OSC_PORT_DEFAULT = 7777
#to request your own Freesound API key, go to: https://freesound.org/apiv2/apply
#FREESOUND_KEY = "" #specify your Freesound key here
url = 'http://m2.audiocommons.org/api/audioclips/search'
OSC_ADDRESS_DEFAULT = "/query"
WIFI_INT_DEFAULT = "en0" #default WiFi interface
MIN_DUR = 1 #minimum duration (s) for retrieved sounds
MAX_DUR = 20 #maximum duration (s) for retrieved sounds
SHOWRES = 10 #only displays first SHOWRES results from Freesound
SOUND_RANGE = 5 #play random sound within first SOUND_RANGE retrieved sounds
def download_sound(q,soundurl,soundpath):
#download sound from Freesound given specified URL
#to specify a Freesound key
#params = dict(token=FREESOUND_KEY)
#response = requests.get(soundurl, params=params)
response = requests.get(soundurl)
print(urllib.unquote(response.url).decode('utf8'))
try:
assert response.status_code == 200 #response OK
#stores the sound at specified location
with open (soundpath, 'wb') as f:
f.write(response.content)
except AssertionError:
print('There is an issue with the HTTP GET request to download the sound. You may need to specify a Freesound key.')
#Queries sounds from Freesound given text-based search and filtering options
def retrieve_sound(keyword):
soundpath = [] #stays empty if no sounds can be found matching the criteria
queue = Queue() #queue for process
print("Query")
#specifies the query through keyword and filtering options
#in this example, the selected source content provider is Freesound
params = dict(pattern=keyword, source="freesound")
response = requests.get(url, params=params)
print(urllib.unquote(response.url).decode('utf8'))
try:
assert response.status_code == 200 #response OK
restext = response.text
#print(restext)
resdic = json.loads(restext)
if (resdic!={} and resdic["results"]!=[]): #checks that the query returned results
nsounds = len(resdic["results"][0]["members"])
print("Number of results for query %s: %d"%(keyword, nsounds))
if nsounds>=1: #if there is at least 1 sound matching the criteria
#download random result within first SOUND_RANGE sounds if no of sounds
#is greater than 1
if nsounds==1:
sound_index=0
else:
sound_index = random.randint(0,min(SOUND_RANGE,nsounds))
print("Sound number %d has been picked up randomly."%sound_index)
soundname = resdic["results"][0]["members"][sound_index]["content"]["title"]
print("Sound name: %s"%soundname)
#picks an mp3 version of the sound
for i in resdic["results"][0]["members"][sound_index]["content"]["availableAs"]:
audioencoding = i["hasAudioEncodingFormat"]
if (audioencoding=="ebu-codecs:_8.4" or "mp3" in audioencoding): #mp3
soundurl = i["locator"] #selects the first mp3 version
break
print("Downloading ",soundurl)
soundpath = os.path.join(sounddir,soundname + ".mp3")
print("Sound download location: ",soundpath)
if os.path.exists(soundpath) == 0: #if the file has not yet been downloaded
#starts a process to download the sound
p = Process(target=download_sound, args=(queue, soundurl, soundpath))
p.start()
p.join() # this blocks until the process terminates
print("Sound downloaded.")
else: #sound already exists
print("Sound already exists.")
else:
print("The query did not provide any results.")
except AssertionError:
print('There is an issue with the HTTP GET request to query sounds.')
return soundpath
def play_sound(soundpath):
if os.path.exists(soundpath):
print("Sound downloaded, ready to be played!")
#uses the afplay player to play the sound
playcmd = "afplay %s"%'''"''' + soundpath + '''"'''
print("About to execute following command:",playcmd)
playcmdnoshell=["afplay","%s"%soundpath]
pplay = subprocess.Popen(playcmdnoshell,shell=False,stderr=subprocess.PIPE)
else:
print("Sound can't be found at location.")
# from pyosc example:
# this method of reporting timeouts only works by convention
# that before calling handle_request() field .timed_out is
# set to False
def handle_timeout(self):
self.timed_out = True
# adapted from pyosc example
def user_callback(path, tags, args, source):
# query is determined by path:
# we just throw away all slashes and join together what's left
oscadd = ''.join(path.split("/"))
print("osc address:", oscadd)
# expected OSC message with tag containing 's'
# args is a OSCMessage with data
# source is where the message came from (in case you need to reply)
#print ("Now do something with", user,args[2],args[0],1-args[1])
#If there are multiple keywords separated by spaces and lines, we only take the first one
#(the current version of the Audio Commons API doesn't handle multiple keywords).
keyword = args[0]
keyword = ' '.join([line.strip() for line in keyword.strip().splitlines()])
keyword = keyword.split(" ")[0].lstrip()
print("keyword:%s"%keyword)
try:
keyword.decode('ascii') #checks that the keyword is encoded in ASCII
print ("Process OSC address",oscadd,"with keyword:",keyword)
path = retrieve_sound(keyword)
if path != []:
print ("Finished retrieving sound.")
play_sound(path)
except UnicodeDecodeError:
print("The keyword needs to be a ascii-encoded unicode string.")
# from pyosc example
def quit_callback(path, tags, args, source):
# don't do this at home (or it'll quit blender)
global run
run = False
# from pyosc example
# user script that's called by the game engine every frame
def each_frame(server):
# clear timed_out flag
server.timed_out = False
# handle all pending requests then return
while not server.timed_out:
server.handle_request()
def main():
if len(sys.argv)>1:
OSC_ADDRESS = str(sys.argv[1])
else:
OSC_ADDRESS = OSC_ADDRESS_DEFAULT
if len(sys.argv)>2:
OSC_PORT = int(sys.argv[2])
else:
OSC_PORT = OSC_PORT_DEFAULT
if len(sys.argv)>3:
WIFI_INT = str(sys.argv[3])
else:
WIFI_INT = WIFI_INT_DEFAULT
#creates directory to store sounds
if not os.path.exists(sounddir):
os.makedirs(sounddir)
print("Sound directory: ",sounddir)
#example of use of pyosc,
#see: https://github.com/ptone/pyosc/blob/master/examples/knect-rcv.py
#we use here the WiFi interface provided on device en0
ipcmd = "ipconfig getifaddr %s"%WIFI_INT
print(ipcmd)
ipaddress = os.popen(ipcmd).read().rstrip()
print("IP address: ",ipaddress)
server = OSCServer((ipaddress, OSC_PORT))
server.timeout = 0
global run
run = True
print("Listening to OSC address",OSC_ADDRESS,"on port",OSC_PORT)
#python's way to add a method to an instance of a class
server.handle_timeout = types.MethodType(handle_timeout, server)
server.addMsgHandler(OSC_ADDRESS, user_callback)
#sound query engine
try:
while run:
#sleep(1)
#call user script
each_frame(server)
except KeyboardInterrupt: #to quit program
print("\n")
pass
server.close()
if __name__ == "__main__":
main()
| mabara/oscaudiocommons | acquery.py | acquery.py | py | 8,635 | python | en | code | 0 | github-code | 13 |
29848062496 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('FLASK_SECRET') or 'shhh-they-will-never-know'
SESSION_TYPE = "sqlalchemy"
SESSION_USE_SIGNER = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
SESSION_SQLALCHEMY_TABLE = "session"
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL") or \
"sqlite:///" + os.path.join(basedir, "session.db")
choice_type = {
"binary": ["Yes", "No"]
}
questions = [
{
"question":
"Are your outputs business critical?",
"choices": choice_type["binary"],
"next_index":1
},
{
"question":
"Are users of your code able to program in the appropriate"
" language?",
"choices": choice_type["binary"],
"next_index": None
}
]
default_scores = {
"documentation": 0,
"peer_review": 0,
"testing": 0,
}
class Question(object):
def __init__(
self,
question,
question_type,
choices,
next_question
):
self.question = question
self.question_type = question_type,
self.choices = choices
self.next_question = next_question
def add_choices(self):
pass
def set_next_question(self, next_question):
pass
| foster999/project-planner | app/config.py | config.py | py | 1,405 | python | en | code | 0 | github-code | 13 |
7159701447 | import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
parking_db = myclient["parking_db"]
parking = parking_db["parking"]
# print(myclient.list_database_names())
parking_lot_size = 0
parking_lot_capacity = 0
command_map = {
"Create_parking_lot": 1,
"Park": 2,
"Slot_numbers_for_driver_of_age": 3,
"Slot_number_for_car_with_number": 4,
"Vehicle_registration_number_for_driver_of_age": 5,
"Leave": 6
}
def get_command():
try:
command = input().strip()
except EOFError:
return -1, ""
return command_map[command.split()[0]], command
def make_parking_lot(command):
global parking_lot_size, parking_lot_capacity, parking
temp = command.split(' ')
parking_lot_size = int(temp[1])
parking.delete_many({})
def park(command):
global parking_lot_size, parking_lot_capacity, parking
temp = command.split(' ')
if(parking_lot_size == parking_lot_capacity):
print("Sorry No Space In the Parking")
return
reg_no = temp[1]
age = int(temp[3])
parking_lot_size += 1
all_slots = parking.find({}).sort("slot_no", pymongo.ASCENDING)
empty_slot = 1
for slot in all_slots:
if(slot["slot_no"] == empty_slot):
empty_slot += 1
else:
break
slot_info = {
"slot_no": empty_slot,
"reg_no": reg_no,
"driver_age": age
}
parking.insert_one(slot_info)
def find_slots_for_driver_age(command):
global parking_lot_size, parking_lot_capacity, parking
age = int(command.split(' ')[1])
slots = parking.find({"driver_age": age})
print("Slot Numbers are:")
for slot in slots:
print(slot["slot_no"])
def find_slot_no(command):
global parking_lot_size, parking_lot_capacity, parking
reg_no = command.split(' ')[1]
slot = parking.find_one({"reg_no" : reg_no})
print("Slot Number is:", slot["slot_no"])
def find_regs(command):
global parking_lot_size, parking_lot_capacity, parking
age = int(command.split(' ')[1])
slots = parking.find({"driver_age": age})
print("Registration Numbers are:")
for slot in slots:
print(slot["reg_no"])
def leave_parking(command):
global parking_lot_size, parking_lot_capacity, parking
if(parking_lot_size == 0):
print("Parking is empty")
return
slot_no = int(command.split()[1])
parking.delete_one({"slot_no": slot_no})
parking_lot_size -= 1
def exec(command_type, command):
if(command_type == 1):
print(command, "running", ".........")
make_parking_lot(command)
print("Done..........")
elif(command_type == 2):
print(command, "running", ".........")
park(command)
print("Done..........")
elif(command_type == 3):
print(command, "running", ".........")
find_slots_for_driver_age(command)
print("Done..........")
elif(command_type == 4):
print(command, "running", ".........")
find_slot_no(command)
print("Done..........")
elif(command_type == 5):
print(command, "running", ".........")
find_regs(command)
print("Done..........")
elif(command_type == 6):
print(command, "running", ".........")
leave_parking(command)
print("Done..........")
else:
print(command, "Some Problem with the Input")
def main():
command_type, command = get_command()
while(command_type != -1):
# print(command_type, command)
exec(command_type, command)
command_type, command = get_command()
if __name__ == '__main__':
main() | may98ank/Parking-Assignment | app.py | app.py | py | 3,271 | python | en | code | 1 | github-code | 13 |
2526197966 | import sys
import time
sys.path.append("../..")
from qunetsim.backends import CQCBackend
from qunetsim.components.host import Host
from qunetsim.components.network import Network
def main():
backend = CQCBackend()
network = Network.get_instance()
nodes = ["Alice", "Bob", "Eve", "Dean"]
network.start(nodes, backend)
network.delay = 0.7
hosts = {'alice': Host('Alice', backend),
'bob': Host('Bob', backend)}
network.delay = 0
# A <-> B
hosts['alice'].add_connection('Bob')
hosts['bob'].add_connection('Alice')
hosts['alice'].start()
hosts['bob'].start()
for h in hosts.values():
network.add_host(h)
hosts['alice'].send_classical(hosts['bob'].host_id, 'Hello Bob', False)
hosts['bob'].send_classical(hosts['alice'].host_id, 'Hello Alice', False)
i = 0
bob_messages = hosts['bob'].classical
while i < 5 and len(bob_messages) == 0:
bob_messages = hosts['bob'].classical
i += 1
time.sleep(1)
i = 0
alice_messages = hosts['alice'].classical
while i < 5 and len(alice_messages) == 0:
alice_messages = hosts['alice'].classical
i += 1
time.sleep(1)
assert len(alice_messages) > 0
assert alice_messages[0].sender == hosts['bob'].host_id
assert alice_messages[0].content == 'Hello Alice'
assert (len(bob_messages) > 0)
assert (bob_messages[0].sender == hosts['alice'].host_id)
assert (bob_messages[0].content == 'Hello Bob')
print("All tests succesfull!")
network.stop(True)
exit()
if __name__ == '__main__':
main()
| rheaparekh/QuNetSim | tests/integration_test_single_hop/send_classical_check.py | send_classical_check.py | py | 1,613 | python | en | code | null | github-code | 13 |
26084287123 | from controls import *
from game import *
from random import randint
from online import *
from threading import Thread
from header import get_music
import subprocess
import sys
class SceneLogo (Scene):
"""scene with my logo"""
def __init__(self, time=5000, *argv):
Scene.__init__(self, *argv)
self.run = 0
self.time = time
self.revers = 0
def _start(self):
self.manager.load_images()
if self.manager.message != "end":
tmp = None
try:
tmp = get_music("loading.ogg")
except Exception:
print("Music file for loading not find")
self.thread = Thread(target=self.manager.load_music)
self.thread.setDaemon(True)
self.thread.start()
if tmp is not None:
self.music_stream = tmp.play(loops=1)
self.music_stream.set_volume(0.3)
else:
self.music_stream = None
self.back = self.img_dict[Img.load_background]
self.pos = (0, 0)
def _event(self, event):
if (pygame.KEYDOWN in event or pygame.MOUSEBUTTONUP in event or
self.run > self.time)\
and self.manager.message == "end" and not self.is_end():
if self.music_stream is not None:
self.music_stream.stop()
self.the_end()
self.music_thread = Thread(target=play_music)
self.music_thread.setDaemon(True)
self.music_thread.start()
def _update(self, dt):
self.run += dt
if self.run//200 > self.revers:
self.back, self.pos = self.rot_center(
image=self.img_dict[Img.load_background],
angle=-11*self.revers)
self.revers += 1
def _draw(self, dt):
pos0 = self.pos[0]-self.manager.options[Opt.width]/4
pos1 = self.pos[1]-self.manager.options[Opt.height]
pos = pos0, pos1
self.display.blit(self.back, pos)
tmp0 = self.img_dict[Img.logo].get_rect().w//2
tmp1 = self.display.get_rect().w//2 - tmp0
tmp2 = self.display.get_rect().h//2 - tmp0
self.display.blit(self.img_dict[Img.logo],
(tmp1, tmp2))
tmp0 = self.img_dict[Img.loading_end].get_rect().w/2
tmp1 = self.display.get_rect().w/2 - tmp0
tmp2 = self.img_dict[Img.loading_end].get_rect().h/2
tmp3 = self.display.get_rect().h/2 + tmp2
if self.manager.message == "end" and self.music_stream is not None:
self.display.blit(self.img_dict[Img.loading_end],
(tmp1, tmp3))
def rot_center(self, image, pos=(0, 0), angle = 0):
rot_im = pygame.transform.rotate(image, angle)
ow, oh = image.get_size()
rw, rh = rot_im.get_size()
scale_x, scale_y = float(rw) / ow, float(rh) / oh
dx = round((ow / 2.0) * scale_x - (ow / 2.0))
dy = round((oh / 2.0) * scale_y - (oh / 2.0))
new_pos = pos[0] - dx, pos[1] - dy
return rot_im, new_pos
class MainScene (MenuScene):
"""Main scene"""
def __init__(self, *argv):
MenuScene.__init__(self, *argv)
self.controls = []
self.elem = ME.main
self.menu = None
self.server = None
self.thread = None
self.game_type = {GM.opponent: Player.pc, GM.type: Revers, GM.size: 10}
self.menu_items = {ME.main: (("Online Game", self.button_online),
("New Game", self.button_game),
("Load Game", self.button_load),
("Options", self.button_options),
("About", self.button_about),
("Records", self.button_record),
("Exit", self.button_exit)),
ME.options: (("Back", self.button_back),
("Style", self.button_style),
("Window", self.button_window)),
ME.online: (("Back", self.button_back),
("Client", self.button_client),
("Host", self.button_host)),
ME.message: (("OK", self.button_back),)}
def _start(self, controls=[]):
self.controls = controls
width = self.img_dict[Img.left_substrate].get_rect().w
self.menu = self.create_menu(cord=(10, 10),
elements=self.menu_items[self.elem],
width=width)
    def _draw(self, dt):
        """Draw the menu scene: background, left menu panel and, when the
        right panel has widgets, the right substrate with its controls."""
        self.display.blit(self.img_dict[Img.background], (0, 0))
        self.display.blit(self.img_dict[Img.left_substrate], (0, 0))
        if self.controls:
            self.display.blit(self.img_dict[Img.right_substrate],
                              (self.img_dict[Img.left_substrate].get_rect().w,
                               0))
            for control in self.controls:
                control.draw(self.display)
        self.menu.draw(self.display)

    def _event(self, event):
        """Forward a pygame event to the menu and to every active widget."""
        self.event(event)
        for control in self.controls:
            control.event(event)
    def button_back(self):
        """Return to the main menu page, discarding the pending game
        configuration and shutting down an open network host."""
        self.manager.music_dict[MUS.step].play()
        self.elem = ME.main
        self._start([])
        self.manager.load_options()
        self.game_type = {GM.opponent: Player.pc, GM.type: Revers, GM.size: 10}
        self.thread = None
        if self.server is not None:
            self.server.close()
            self.server = None

    def button_exit(self):
        """Leave via the logo scene and end this scene."""
        self.manager.music_dict[MUS.step].play()
        self.set_next_scene(SceneLogo(2000))
        self.the_end()

    def button_online(self):
        """Switch to the online-game menu page."""
        self.manager.music_dict[MUS.step].play()
        self.elem = ME.online
        self._start([])

    def button_options(self):
        """Switch to the options menu page."""
        self.manager.music_dict[MUS.step].play()
        self.elem = ME.options
        self._start([])
    def button_style(self):
        """Open the style options: radio selectors for the stone/hole image
        sets, preselected from the saved options."""
        self.manager.music_dict[MUS.step].play()
        self.manager.load_options()
        style_one = Label('Select type of style:',
                          self.manager.options[Opt.height]//15,
                          (self.manager.options[Opt.width]//4.5,
                           int(self.manager.options[Opt.height]*0.05)))
        # Current selection key: the 4-tuple of configured image file names.
        style_two = RadioButtons((self.manager.options[Opt.width]//4.5,
                                  int(self.manager.options[Opt.height] *
                                      0.05+style_one.size())),
                                 self.select_point,
                                 (self.manager.options[Opt.point_b],
                                  self.manager.options[Opt.point_w],
                                  self.manager.options[Opt.point],
                                  self.manager.options[Opt.black_hall]),
                                 True,
                                 self.img_dict[Img.radio_select],
                                 self.img_dict[Img.radio_no_select])
        style_two.add_menu_item(self.img_dict[Img.deselect_stock_point],
                                self.img_dict[Img.select_stock_point],
                                ("point_black.png", "point_white.png",
                                 "point.png", "black_hall.png"))
        style_two.add_menu_item(self.img_dict[Img.deselect_politics_point],
                                self.img_dict[Img.select_politics_point],
                                ("usa.png", "russian.png",
                                 "ukraine.png", "german.png"))
        self._start([style_one, style_two])
    def button_window(self):
        """Open the display options: resolution and windowed/fullscreen
        radio selectors plus a Restart button (changes need a relaunch)."""
        self.manager.music_dict[MUS.step].play()
        self.manager.load_options()
        x = self.manager.options[Opt.width]//4.5
        y = int(self.manager.options[Opt.height]*0.05)
        label1 = Label('Select resolution:',
                       self.manager.options[Opt.height]//15,
                       (x, y))
        y += label1.size()
        rb1 = RadioButtons((x, y),
                           self.select_resolution,
                           (self.manager.options[Opt.width],
                            self.manager.options[Opt.height]),
                           True,
                           self.img_dict[Img.radio_select],
                           self.img_dict[Img.radio_no_select])
        font = pygame.font.SysFont("Monospace",
                                   self.manager.options[Opt.height]//20,
                                   bold=False, italic=False)
        b_font = pygame.font.SysFont("Monospace",
                                     self.manager.options[Opt.height]//20,
                                     bold=True, italic=False)
        rb1.add_menu_item(font.render("640x360", True, (255, 255, 255)),
                          b_font.render("640x360", True, (255, 255, 255)),
                          (640, 360))
        rb1.add_menu_item(font.render("1280x720", True, (255, 255, 255)),
                          b_font.render("1280x720", True, (255, 255, 255)),
                          (1280, 720))
        rb1.add_menu_item(font.render("1600x900", True, (255, 255, 255)),
                          b_font.render("1600x900", True, (255, 255, 255)),
                          (1600, 900))
        rb1.add_menu_item(font.render("1920x1080", True, (255, 255, 255)),
                          b_font.render("1920x1080", True, (255, 255, 255)),
                          (1920, 1080))
        # Advance the layout cursor past the radio block before the next label.
        y += self.img_dict[Img.radio_select].get_rect().h
        y += b_font.render("0", True, (255, 255, 255)).get_rect().h+30
        label2 = Label('Select windowed or fullscreen',
                       self.manager.options[Opt.height]//15,
                       (x, y))
        y += label2.size()
        rb2 = RadioButtons((x, y),
                           self.select_full_screen,
                           self.manager.options[Opt.full_screen],
                           True,
                           self.img_dict[Img.radio_select],
                           self.img_dict[Img.radio_no_select])
        rb2.add_menu_item(font.render("Windowed", True, (255, 255, 255)),
                          b_font.render("Windowed",
                                        True, (255, 255, 255)),
                          False)
        rb2.add_menu_item(font.render("FullScreen", True, (255, 255, 255)),
                          b_font.render("FullScreen",
                                        True, (255, 255, 255)),
                          True)
        y += self.img_dict[Img.radio_select].get_rect().h
        y += b_font.render("0", True, (255, 255, 255)).get_rect().h+30
        label3 = Label('Reboot for the take effect',
                       self.manager.options[Opt.height]//15,
                       (x, y))
        y += label3.size()
        b1 = Button("Restart", self.manager.options[Opt.height]//15,
                    (x, y), self.button_restart)
        self._start([label1, label2, label3, rb1, rb2, b1])
    def button_game(self):
        """Open the new-game setup: opponent, rule set and board size; the
        Start button (wired through the size TextInput) launches the game."""
        self.manager.music_dict[MUS.step].play()
        x = self.manager.options[Opt.width]//4.5
        y = int(self.manager.options[Opt.height]*0.05)
        label1 = Label('Select opponent:',
                       self.manager.options[Opt.height]//15,
                       (x, y))
        y += label1.size()
        font = pygame.font.SysFont("Monospace",
                                   self.manager.options[Opt.height]//20,
                                   bold=False, italic=False)
        b_font = pygame.font.SysFont("Monospace",
                                     self.manager.options[Opt.height]//20,
                                     bold=True, italic=False)
        rb1 = RadioButtons((x, y),
                           self.select_opponent,
                           self.game_type[GM.opponent],
                           True,
                           self.img_dict[Img.radio_select],
                           self.img_dict[Img.radio_no_select])
        rb1.add_menu_item(font.render("Player vs PC.", True,
                                      (255, 255, 255)),
                          b_font.render("Player vs PC.", True,
                                        (255, 255, 255)),
                          Player.pc)
        rb1.add_menu_item(font.render("Player vs Player.", True,
                                      (255, 255, 255)),
                          b_font.render("Player vs Player.", True,
                                        (255, 255, 255)),
                          Player.man)
        y += self.img_dict[Img.radio_select].get_rect().h
        y += b_font.render("0", True, (255, 255, 255)).get_rect().h+30
        label2 = Label('Select game type:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label2.size()
        rb2 = RadioButtons((x, y),
                           self.select_type,
                           self.game_type[GM.type],
                           True,
                           self.img_dict[Img.radio_select],
                           self.img_dict[Img.radio_no_select])
        rb2.add_menu_item(font.render("Original revers.", True,
                                      (255, 255, 255)),
                          b_font.render("Original revers.", True,
                                        (255, 255, 255)),
                          Revers)
        rb2.add_menu_item(font.render("Revers with blackhall.", True,
                                      (255, 255, 255)),
                          b_font.render("Revers with blackhall.", True,
                                        (255, 255, 255)),
                          ReversWithBlackHall)
        y += self.img_dict[Img.radio_select].get_rect().h
        y += b_font.render("0", True, (255, 255, 255)).get_rect().h+30
        label3 = Label('Select size:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label3.size()
        # The TextInput validates the board size and forwards it to b1's callback.
        b1 = Button("Start", int(self.manager.options[Opt.height]/15),
                    (x, y+label3.size()), self.button_create_game)
        ti = TextInput(int(self.manager.options[Opt.height]/15),
                       (x, y),
                       self. manager.img_dict[Img.right_substrate],
                       2,
                       is_in_4_to_20,
                       "size ok",
                       "4 <= size <= 20",
                       b1,
                       "10")
        self._start([label1, label2, label3, rb1, rb2, ti])
def button_load(self):
self.manager.music_dict[MUS.step].play()
try:
dump = self.manager.load_game()
if dump is None:
return
self.game_type[GM.opponent] = Player.pc\
if dump[GM.opponent] == "pc" else Player.man
self.game_type[GM.rnd] = dump[GM.rnd]
self.game_type[GM.size] = dump[GM.size]
self.game_type[GM.type] == Revers \
if dump[GM.type] == "Revers" else ReversWithBlackHall
game = self.game_type[GM.type](self.game_type[GM.size],
self.manager,
Player.man,
self.game_type[GM.opponent],
self.game_type[GM.rnd])
game.start()
game.field = dump[GM.field]
game.person = dump[GM.person]
game.valid_path = game.get_valid_path()
game.points = {-1: 0, 1: 0}
for row in game.field:
for column in row:
if column == 1 or column == -1:
game.points[column] += 1
game.interface.set_information(game.points,
game.person,
game.players)
self.set_next_scene(GameScene(game, self.game_type))
self.the_end()
except:
self.create_message("Loading error")
    def button_record(self):
        """Show the best record per board size (4..20); clicking an entry
        opens the full table for that size via open_record."""
        if self.manager.records is None:
            self.create_message("Error in open file of records")
            return
        self.manager.music_dict[MUS.step].play()
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        label1 = Label('TABLE OF RECORDS',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label1.size()
        records_lst = []
        for i in range(4, 21):
            # Best (first) entry for this size; a score of 0 means no record yet.
            value = self.manager.records[i][0]
            records_lst.append(("size " + str(i) + ": " + value[0] + " - " +
                                ("" if value[1] == 0 else str(value[1])),
                                self.open_record,
                                i))
        # NOTE(review): `list` shadows the builtin for the rest of this method.
        list = self.create_menu(cord=(x, y),
                                elements=records_lst,
                                width=2000,
                                font_size=self.manager.options[Opt.height]//22)
        self._start([label1, list])
def open_record(self, i):
self.manager.music_dict[MUS.step].play()
x = int(self.manager.options[Opt.width]/4.5)
y = int(self.manager.options[Opt.height]*0.05)
label1 = Label('RECORDS IN SIZE:' + str(i),
int(self.manager.options[Opt.height]/15),
(x, y))
y += label1.size()
records_lst = []
j = 0
for i in self.manager.records[i]:
records_lst.append((str(j) + ": " +
i[0] + " - " +
("" if i[1] == 0 else str(i[1])),
None))
j += 1
list = self.create_menu(cord=(x, y),
elements=records_lst,
width=2000,
font_size=self.manager.options[Opt.height]//22)
self._start([label1, list])
    def button_about(self):
        """Show the static About text on the right-hand panel."""
        self.manager.music_dict[MUS.step].play()
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        # The literal below is displayed verbatim (including its spelling);
        # it is runtime text, not a comment, so it is left untouched.
        label1 = Label('''Revers Game
                       This app has written by Andrey Novikov
                       You can choose one of two rules for the game
                       First: The original rules
                       Second: The original rules with a black hole
                       You can play:
                            With the computer
                            With your friend on the network
                            Wiht your friend on this computer
                       You can choose the size of the playing field
                       Enjoy the game''',
                       int(self.manager.options[Opt.height]/22),
                       (x, y))
        self._start([label1])
    def button_client(self):
        """Open the join-game form: an IP TextInput wired to a Connect
        button that calls button_connect."""
        self.manager.music_dict[MUS.step].play()
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        label1 = Label('Please input id:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label1.size()
        b1 = Button("Connect", int(self.manager.options[Opt.height]/15),
                    (x, y+label1.size()), self.button_connect)
        text_input = TextInput(int(self.manager.options[Opt.height]/15),
                               (x, y),
                               self.img_dict[Img.right_substrate],
                               15,
                               is_it_ip,
                               "it is ip",
                               "it is not ip",
                               b1,
                               "127.0.0.1")
        self._start([label1, text_input])
    def button_host(self):
        """Open the host-game setup: rules and board size; the Host button
        (via the size TextInput) starts waiting for a client."""
        self.manager.music_dict[MUS.step].play()
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        font = pygame.font.SysFont("Monospace",
                                   int(self.manager.options[Opt.height]/20),
                                   bold=False, italic=False)
        b_font = pygame.font.SysFont("Monospace",
                                     int(self.manager.options[Opt.height]/20),
                                     bold=True, italic=False)
        label2 = Label('Select game type:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label2.size()
        rb2 = RadioButtons((x, y),
                           self.select_type,
                           self.game_type[GM.type],
                           True,
                           self.img_dict[Img.radio_select],
                           self.img_dict[Img.radio_no_select])
        rb2.add_menu_item(font.render("Original revers.",
                                      True, (255, 255, 255)),
                          b_font.render("Original revers.",
                                        True, (255, 255, 255)),
                          Revers)
        rb2.add_menu_item(font.render("Revers with blackhall.",
                                      True, (255, 255, 255)),
                          b_font.render("Revers with blackhall.",
                                        True, (255, 255, 255)),
                          ReversWithBlackHall)
        # Advance the layout cursor past the radio block.
        y += self.img_dict[Img.radio_select].get_rect().h
        y += b_font.render("0", True, (255, 255, 255)).get_rect().h+30
        label3 = Label('Select size:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        y += label3.size()
        b1 = Button("Host", int(self.manager.options[Opt.height]/15),
                    (x, y+label3.size()), self.button_create_host)
        ti = TextInput(int(self.manager.options[Opt.height]/15),
                       (x, y),
                       self. manager.img_dict[Img.right_substrate],
                       2,
                       is_in_4_to_20,
                       "size ok",
                       "4 <= size <= 20",
                       b1,
                       "10")
        self._start([label2, label3, rb2, ti])
    def button_create_game(self, size):
        """Start a local game with the chosen rules and board *size*;
        a random bit decides who moves first."""
        self.manager.music_dict[MUS.step].play()
        self.game_type[GM.size] = int(size)
        self.game_type[GM.rnd] = randint(0, 1)
        game = self.game_type[GM.type](self.game_type[GM.size],
                                       self.manager,
                                       Player.man,
                                       self.game_type[GM.opponent],
                                       self.game_type[GM.rnd])
        game.start()
        self.set_next_scene(GameScene(game, self.game_type))
        self.the_end()

    def button_create_host(self, size):
        """Show a waiting screen and start hosting in a background thread."""
        self.manager.music_dict[MUS.step].play()
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        label1 = Label('Waiting client:',
                       int(self.manager.options[Opt.height]/15),
                       (x, y))
        self.game_type[GM.size] = int(size)
        # Daemon thread: hosting must not block the UI loop.
        self.thread = Thread(target=self.hosting)
        self.thread.setDaemon(True)
        self.thread.start()
        self._start([label1])
    def select_point(self, style):
        """Persist the chosen stone/hole image set (4-tuple of file names)."""
        self.manager.music_dict[MUS.step].play()
        self.manager.options[Opt.point_b] = style[0]
        self.manager.options[Opt.point_w] = style[1]
        self.manager.options[Opt.point] = style[2]
        self.manager.options[Opt.black_hall] = style[3]
        self.manager.dump_options()

    def select_resolution(self, res):
        """Remember the chosen (width, height); persisted on Restart."""
        self.manager.music_dict[MUS.step].play()
        self.manager.options[Opt.width] = res[0]
        self.manager.options[Opt.height] = res[1]

    def select_full_screen(self, res):
        """Remember the windowed/fullscreen flag; persisted on Restart."""
        self.manager.music_dict[MUS.step].play()
        self.manager.options[Opt.full_screen] = res

    def select_opponent(self, res):
        """Record the chosen opponent kind for the next game."""
        self.manager.music_dict[MUS.step].play()
        self.game_type[GM.opponent] = res

    def select_type(self, res):
        """Record the chosen rules class for the next game."""
        self.manager.music_dict[MUS.step].play()
        self.game_type[GM.type] = res
    def button_restart(self):
        """Persist options and relaunch the application.

        NOTE(review): the relaunch runs on a daemon thread immediately
        followed by sys.exit(0); whether the subprocess reliably starts
        before interpreter teardown should be confirmed.
        """
        self.manager.music_dict[MUS.step].play()
        self.manager.dump_options()
        thread = Thread(target=self.sub_function_n1)
        thread.setDaemon(True)
        thread.start()
        sys.exit(0)

    def sub_function_n1(self):
        # NOTE(review): passes a single command string to subprocess.call;
        # this relies on platform-specific handling — confirm behaviour on
        # non-Windows targets.
        subprocess.call("Python main.py")
    def hosting(self):
        """Background worker: open a one-client Host, create the game, send
        the initial dump to the client and switch to the game scene.

        Runs on the daemon thread started by button_create_host.
        """
        if self.server is not None:
            self.server.close()
        self.server = Host(maxContacts=1, auto_message_get=False)
        self.server.start()
        try:
            self.game_type[GM.opponent] = Player.online
            self.game_type[GM.rnd] = randint(0, 1)
            game = self.game_type[GM.type](self.game_type[GM.size],
                                           self.manager,
                                           Player.man,
                                           self.game_type[GM.opponent],
                                           self.game_type[GM.rnd],
                                           self.server)
            game.start()
            # Handshake payload: who starts, board size, rules name, board.
            dump = {GM.rnd: self.game_type[GM.rnd],
                    GM.size: self.game_type[GM.size],
                    GM.type: "Revers"
                    if self.game_type[GM.type] == Revers else "ReversBH",
                    GM.field: game.field}
            self.server.send_pickle(0, dump)
            self.set_next_scene(GameScene(game, self.game_type))
            self.the_end()
        except Exception:
            # NOTE(review): failures are swallowed after closing the host;
            # the user stays on the waiting screen with no feedback.
            self.server.close()
def button_connect(self, ip):
self.manager.music_dict[MUS.step].play()
try:
client = Client()
client.start(ip)
if client.get_pickle()[0] != MSG.id:
self.create_message("Server is busy")
dump = client.get_pickle()
self.game_type[GM.opponent] = Player.online
self.game_type[GM.rnd] = 1-dump[GM.rnd]
self.game_type[GM.size] = dump[GM.size]
self.game_type[GM.type] == Revers\
if dump[GM.type] == "Revers" else ReversWithBlackHall
game = self.game_type[GM.type](self.game_type[GM.size],
self.manager,
Player.man,
self.game_type[GM.opponent],
self.game_type[GM.rnd],
client)
game.start()
game.field = dump[GM.field]
game.valid_path = game.get_valid_path()
game.points = {-1: 0, 1: 0}
for row in game.field:
for column in row:
if column == 1 or column == -1:
game.points[column] += 1
self.set_next_scene(GameScene(game, self.game_type))
self.the_end()
except:
self.create_message("Host not find")
    def create_message(self, msg):
        """Switch to the message page showing *msg* with a single OK button."""
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        self.elem = ME.message
        m1 = Label(msg, int(self.manager.options[Opt.height]/15), (x, y))
        self._start([m1])
class GameScene(MenuScene):
    """Scene that runs an active game plus a small Quit / Save&Quit menu."""
    def __init__(self, game, game_type, *argv):
        MenuScene.__init__(self, *argv)
        # End-of-game message label and optional name-entry widget.
        self.message = None
        self.input = None
        self.game = game
        # Dict describing opponent/rules/size; used when saving the game.
        self.game_type = game_type
    def _start(self):
        """Build the left-hand in-game menu (Quit / Save&Quit)."""
        tmp = self.img_dict[Img.left_substrate].get_rect().w
        self.menu = self.create_menu(cord=(10, 10),
                                     elements=(("Quit",
                                                self.button_back),
                                               ("Save&Quit",
                                                self.button_save)),
                                     width=tmp)

    def _update(self, dt):
        """Advance the game unless an end-of-game message is displayed."""
        if self.message is None:
            self.game.update(dt)

    def _event(self, event):
        """Route events: the name-input widget first if active, otherwise
        the game (or show its end-of-game message), then the menu."""
        if self.input is not None:
            self.input.event(event)
        else:
            if self.game.message is not None:
                self.create_message(self.game.message)
            else:
                self.game.event(event)
        self.event(event)
    def _draw(self, dt):
        """Draw background, menu, right substrate and either the running
        game or the end-of-game message (plus the name input if active)."""
        self.display.blit(self.img_dict[Img.background], (0, 0))
        self.display.blit(self.img_dict[Img.left_substrate], (0, 0))
        self.menu.draw(self.display)
        self.display.blit(self.img_dict[Img.right_substrate],
                          (self.img_dict[Img.left_substrate].get_rect().w, 0))
        if self.message is None:
            self.game.draw(self.display)
        else:
            self.message.draw(self.display)
        if self.input is not None:
            self.input.draw(self.display)
    def button_save(self):
        """Serialise the current game (settings, player to move, board) via
        the manager so it can be restored with Load Game.

        NOTE(review): despite the "Save&Quit" menu label this method only
        saves; it does not leave the scene — confirm intended.
        """
        dump = {GM.opponent: "pc"
                if self.game_type[GM.opponent] == Player.pc else "man",
                GM.rnd: self.game_type[GM.rnd],
                GM.size: self.game_type[GM.size],
                GM.type: "Revers"
                if self.game_type[GM.type] == Revers else "ReversBH",
                GM.person: self.game.person,
                GM.field: self.game.field}
        self.manager.dump_game(dump)
def button_back(self, name=""):
self.manager.music_dict[MUS.step].play()
if name != "":
ivalue = 0
old = None
winner = 1 if self.game.points[1] > self.game.points[-1] else -1
for value in self.manager.records[self.game.size]:
if old is None:
if self.game.points[winner] > value[1]:
old = value
self.manager.records[self.game.size][ivalue] =\
(name, self.game.points[winner])
else:
new_old = value
self.manager.records[self.game.size][ivalue] = old
old = new_old
ivalue += 1
self.manager.dump_records()
if self.game.online is not None:
self.game.online.send_pickle(0, (-1, -2))
if self.game.online == Host:
self.game.online.close()
self.set_next_scene(MainScene())
self.the_end()
    def create_message(self, msg):
        """Show the end-of-game message.

        If the human winner beat an existing record for this board size, a
        name-entry widget is shown instead so the score can be recorded
        (button_back then inserts it); otherwise *msg* is displayed with an
        OK button.
        """
        x = int(self.manager.options[Opt.width]/4.5)
        y = int(self.manager.options[Opt.height]*0.05)
        winner = 1 if self.game.points[1] > self.game.points[-1] else -1
        if self.game.players[winner] == Player.man:
            # NOTE(review): if the human wins without beating any record,
            # no message is set and the scene keeps updating — confirm.
            for value in self.manager.records[self.game.size]:
                if self.game.points[winner] > value[1]:
                    self.message = Label("Your new record is " +
                                         str(self.game.points[winner]) +
                                         ".\nEnter your name:",
                                         self.manager.options[Opt.height]//15,
                                         (x, y))
                    b1 = Button("To record",
                                int(self.manager.options[Opt.height]/15),
                                (x, y+self.message.size()*3),
                                self.button_back)
                    tmp = TextInput(self.manager.options[Opt.height]//15,
                                    (x, y+self.message.size()*2),
                                    self.manager.img_dict[Img.right_substrate],
                                    10,
                                    None,
                                    "",
                                    "",
                                    b1,
                                    "Man")
                    self.input = tmp
                    break
        else:
            self.message = Label(msg,
                                 int(self.manager.options[Opt.height]/15),
                                 (x, y))
        tmp = self.img_dict[Img.left_substrate].get_rect().w
        self.menu = self.create_menu(cord=(10, 10),
                                     elements=([("OK", self.button_back)]),
                                     width=tmp)
| Anovi-Soft/Python | Revers_Game/Scene.py | Scene.py | py | 32,642 | python | en | code | 0 | github-code | 13 |
40617262391 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 28 15:41:27 2018
@author: Thanh Tung Khuat
Another method for serial combination of online learning and agglomerative learning gfmm
Using Agglomerative learning to train a base model, then deploy the trained model for online learning with different training data
AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range, V_pre, W_pre, classId_pre)
INPUT
gamma Membership function slope (default: 1)
teta_onl Maximum hyperbox size (default: 1) for online learning
teta_agglo Maximum hyperbox size (default: 1) for agglomerative v2 learning
bthres Similarity threshold for hyperbox concatenation (default: 0.5)
simil Similarity measure: 'short', 'long' or 'mid' (default: 'mid')
sing Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')
isDraw Progress plot flag (default: False)
oper Membership calculation operation: 'min' or 'prod' (default: 'min')
isNorm Do normalization of input training samples or not?
norm_range New ranging of input data after normalization, for example: [0, 1]
V_pre Hyperbox lower bounds for the model to be updated using new data
W_pre Hyperbox upper bounds for the model to be updated using new data
classId_pre Hyperbox class labels (crisp) for the model to be updated using new data
ATTRIBUTES:
V Hyperbox lower bounds
W Hyperbox upper bounds
classId Hyperbox class labels (crisp)
cardin Hyperbox cardinalities (the number of training samples is covered by corresponding hyperboxes)
clusters Identifiers of input objects in each hyperbox (indexes of training samples covered by corresponding hyperboxes)
"""
import sys, os
sys.path.insert(0, os.path.pardir)
import ast
import numpy as np
import time
import matplotlib
try:
matplotlib.use('TkAgg')
except:
pass
from functionhelper.preprocessinghelper import loadDataset, string_to_boolean, splitDatasetRndClassBasedTo2Part, splitDatasetRndTo2Part
from GFMM.basebatchlearninggfmm import BaseBatchLearningGFMM
from GFMM.onlinegfmm import OnlineGFMM
from GFMM.accelbatchgfmm import AccelBatchGFMM
from GFMM.batchgfmm_v1 import BatchGFMMV1
from GFMM.batchgfmm_v2 import BatchGFMMV2
class AggloOnlineGFMM(BaseBatchLearningGFMM):
    """Serial combination of agglomerative and online GFMM learning.

    An agglomerative (batch) learner first builds hyperboxes from one part
    of the training data; the resulting model then seeds an online learner
    that is updated with the remaining data.
    """
    def __init__(self, gamma = 1, teta_onl = 1, teta_agglo = 1, bthres = 0.5, simil = 'mid', sing = 'max', isDraw = False, oper = 'min', isNorm = False, norm_range = [0, 1], V_pre = np.array([], dtype=np.float64), W_pre = np.array([], dtype=np.float64), classId_pre = np.array([], dtype=np.int16)):
        # NOTE(review): the mutable/array default arguments are kept for
        # interface compatibility; callers must not mutate them in place.
        BaseBatchLearningGFMM.__init__(self, gamma, teta_onl, isDraw, oper, isNorm, norm_range)
        self.teta_onl = teta_onl
        self.teta_agglo = teta_agglo
        # Optional pre-trained hyperboxes used as the starting model.
        self.V = V_pre
        self.W = W_pre
        self.classId = classId_pre
        self.bthres = bthres
        self.simil = simil
        self.sing = sing
    def fit(self, Xl_onl, Xu_onl, patClassId_onl, Xl_off, Xu_off, patClassId_off, typeOfAgglo = 1):
        """
        Train with agglomerative learning first, then online learning.
        The input data need to be normalized before using this function
        Xl_onl          Input data lower bounds (rows = objects, columns = features) for online learning
        Xu_onl          Input data upper bounds (rows = objects, columns = features) for online learning
        patClassId_onl  Input data class labels (crisp) for online learning
        Xl_off          Input data lower bounds (rows = objects, columns = features) for agglomerative learning
        Xu_off          Input data upper bounds (rows = objects, columns = features) for agglomerative learning
        patClassId_off  Input data class labels (crisp) for agglomerative learning
        typeOfAgglo     The used type of agglomerative learning algorithms
        """
        # Bug fix: time.clock() was removed in Python 3.8. Prefer
        # perf_counter when available and fall back to clock on legacy
        # (Python 2) interpreters.
        timer = getattr(time, 'perf_counter', None) or time.clock
        time_start = timer()
        # Perform agglomerative learning
        if typeOfAgglo == 1:
            aggloClassifier = AccelBatchGFMM(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)
        elif typeOfAgglo == 2:
            aggloClassifier = BatchGFMMV2(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)
        else:
            aggloClassifier = BatchGFMMV1(self.gamma, self.teta_agglo, bthres = self.bthres, simil = self.simil, sing = self.sing, isDraw = self.isDraw, oper = self.oper, isNorm = False)
        aggloClassifier.fit(Xl_off, Xu_off, patClassId_off)
        # Seed the online learner with the hyperboxes produced above.
        self.V = aggloClassifier.V
        self.W = aggloClassifier.W
        self.classId = aggloClassifier.classId
        # Perform online learning
        onlClassifier = OnlineGFMM(self.gamma, self.teta_onl, self.teta_onl, isDraw = self.isDraw, oper = self.oper, isNorm = False, norm_range = [self.loLim, self.hiLim], V = self.V, W = self.W, classId = self.classId)
        # training for online GFMM
        onlClassifier.fit(Xl_onl, Xu_onl, patClassId_onl)
        self.V = onlClassifier.V
        self.W = onlClassifier.W
        self.classId = onlClassifier.classId
        time_end = timer()
        self.elapsed_training_time = time_end - time_start
        return self
if __name__ == '__main__':
    """
    INPUT parameters from command line
    arg1: + 1 - training and testing datasets are located in separated files
          + 2 - training and testing datasets are located in the same files
    arg2: path to file containing the training dataset (arg1 = 1) or both training and testing datasets (arg1 = 2)
    arg3: + path to file containing the testing dataset (arg1 = 1)
          + percentage of the training dataset in the input file
    arg4: + True: drawing hyperboxes during the training process
          + False: no drawing
    arg5: + Maximum size of hyperboxes of online learning algorithm (teta_onl, default: 1)
    arg6: + Maximum size of hyperboxes of agglomerative learning algorithm (teta_agglo, default: 1)
    arg7: + gamma value (default: 1)
    arg8: + Similarity threshod (default: 0.5)
    arg9: + Similarity measure: 'short', 'long' or 'mid' (default: 'mid')
    arg10: + operation used to compute membership value: 'min' or 'prod' (default: 'min')
    arg11: + do normalization of datasets or not? True: Normilize, False: No normalize (default: True)
    arg12: + range of input values after normalization (default: [0, 1])
    arg13: + Use 'min' or 'max' (default) memberhsip in case of assymetric similarity measure (simil='mid')
    arg14: + Type of agglomerative learning
            - 1: Accelerated agglomerative learning AGGLO-2
            - 2: Full batch learning slower version
            - 3: Full batch learning faster version
    arg15: + Percentage of online training data (default: 0.5)
    """
    # Init default parameters (each optional argument falls back to a default
    # when not supplied on the command line).
    if len(sys.argv) < 5:
        isDraw = False
    else:
        isDraw = string_to_boolean(sys.argv[4])
    if len(sys.argv) < 6:
        teta_onl = 1
    else:
        teta_onl = float(sys.argv[5])
    if len(sys.argv) < 7:
        teta_agglo = 1
    else:
        teta_agglo = float(sys.argv[6])
    if len(sys.argv) < 8:
        gamma = 1
    else:
        gamma = float(sys.argv[7])
    if len(sys.argv) < 9:
        bthres = 0.5
    else:
        bthres = float(sys.argv[8])
    if len(sys.argv) < 10:
        simil = 'mid'
    else:
        simil = sys.argv[9]
    if len(sys.argv) < 11:
        oper = 'min'
    else:
        oper = sys.argv[10]
    if len(sys.argv) < 12:
        isNorm = True
    else:
        isNorm = string_to_boolean(sys.argv[11])
    if len(sys.argv) < 13:
        norm_range = [0, 1]
    else:
        norm_range = ast.literal_eval(sys.argv[12])
    if len(sys.argv) < 14:
        sing = 'max'
    else:
        sing = sys.argv[13]
    if len(sys.argv) < 15:
        typeOfAgglo = 1
    else:
        typeOfAgglo = int(sys.argv[14])
    if len(sys.argv) < 16:
        percentOnl = 0.5
    else:
        percentOnl = float(sys.argv[15])
    if sys.argv[1] == '1':
        training_file = sys.argv[2]
        testing_file = sys.argv[3]
        # Read training file
        Xtr, X_tmp, patClassIdTr, pat_tmp = loadDataset(training_file, 1, False)
        # Read testing file
        X_tmp, Xtest, pat_tmp, patClassIdTest = loadDataset(testing_file, 0, False)
    else:
        dataset_file = sys.argv[2]
        percent_Training = float(sys.argv[3])
        Xtr, Xtest, patClassIdTr, patClassIdTest = loadDataset(dataset_file, percent_Training, False)
    classifier = AggloOnlineGFMM(gamma, teta_onl, teta_agglo, bthres, simil, sing, isDraw, oper, isNorm, norm_range)
    # Split the training data into the online and the agglomerative part.
    Xtr_onl, Xtr_off = splitDatasetRndTo2Part(Xtr, Xtr, patClassIdTr, percentOnl)
    classifier.fit(Xtr_onl.lower, Xtr_onl.upper, Xtr_onl.label, Xtr_off.lower, Xtr_off.upper, Xtr_off.label)
    # Testing
    print("-- Testing --")
    result = classifier.predict(Xtest, Xtest, patClassIdTest)
    # `is not None` replaces `!= None`: identity is the idiomatic (and safe)
    # way to test for None, independent of any custom __eq__/__ne__.
    if result is not None:
        print("Number of wrong predicted samples = ", result.summis)
        numTestSample = Xtest.shape[0]
        print("Error Rate = ", np.round(result.summis / numTestSample * 100, 2), "%")
10547654316 | #!/usr/bin/env python
# encoding: utf-8
from django.contrib import admin
from .models import Rating
class RatingAdmin(admin.ModelAdmin):
    """Django admin configuration for recipe ratings."""
    # Stable change-list ordering: primary key first, then rating value.
    ordering = ['id', 'rating']
    # Columns shown in the change list.
    list_display = ['rating', 'comment', 'recipe', 'author']
    # Sidebar filters.
    list_filter = ['recipe', 'author']
    # Free-text search over the numeric rating and the comment body.
    search_fields = ['rating', 'comment', ]
admin.site.register(Rating, RatingAdmin)
| open-eats/openeats-api | v1/rating/admin.py | admin.py | py | 359 | python | en | code | 10 | github-code | 13 |
# Read two integers and print their quotient, demonstrating exception
# handling. Reworked to EAFP: the division itself raises ZeroDivisionError,
# which makes the previously unreachable ZeroDivisionError handler do the
# work of the old explicit denominator check (same messages, same output).
try:
    numerator = int(input("Enter the numerator: "))
    denominator = int(input("Enter the denominator: "))
    fraction = numerator / denominator
    print(fraction)
except ValueError:
    # int() failed on one of the inputs.
    print("Numerator and denominator must be valid numbers!")
except ZeroDivisionError:
    print("Cannot divide by zero!")
print("Finished.")
| laijunren/cp1404practicalsLJR | prac-02/exceptions_demo.py | exceptions_demo.py | py | 417 | python | en | code | 1 | github-code | 13 |
74564375378 | from __future__ import (print_function, division)
from future.utils import viewitems
import json
import traceback
import cherrypy
import WMCore.ReqMgr.Service.RegExp as rx
from Utils.Utilities import strToBool
from WMCore.REST.Format import JSONFormat, PrettyJSONFormat
from WMCore.REST.Server import RESTEntity, restcall
from WMCore.REST.Tools import tools
from WMCore.REST.Validation import validate_str
from WMCore.REST.Error import MethodWithoutQueryString
from WMCore.ReqMgr.DataStructs.ReqMgrConfigDataCache import ReqMgrConfigDataCache
from WMCore.ReqMgr.DataStructs.RequestError import InvalidSpecParameterValue
from WMCore.ReqMgr.Utils.Validation import get_request_template_from_type
from WMCore.WMSpec.WMWorkload import WMWorkloadHelper
def format_algo_web_list(task_name, task_type, split_param, algo_config):
    """Build the web-form description of the splitting algorithms available
    for one task.

    task_name -- task name shown in the form
    task_type -- task type (e.g. "Processing", "Merge"); selects the list of
                 allowed algorithms from algo_config["algo_list_by_types"]
    split_param -- current splitting parameters; must contain "algorithm"
    algo_config -- splitting configuration with the editable parameters per
                   algorithm under "algo_params"

    Returns a dict with taskName, taskType and splitParamList, where the
    first entry of splitParamList is the currently configured algorithm
    (restricted to its editable parameters) followed by the defaults for
    every other algorithm allowed for this task type (Merge tasks keep
    their algorithm and get no alternatives).

    Raises cherrypy.HTTPError(400) when the algorithm name is empty.
    """
    fdict = {"taskName": task_name, "taskType": task_type}
    default_algo = split_param["algorithm"]
    algo_list = algo_config["algo_list_by_types"][task_type]
    param_list = []
    if default_algo in algo_list:
        # Keep only the parameters that are editable for this algorithm.
        # dict.items() replaces future.utils.viewitems: it is iterable on
        # both Python 2 and 3 and drops the third-party dependency.
        new_param = {"algorithm": default_algo}
        for key, value in split_param.items():
            if key in algo_config["algo_params"][default_algo]:
                new_param[key] = value
        param_list.append(new_param)
    elif default_algo == "":
        raise cherrypy.HTTPError(400, "Algorithm name is empty: %s" % split_param)
    else:
        # Unknown algorithm for this task type: pass it through untouched.
        param_list.append(split_param)
    # If task type is merge don't allow change the algorithm.
    if fdict["taskType"] != "Merge":
        for algo in algo_list:
            if algo != default_algo:
                param = {"algorithm": algo}
                param.update(algo_config["algo_params"][algo])
                param_list.append(param)
    fdict["splitParamList"] = param_list
    return fdict
def create_web_splitting_format(split_info):
    """Translate raw per-task splitting info into the list-of-dicts layout
    the web UI expects, dropping tasks whose parameters may not be edited.
    """
    settings = ReqMgrConfigDataCache.getConfig("EDITABLE_SPLITTING_PARAM_CONFIG")
    # Cleanup and LogCollect tasks are fixed; never expose them for editing.
    editable = (task for task in split_info
                if task["taskType"] not in ("Cleanup", "LogCollect"))
    return [format_algo_web_list(task["taskName"], task["taskType"],
                                 task["splitParams"], settings)
            for task in editable]
def create_updatable_splitting_format(split_info):
    """
    _create_updatable_splitting_format_

    Returns the workflow job splitting without parameters that
    cannot be updated in the POST call (and without Cleanup/LogCollect
    tasks, which are not editable at all).
    """
    settings = ReqMgrConfigDataCache.getConfig("EDITABLE_SPLITTING_PARAM_CONFIG")
    result = []
    for task in split_info:
        if task["taskType"] in ("Cleanup", "LogCollect"):
            continue
        algo = task["splitAlgo"]
        # Keep only the parameters that are valid for this algorithm.
        kept = {}
        for name in task["splitParams"]:
            valid, _ = _validate_split_param(algo, name, settings)
            if valid:
                kept[name] = task["splitParams"][name]
        task["splitParams"] = kept
        result.append(task)
    return result
def _validate_split_param(split_algo, split_param, algo_config):
"""
validate param for editing, also returns param type
"""
valid_params = algo_config["algo_params"][split_algo]
if split_param in valid_params:
if isinstance(valid_params[split_param], bool):
cast_type = bool
else:
cast_type = int
return (True, cast_type)
else:
return (False, None)
def _assign_key_value(keyname, keyvalue, return_params, cast_type):
if cast_type is None:
return_params[keyname] = keyvalue
elif cast_type == bool:
try:
return_params[keyname] = strToBool(keyvalue)
except ValueError:
msg = "%s expects a boolean value, you provided %s" % (keyname, keyvalue)
raise cherrypy.HTTPError(400, msg)
else:
return_params[keyname] = cast_type(keyvalue)
class RequestSpec(RESTEntity):
    """REST entity returning the request-creation template (spec) for a
    given request type."""
    def validate(self, apiobj, method, api, param, safe):
        """
        Validate request input data.
        Has to be implemented, otherwise the service fails to start.
        If it's not implemented correctly (e.g. just pass), the arguments
        are not passed in the method at all.
        """
        validate_str("name", param, safe, rx.RX_REQUEST_NAME, optional=False)
    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self, name):
        """
        Spec template API call.
        :arg str name: request type whose template should be returned.
        :returns: 1-item list containing the template dict.
        """
        result = get_request_template_from_type(name)
        return [result]
class WorkloadConfig(RESTEntity):
    """REST entity exposing the full workload (WMWorkload) configuration of
    a request as plain text."""
    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)
        self.reqdb_url = "%s/%s" % (config.couch_host, config.couch_reqmgr_db)
    def _validate_args(self, param, safe):
        """Require exactly one positional path argument (the request name)."""
        # TODO: need proper validation but for now pass everything
        args_length = len(param.args)
        if args_length == 1:
            safe.kwargs["name"] = param.args[0]
            param.args.pop()
        else:
            raise MethodWithoutQueryString
        return
    def validate(self, apiobj, method, api, param, safe):
        """
        Validate request input data.
        Has to be implemented, otherwise the service fails to start.
        If it's not implemented correctly (e.g. just pass), the arguments
        are not passed in the method at all.
        """
        self._validate_args(param, safe)
    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self, name):
        """
        Workload config world API call.
        :arg str name: name of the request whose workload should be loaded.
        :returns: the workload configuration rendered as a string.
        """
        helper = WMWorkloadHelper()
        try:
            helper.loadSpecFromCouch(self.reqdb_url, name)
        except Exception:
            # Any load failure (missing document, connection error) maps
            # to a plain 404 for the client.
            raise cherrypy.HTTPError(404, "Cannot find workload: %s" % name)
        return str(helper.data)
class WorkloadSplitting(RESTEntity):
    """REST entity for reading and updating per-task job-splitting parameters."""

    def __init__(self, app, api, config, mount):
        # main CouchDB database where requests/workloads are stored
        RESTEntity.__init__(self, app, api, config, mount)
        self.reqdb_url = "%s/%s" % (config.couch_host, config.couch_reqmgr_db)

    def _validate_get_args(self, param, safe):
        # TODO: need proper validation but for now pass everything
        # Accepted URL shapes:
        #   /<name>                -> plain splitting info
        #   /web_form/<name>      -> formatted for the web form
        #   /update_only/<name>   -> only the updatable parameters
        args_length = len(param.args)
        if args_length == 1:
            safe.kwargs["name"] = param.args[0]
            param.args.pop()
        elif args_length == 2 and param.args[0] == "web_form":
            safe.kwargs["web_form"] = True
            safe.kwargs["name"] = param.args[1]
            param.args.pop()
            param.args.pop()
        elif args_length == 2 and param.args[0] == "update_only":
            safe.kwargs["update_only"] = True
            safe.kwargs["name"] = param.args[1]
            param.args.pop()
            param.args.pop()
        else:
            raise MethodWithoutQueryString
        return

    def validate(self, apiobj, method, api, param, safe):
        """
        Validate request input data.
        Has to be implemented, otherwise the service fails to start.
        If it's not implemented correctly (e.g. just pass), the arguments
        are not passed in the method at all.
        """
        try:
            if method == 'GET':
                self._validate_get_args(param, safe)
            if method == 'POST':
                # POST only takes the request name as path argument; the
                # splitting payload arrives in the request body.
                args_length = len(param.args)
                if args_length == 1:
                    safe.kwargs["name"] = param.args[0]
                    param.args.pop()
        except InvalidSpecParameterValue as ex:
            raise ex
        except Exception as ex:
            # Any other failure is logged with its traceback and surfaced
            # to the client as an InvalidSpecParameterValue.
            msg = traceback.format_exc()
            cherrypy.log("Error: %s" % msg)
            if hasattr(ex, "message"):
                if hasattr(ex.message, '__call__'):
                    msg = ex.message()
                else:
                    msg = str(ex)
            else:
                msg = str(ex)
            raise InvalidSpecParameterValue(msg)

    @restcall(formats=[('text/plain', PrettyJSONFormat()), ('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def get(self, name, web_form=False, update_only=False):
        """
        Return the job-splitting parameters of every task of a request.

        :arg str name: request name whose workload is loaded from CouchDB.
        :arg bool web_form: reformat the result for the splitting web form.
        :arg bool update_only: reduce the result to updatable parameters.
        :returns: list of per-task dicts (splitAlgo/splitParams/taskType/taskName).
        :raises cherrypy.HTTPError: 404 when the workload cannot be loaded.
        """
        helper = WMWorkloadHelper()
        try:
            helper.loadSpecFromCouch(self.reqdb_url, name)
        except Exception:
            raise cherrypy.HTTPError(404, "Cannot find workload: %s" % name)

        splittingDict = helper.listJobSplittingParametersByTask(performance=False)
        taskNames = sorted(splittingDict.keys())

        splitInfo = []
        for taskName in taskNames:
            splitInfo.append({"splitAlgo": splittingDict[taskName]["algorithm"],
                              "splitParams": splittingDict[taskName],
                              "taskType": splittingDict[taskName]["type"],
                              "taskName": taskName})
        if web_form:
            splitInfo = create_web_splitting_format(splitInfo)
        elif update_only:
            splitInfo = create_updatable_splitting_format(splitInfo)
        return splitInfo

    @restcall(formats=[('application/json', JSONFormat())])
    @tools.expires(secs=-1)
    def post(self, name):
        """
        Parse job splitting parameters sent from the splitting parameter update
        page. Pull down the request and modify the new spec applying the
        updated splitting parameters.

        :arg str name: request name whose workload is updated.
        :raises cherrypy.HTTPError: 404 when the workload cannot be loaded,
            400 when a submitted parameter is not editable for the algorithm.
        """
        data = cherrypy.request.body.read()
        splittingInfo = json.loads(data)
        cherrypy.log("Updating job splitting for '%s' with these args: %s" % (name, splittingInfo))

        helper = WMWorkloadHelper()
        try:
            helper.loadSpecFromCouch(self.reqdb_url, name)
        except Exception:
            raise cherrypy.HTTPError(404, "Cannot find workload for: %s" % name)

        splitSettings = ReqMgrConfigDataCache.getConfig("EDITABLE_SPLITTING_PARAM_CONFIG")
        for taskInfo in splittingInfo:
            splittingTask = taskInfo["taskName"]
            splittingAlgo = taskInfo["splitAlgo"]
            submittedParams = taskInfo["splitParams"]
            splitParams = {}
            # Validate and cast every submitted parameter before it is
            # applied; one bad parameter aborts the whole update.
            for param in submittedParams:
                validFlag, castType = _validate_split_param(splittingAlgo, param, splitSettings)
                if validFlag:
                    _assign_key_value(param, submittedParams[param], splitParams, castType)
                else:
                    msg = "Parameter '%s' is not supported in the algorithm '%s'" % (param, splittingAlgo)
                    raise cherrypy.HTTPError(400, msg)

            helper.setJobSplittingParameters(splittingTask, splittingAlgo, splitParams, updateOnly=True)

        # Now persist all these changes in the workload
        url = "%s/%s" % (self.reqdb_url, name)
        result = helper.saveCouchUrl(url)
        return result
| dmwm/WMCore | src/python/WMCore/ReqMgr/Service/RequestAdditionalInfo.py | RequestAdditionalInfo.py | py | 11,646 | python | en | code | 44 | github-code | 13 |
38273661855 | from django.conf import settings
from django.test.signals import setting_changed
from django.utils.translation import gettext_lazy as _
from rest_framework.settings import APISettings, api_settings
USER_SETTINGS = getattr(settings, 'HEALTH_CHECK', None)

DEFAULTS = {
    # View
    # BUG FIX: the two keys below were wired to each other's DRF default
    # (PERMISSION_CLASSES got DEFAULT_AUTHENTICATION_CLASSES and vice
    # versa), so the health-check view inherited swapped auth/permission
    # classes. Each key now maps to its matching DRF default.
    'PERMISSION_CLASSES': api_settings.DEFAULT_PERMISSION_CLASSES,
    'AUTHENTICATION_CLASSES': api_settings.DEFAULT_AUTHENTICATION_CLASSES,
    'SERIALIZER_CLASS': 'django_k8s_health_check.serializer.HealthSerializer',
    # SERVICE INFO
    'SERVICE_NAME': None,
    'SERVICE_VERSION': None,
    'CHECK_DATABASE': True,
    'CHECK_CACHE': True,
    # Middleware
    'HEADER_FIELD': 'X-Health',
    'HEADER_VALUE': 'health-check',
    'ALLOWED_PATHS': None,
    'ALLOWED_HOSTS': None,
}

# Settings whose string values are dotted paths to be imported lazily.
IMPORT_STRINGS = ('PERMISSION_CLASSES', 'AUTHENTICATION_CLASSES', 'SERIALIZER_CLASS')
REMOVED_SETTINGS = ()

# Rebinds the imported DRF `api_settings` name with this app's own
# APISettings instance (intentional DRF-style settings pattern).
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
def reload_api_settings(*args, **kwargs):  # pragma: no cover
    """Rebuild `api_settings` when Django's HEALTH_CHECK setting changes.

    Connected to Django's `setting_changed` test signal so overridden
    settings take effect without a restart.
    """
    global api_settings
    setting, value = kwargs['setting'], kwargs['value']
    if setting == 'HEALTH_CHECK':
        api_settings = APISettings(value, DEFAULTS, IMPORT_STRINGS)

setting_changed.connect(reload_api_settings)
| shinneider/django-k8s-health-check | django_k8s_health_check/settings.py | settings.py | py | 1,247 | python | en | code | 0 | github-code | 13 |
28095826273 | from tkinter import *
import mysql.connector
from tkinter import messagebox
from datetime import datetime
class FriendsPage(Frame):
    """Tk frame showing the player list and a one-to-one chat with a friend.

    NOTE(review): MySQL credentials are hard-coded in plain text here and in
    the methods below; consider moving them to configuration. The logged-in
    user is also hard-coded ('Zuroke') — presumably a placeholder; verify.
    """

    def __init__(self, parent, controller):
        # *controller* provides show_frame() for navigation between pages.
        Frame.__init__(self, parent)
        self.configure(bg="gray8")

        # connect to db and query for info
        conn = mysql.connector.connect(user='root', password='trolldevelopers',
                                       host='127.0.0.1',
                                       database='darkspear')
        mycursor = conn.cursor()
        mycursor.execute("select username "
                         "FROM player")
        myresult = list(mycursor)
        player_list = []
        i = 0
        # Flatten the 1-column result rows into a plain list of usernames.
        for name in myresult:
            # player_list.append(name)
            player_list = player_list + list(name)
            i = i+1

        # initialize frames
        nav_frame = Frame(self)
        nav_frame.configure(bg="gray8")
        nav_frame.pack(side="left")
        content_frame = Frame(self)
        content_frame.configure(bg="gray8")
        content_frame.pack()
        friends_frame = Frame(content_frame)
        friends_frame.configure(bg="gray8")
        friends_frame.grid(row=2, column=0)
        txt_frame = Frame(self)
        txt_frame.configure(bg="gray8", highlightbackground="deep pink",
                            highlightcolor="deep pink", highlightthickness=3)
        txt_frame.pack()

        # initialize nav bar
        home = Button(nav_frame, text="home", bg="midnight blue", fg="snow",
                      command=lambda: controller.show_frame(HomePage))
        home.grid(row=1, column=0, pady=10)
        clan = Button(nav_frame, text="clan", bg="midnight blue", fg="snow",
                      command=lambda: controller.show_frame(ClanPage))
        clan.grid(row=2, column=0, pady=10)
        quit_sub = Button(nav_frame, text="exit Darkspear", bg="midnight blue",
                          fg="snow", command=lambda: self.confirm_quit())
        quit_sub.grid(row=8, column=0, pady=10)

        # initialize content
        txt = ""
        user = 'Zuroke'
        self.receiver = ""  # set by get_txt() when a friend is selected
        # populate the list of friends
        i = 0
        # friend=friend default binds each button to its own name
        # (avoids the classic late-binding closure bug).
        for friend in player_list:
            user_button = Button(friends_frame, text=friend, bg="midnight blue",
                                 fg="snow", command=lambda friend=friend: self.get_txt(user,
                                 friend, content_frame,))
            user_button.grid(row=i, column=0)
            i = i + 1

        chat = Text(content_frame, height=40, width=100)
        chat.configure(wrap="word", yscrollcommand="scroll.set", bg="grey8", fg="lawn green")
        if txt == "":
            chat_txt = "this is just a general palceholder to hold a list " \
                       "of the clan's chat"
        else:
            chat_txt = txt
        chat.insert(END, chat_txt)
        chat.grid(row=1, column=1, rowspan=2)
        # make a scroll bar for the chat
        scroll = Scrollbar(content_frame)
        scroll.grid(row=1, column=2, rowspan=2, sticky="N E S W")
        scroll.configure(command=chat.yview)

        # initialize chat entry
        usr = Label(txt_frame, text="user's name: ")
        usr.config(bg="midnight blue", fg="snow")
        usr.pack(side="left")
        entry_txt = StringVar()
        usr_txt = Entry(txt_frame, textvariable=entry_txt)
        # entry_txt = usr_txt.get()
        usr_txt.configure(width=130, bg="gray8", fg="lawn green")
        usr_txt.pack(fill=X)
        txt_sub = Button(txt_frame, text="Submit", bg="midnight blue", fg="snow",
                         command=lambda: self.update_txt(usr_txt.get(), user, self.receiver))
        txt_sub.pack(side="right")
        conn.close()

    def update_txt(self, entry_txt, user, receiver):
        """Insert a chat message from *user* to *receiver* into F_chat."""
        # connect to the database
        conn = mysql.connector.connect(user='root', password='trolldevelopers',
                                       host='127.0.0.1',
                                       database='darkspear')
        mycursor = conn.cursor()
        # generate insert (parameterized — safe from SQL injection)
        sql = ("INSERT INTO F_chat (p1_username, p2_username, sender, message) "
               "VALUES (%s, %s, %s, %s)")
        val = (user, receiver, user, entry_txt)
        mycursor.execute(sql, val)
        conn.commit()
        conn.close()

    def get_txt(self, player1, player2, content_frame):
        """Load the chat history between *player1* and *player2* into the chat box."""
        self.receiver = player2
        # create query for chat
        conn = mysql.connector.connect(user='root', password='trolldevelopers',
                                       host='127.0.0.1',
                                       database='darkspear')
        mycursor = conn.cursor()
        chat_stmt = ("select sender, message, msg_time "
                     "from f_chat "
                     "where p1_username = %s and "
                     " p2_username = %s "
                     "order by msg_time")
        data = (player1, player2)
        mycursor.execute(chat_stmt, data)
        fchat_txt = []
        myresult = list(mycursor)
        for txt in myresult:
            fchat_txt.append(txt)

        # Rebuild the chat Text widget with the fetched history.
        chat = Text(content_frame, height=40, width=100)
        chat.configure(wrap="word", yscrollcommand="scroll.set", bg="gray8", fg="lawn green")
        if fchat_txt == "":
            chat_txt = "this is just a general palceholder to hold a list " \
                       "of the friends chat"
        else:
            chat_txt = fchat_txt
        chat.insert(END, chat_txt)
        chat.grid(row=1, column=1, rowspan=2)
        conn.close()

    def confirm_quit(self):
        """Ask for confirmation before exiting the application."""
        if messagebox.askyesno("verify", "really quit?"):
            quit()
        else:
            messagebox.showinfo("no", "quit has been canceled")

    def find_friend(self):
        """Placeholder for the friend-search feature."""
        messagebox.showinfo("soon", "to be implemented later")
from pages.LoginPage import LoginPage
from pages.HomePage import HomePage
from pages.ClanPage import ClanPage
| rachellaurentidwell/darkspear | darkspear/pages/FriendsPage.py | FriendsPage.py | py | 6,213 | python | en | code | 0 | github-code | 13 |
2244673329 | """
Overview
========
Key-Commands
============
Namespace: assoc
Mode:
Event:
Description:
"""
from vyapp.app import root
def install(area):
    """Bind <question> in NORMAL mode to show the widget's assoc data.

    The handler joins ``event.widget.get_assoc_data()`` with newlines and
    displays it in the app's status bar.
    """
    def show_assoc(event):
        return root.status.set_msg('\n'.join(event.widget.get_assoc_data()))

    area.install('assoc', ('NORMAL', '<Key-question>', show_assoc))
| vyapp/vy | vyapp/plugins/assoc.py | assoc.py | py | 305 | python | en | code | 1,145 | github-code | 13 |
n, m = map(int, input().split())


def lastJudge(n, m):
    """
    Return (turns, row_offset, col_offset) for the end point of a
    clockwise spiral filling an n x m rectangle; offsets are relative to
    the rectangle's top-left cell.

    BUG FIX: the recursive branch computed the inner rectangle's answer
    but discarded it (no return), and the top-level prints referenced the
    function's locals (NameError). The recursion now folds the peeled
    rings into the returned tuple.
    """
    if m * n == 1:
        # 1x1: we are already at the end, no turns
        return (0, 0, 0)
    elif m == 1:
        # single column: one extra downward turn, end at the bottom
        return (1, n - 1, 0)
    elif n == 1:
        # single row: straight line, no turn
        return (0, 0, m - 1)
    elif n == 2:
        # 2-row strip always ends at (1, 0) after two turns
        return (2, 1, 0)
    elif m == 2:
        # 2-column strip always ends at (1, 0) after three turns
        return (3, 1, 0)
    # Peel off `rotate` complete rings and recurse on the inner rectangle.
    rotate = (min(n, m) + min(n, m) % 2) // 2 - 1
    edge, jx, jy = lastJudge(n - rotate * 2, m - rotate * 2)
    # Each peeled ring contributes 4 turns and shifts the origin by (1, 1).
    return (4 * rotate + edge, rotate + jx, rotate + jy)


edge, jx, jy = lastJudge(n, m)
print(edge)
print(1 + jx, 1 + jy)
| hodomaroo/BOJ-Solve | 백준/Gold/1959. 달팽이3/달팽이3.py | 달팽이3.py | py | 809 | python | ko | code | 2 | github-code | 13 |
16411493855 | from PIL import Image
import time
def get_concat_v(im1, im2):
    """Return a new RGB image with *im1* stacked on top of *im2*.

    The canvas is as wide as *im1* and as tall as both images combined.
    """
    canvas = Image.new('RGB', (im1.width, im1.height + im2.height))
    for img, top in ((im1, 0), (im2, im1.height)):
        canvas.paste(img, (0, top))
    return canvas
# Stack the two slice images vertically and write the result to disk.
top_slice = Image.open('slice1.png')
bottom_slice = Image.open('slice2.png')
# NOTE(review): purpose of this delay is unclear from the code — possibly
# waiting for the slices to be fully written by another process; confirm.
time.sleep(10)
get_concat_v(top_slice, bottom_slice).save('result.png')
5085813669 | import torch
import torch.nn.functional as F
from torch import nn
# Lookup tables mapping config strings to torch module classes.
_ACTIVATIONS = {
    "relu": nn.ReLU,
    "gelu": nn.GELU,
}

_POOLING = {
    "max": nn.MaxPool2d,
    "avg": nn.AvgPool2d,
}


class ConvUnit(nn.Module):
    """A Conv2d -> BatchNorm2d -> activation -> pooling building block.

    Activation and pooling layers are chosen by name via the
    ``_ACTIVATIONS`` / ``_POOLING`` tables; extra keyword arguments are
    forwarded to the activation constructor.
    """

    def __init__(
        self,
        input_channels: int,
        output_channels: int,
        kernel_size: int,
        stride: int,
        padding: int,
        pool_type: str,
        pool_kernel_size: int,
        pool_stride: int,
        activ_type: str,
        **activ_kwargs,
    ):
        super().__init__()
        activation_cls = _ACTIVATIONS[activ_type]
        pooling_cls = _POOLING[pool_type]
        self.conv = nn.Conv2d(
            input_channels,
            output_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
        )
        self.bn = nn.BatchNorm2d(output_channels)
        self.activ = activation_cls(**activ_kwargs)
        self.pool = pooling_cls(
            kernel_size=pool_kernel_size, stride=pool_stride
        )

    def forward(self, x):
        features = self.conv(x)
        normalized = self.bn(features)
        activated = self.activ(normalized)
        return self.pool(activated)
class MNISTConvEncoder(nn.Module):
    """Two-stage ConvUnit encoder producing flat 196-dim MNIST features."""

    # Flattened feature size: 28x28 input -> two stride-2 poolings -> 7x7
    # spatial map, times 4 channels = 196 (assumes 28x28 inputs — TODO confirm).
    backbone_output_size = 196

    def __init__(self, activ_type: str, pool_type: str):
        # Both units share the activation/pooling choice; each halves the
        # spatial resolution via its stride-2 pool.
        super().__init__()
        self.conv_unit1 = ConvUnit(
            input_channels=1,
            output_channels=4,
            kernel_size=3,
            stride=1,
            padding=1,
            pool_type=pool_type,
            pool_kernel_size=2,
            pool_stride=2,
            activ_type=activ_type,
        )
        self.conv_unit2 = ConvUnit(
            input_channels=4,
            output_channels=4,
            kernel_size=3,
            stride=1,
            padding=1,
            pool_type=pool_type,
            pool_kernel_size=2,
            pool_stride=2,
            activ_type=activ_type,
        )

    def forward(self, x: torch.Tensor):
        # Flatten the (N, 4, 7, 7) feature map to (N, 196).
        out1 = self.conv_unit1(x)
        out2 = self.conv_unit2(out1)
        return out2.view(-1, self.backbone_output_size)
class LinearHead(nn.Module):
    """Linear projection head with optional dropout on its output.

    :param input_dim: size of the incoming feature vector.
    :param output_dim: size of the projected output.
    :param dropout: dropout probability; 0 disables dropout entirely.
    """

    def __init__(self, input_dim: int, output_dim: int, dropout: float = 0):
        super().__init__()
        self.dropout = dropout
        self.head = nn.Linear(input_dim, output_dim)

    def forward(self, x: torch.Tensor):
        out = self.head(x)
        if self.dropout:
            # BUG FIX: F.dropout defaults to training=True, so the original
            # applied dropout even in eval mode, corrupting inference.
            # Respect the module's train/eval state instead.
            out = F.dropout(out, p=self.dropout, training=self.training)
        return out
| RajatRasal/Contrastive-Learning-with-MNIST | src/components.py | components.py | py | 2,365 | python | en | code | 0 | github-code | 13 |
23602590152 | import os
from tqdm import tqdm
from numpy.lib.function_base import append
# Input file lists: two (samples, labels) pairs of training lists that are
# merged further below.
label_name1 = '/mnt/cephfs/home/chenguo/code/FAS/feathernet2021/feathernet_mine/data/train_file_list/exp_train_set_21060301_exp_20210603221606NIR_train_label.txt'
train_name1 = '/mnt/cephfs/home/chenguo/code/FAS/feathernet2021/feathernet_mine/data/train_file_list/exp_train_set_21060301_exp_20210603221606NIR_train.txt'
label_name2 = '/mnt/cephfs/home/chenguo/code/FAS/feathernet2021/feathernet_mine/data/train_file_list/exp_train_set_nir_210510_exp_20210510171724_train_label.txt'
train_name2 = '/mnt/cephfs/home/chenguo/code/FAS/feathernet2021/feathernet_mine/data/train_file_list/exp_train_set_nir_210510_exp_20210510171724_train.txt'
# Build four lists, append one list onto the other while tracking the index;
# when removing duplicates remember to drop the matching label as well.
def getlist(filename):
    """Return the lines of *filename* as a list, newlines stripped."""
    with open(filename, 'r') as handle:
        return handle.read().splitlines()
def savelist(filename, lines):
    """Write *lines* to *filename*, one item per line (no trailing newline).

    Cleanup: the original bound the separator to a local named ``str``,
    shadowing the builtin; the join is now done inline. The second
    parameter no longer shadows the builtin ``list`` either.
    """
    with open(filename, 'w') as handle:
        handle.write('\n'.join(lines))
# Load both (samples, labels) list pairs.
list_train1 = getlist(train_name1)
list_train2 = getlist(train_name2)
list_train_label1 = getlist(label_name1)
list_train_label2 = getlist(label_name2)

print(len(list_train1))
print(len(list_train_label1))
print(len(list_train2))
print(len(list_train_label2))

list_check = []
list_label_check = []

# Merge: append entries from list 2 that are missing from list 1, keeping
# sample and label in lockstep. Only entries under the chosen folder are
# taken.
fold_path = '20210510_f3c_nir_print'
for sample, label in tqdm(list(zip(list_train2, list_train_label2))):
    if sample not in list_train1 and fold_path in sample:
        list_train1.append(sample)
        list_train_label1.append(label)

savelist('./train_file_list/0609_train_label.txt', list_train_label1)
savelist('./train_file_list/0609_train.txt', list_train1)

print(len(list_train1))
print(len(list_train_label1))
print(len(list_train2))
print(len(list_train_label2))
print(len(list_check))
print(len(list_label_check))
8536381472 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Created on Oct 30, 2017
@author: hadoop
'''
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import utils, encoders
from drawing.drawing_utils import draw_stock_with_candlestick_macd
from utils.log import log
from datetime import datetime
import mimetypes
import smtplib
import platform
import os
def get_attachment(filename):
    '''
    Build a MIME attachment object for *filename*.

    Text files become MIMEText parts; everything else (or files with a
    content encoding) is wrapped as base64-encoded application/octet-stream.

    Fixes: the file handle was opened without a context manager (leaked on
    error), and text files were read in binary mode — passing bytes to
    MIMEText fails on Python 3, so text files are now read in text mode.
    '''
    mimetype, mimeencoding = mimetypes.guess_type(filename)
    if mimeencoding or (mimetype is None):
        mimetype = 'application/octet-stream'
    maintype, subtype = mimetype.split('/')
    if maintype == 'text':
        with open(filename, 'r') as fd:
            retval = MIMEText(fd.read(), _subtype=subtype)
    else:
        with open(filename, 'rb') as fd:
            retval = MIMEBase(maintype, subtype)
            retval.set_payload(fd.read())
            encoders.encode_base64(retval)
    retval.add_header('Content-Disposition', 'attachment',
                      filename=filename.split('/')[-1])
    return retval
def send_email(to_addr, subject, email_body, file_list=None):
    '''
    Send an HTML email (with optional attachments) via the fixed SMTP relay.

    :arg str to_addr: recipient address.
    :arg str subject: message subject.
    :arg str email_body: HTML fragment placed between the header/footer.
    :arg list file_list: optional file paths attached via get_attachment().

    NOTE(review): SMTP server and sender are hard-coded below; the SMTP
    connection is never closed (no s.quit()) — TODO confirm and fix.
    '''
    html_start = '<font face="Courier New, Courier, monospace"><pre>'
    html_end = '</pre></font>'
    foot_msg = """
    -------------------------------------------------------------------------<br/>
    *** This message was auto-generated ***<br/>
    *** If you believe it was sent incorrectly, please e-mail ***<br/>
    *** Jinguang Liu (mailto:jliu@infinera.com) ***<br/>"""

    smtp_server = 'bruins.infinera.com'
    from_addr = 'jliu@infinera.com'
    msg = MIMEMultipart()
    msg['To'] = to_addr
    msg['From'] = 'Jinguang Liu <jliu@infinera.com>'
    msg['Subject'] = subject
    msg['Date'] = utils.formatdate(localtime=1)
    msg['Message-ID'] = utils.make_msgid()

    # Wrap the body in a monospace <pre> block and append the footer.
    message = html_start + email_body + foot_msg + html_end
    body = MIMEText(message, _subtype='html', _charset='utf-8')
    msg.attach(body)

    # files = ['mime_get_basic.py']
    if file_list is not None:
        for filename in file_list:
            msg.attach(get_attachment(filename))

    s = smtplib.SMTP(smtp_server)
    # s.set_debuglevel(1)
    s.sendmail(from_addr, to_addr, msg.as_string())
def send_alert_email(entity, subject, body, k_type):
    '''
    Render the stock's candlestick/MACD chart and email an alert with it.

    :arg entity: stock entity exposing ``codeId`` and ``name``.
    :arg str subject: alert subject suffix (prefixed with the code id).
    :arg str body: alert email body.
    :arg str k_type: K-line type used in the chart file name.

    BUG FIX: the chart collected in ``file_lst`` was never handed to
    send_email(), so alerts always went out without the attachment; it is
    now passed through.
    '''
    code_id = entity.codeId
    log.info("send alert email: " + code_id + " " + subject)
    file_lst = []
    # Per-day output directory, platform dependent.
    if platform.system() == "Linux":
        rdir = '/home/hadoop/quant/' + datetime.now().strftime("%Y-%m-%d")
    else:
        rdir = 'd:\\quant\\' + datetime.now().strftime("%Y-%m-%d")
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    # NOTE(review): .decode('utf-8').encode('gbk') only works on Python 2
    # byte strings — confirm the runtime before porting.
    fname = rdir + os.sep + k_type + "-" + code_id + "-" + \
            entity.name.decode('utf-8').encode('gbk') + "-" + \
            datetime.now().strftime("%Y-%m-%d-%H-%M-") + ".png"
    if draw_stock_with_candlestick_macd(code_id, ("W", "D", "30", "15"), fname):
        file_lst.append(fname)
    if platform.system() == "Linux":
        send_email("jliu@infinera.com", code_id + " " + subject, body, file_lst)
if platform.system() == "Linux":
send_email("jliu@infinera.com", code_id + " " + subject, body)
if __name__ == '__main__':
pass | liujinguang/stockquantpro | stock-quant-pro/libs/utils/emails.py | emails.py | py | 3,276 | python | en | code | 0 | github-code | 13 |
import csv
import matplotlib.pyplot as plt
import numpy as np


def read_signal(path):
    """Read a two-column CSV (time, value) into parallel float lists."""
    times, values = [], []
    with open(path) as f:
        for row in csv.reader(f):
            times.append(float(row[0]))
            values.append(float(row[1]))
    return times, values


def moving_average(values, window):
    """Forward moving average of *values* with the given *window*.

    Matches the original filter exactly: the sum at index i covers
    values[i:i+window] but is always divided by the full window, so the
    tail of the signal is biased toward zero.
    """
    return [sum(values[i:i + window]) / window for i in range(len(values))]


def one_sided_fft(values, sample_rate):
    """Return (frequencies, normalized FFT) for the positive half-spectrum."""
    n = len(values)
    spectrum = np.fft.fft(values) / n          # fft computing and normalization
    freqs = np.arange(n) * sample_rate / n     # two-sided frequency range
    half = int(n / 2)
    return freqs[:half], spectrum[:half]       # keep one side only


def plot_signal(name, window):
    """Plot raw and moving-average-filtered signal plus their FFTs (2x2 grid).

    Deduplicates the original script, which repeated this pipeline verbatim
    for signals A-D with only the file name and averaging window changed.
    """
    times, raw = read_signal('sig%s.csv' % name)
    rate = len(raw) / (times[-1] - times[0])   # sample rate from the time span
    smoothed = moving_average(raw, window)
    frq, raw_fft = one_sided_fft(raw, rate)
    _, smooth_fft = one_sided_fft(smoothed, rate)

    fig, ([ax1, ax3], [ax2, ax4]) = plt.subplots(2, 2)
    fig.subplots_adjust(hspace=0.5, wspace=0.5)

    ax1.title.set_text('sig%s: unfiltered [Signal v Time] plot' % name)
    ax1.plot(times, raw, 'k')
    ax1.set_xlabel('Time')
    ax1.set_ylabel('Amplitude')

    ax2.loglog(frq, abs(raw_fft), 'k')
    ax2.title.set_text('sig%s: unfiltered [FFT] plot' % name)
    ax2.set_xlabel('Freq (Hz)')
    ax2.set_ylabel('|Y(freq)|')

    ax3.title.set_text('sig%s: filtered [Signal v Time] (avg=%d)' % (name, window))
    ax3.plot(times, smoothed, 'r')
    ax3.set_xlabel('Time')
    ax3.set_ylabel('Amplitude')

    ax4.loglog(frq, abs(smooth_fft), 'r')
    ax4.title.set_text('sig%s: filtered [FFT] plot' % name)
    ax4.set_xlabel('Freq (Hz)')
    ax4.set_ylabel('|Y(freq)|')


# Per-signal averaging windows, as in the original script.
for signal_name, avg_window in (('A', 800), ('B', 800), ('C', 2800), ('D', 1000)):
    plot_signal(signal_name, avg_window)

plt.show()
28147081447 | import unittest
from character_sheet import *
class MyTests(unittest.TestCase):
def test_calc_level(self):
input_and_expected = (
(0, 1),
(14000, 6),
(265000, 18)
)
for inp, expected in input_and_expected:
self.assertEqual(calc_level(inp), expected)
def test_calc_prof_bonus(self):
input_and_expected = (
(1, 2),
(6, 3),
(18, 6),
)
for inp, expected in input_and_expected:
self.assertEqual(calc_prof_bonus(inp), expected)
def test_calc_ability_modifiers(self):
input_and_expected = (
(AbilityScores(12, 13, 11, 16, 18, 13), AbilityModifiers(1, 1, 0, 3, 4, 1)),
(AbilityScores(15, 11, 13, 17, 14, 15), AbilityModifiers(2, 0, 1, 3, 2, 2)),
)
for inp, expected in input_and_expected:
self.assertEqual(calc_ability_modifier(inp), expected)
def test_calc_saving_throws(self):
input_and_expected = (
(
{
"ability_mods": AbilityModifiers(0, 0, 0, 0, 0, 0),
"char_class": bard,
"prof_bonus": 2
},
SavingThrows(str=0, dex=2, con=0, int=0, wis=0, cha=2)
),
(
{
"ability_mods": AbilityModifiers(2, 3, 1, -2, -1, 0),
"char_class": monk,
"prof_bonus": 4
},
SavingThrows(str=6, dex=7, con=1, int=-2, wis=-1, cha=0)
)
)
for inp, expected in input_and_expected:
self.assertEqual(calc_saving_throws(**inp), expected)
def test_calc_skills(self):
inp = {
"ability_mods": AbilityModifiers(
str=0,
dex=1,
con=2,
int=3,
wis=4,
cha=5),
"skill_profs": ("acrobatics", "arcana", "deception"),
"prof_bonus": 2
}
expected = SkillScores(
acrobatics=3,
animal_handling=4,
arcana=5,
athletics=0,
deception=7,
history=3,
insight=4,
intimidation=5,
investigation=3,
medicine=4,
nature=3,
perception=4,
performance=5,
persuasion=5,
religion=3,
sleight_of_hand=1,
stealth=1,
survival=4
)
self.assertEqual(calc_skills(**inp), expected)
if __name__ == '__main__':
unittest.main() | Mubbly/5edCharacterSheet | tests.py | tests.py | py | 2,670 | python | en | code | 0 | github-code | 13 |
def spy_game(nums):
    """Print and return True iff 0, 0, 7 appear in *nums* in that order.

    BUG FIX: the original called tmp_str.replace("0", "", 1) but discarded
    the result (str.replace returns a new string), so its "second zero"
    index equaled the first and the check degraded to "any 0 before a 7" —
    e.g. [1, 0, 7] was wrongly reported True. A simple sequential matcher
    replaces the string juggling.
    """
    pattern = [0, 0, 7]
    matched = 0
    for num in nums:
        if num == pattern[matched]:
            matched += 1
            if matched == len(pattern):
                print(True)
                return True
    print(False)
    return False


spy_game([1,2,4,0,0,7,5]) # --> True
spy_game([1,0,2,4,0,5,7]) # --> True
spy_game([1,7,2,0,4,5,0]) # --> False
def solution(board, skill):
    """Count cells still positive after applying every skill.

    Each skill row is [type, r1, c1, r2, c2, degree]: type 1 attacks
    (subtracts degree) and type 2 heals (adds degree) over the inclusive
    rectangle (r1, c1)-(r2, c2). All skills are accumulated in a 2-D
    difference (imos) array, expanded with one prefix-sum pass per axis,
    then added onto the board.
    """
    rows, cols = len(board), len(board[0])
    # One extra row/column absorbs the out-of-range corner marks.
    diff = [[0] * (cols + 1) for _ in range(rows + 1)]

    for kind, top, left, bottom, right, degree in skill:
        delta = -degree if kind == 1 else degree
        diff[top][left] += delta
        diff[top][right + 1] -= delta
        diff[bottom + 1][left] -= delta
        diff[bottom + 1][right + 1] += delta

    # Expand the difference marks: left-to-right, then top-to-bottom.
    for r in range(rows):
        for c in range(cols):
            diff[r][c + 1] += diff[r][c]
    for c in range(cols):
        for r in range(rows):
            diff[r + 1][c] += diff[r][c]

    return sum(
        1
        for r in range(rows)
        for c in range(cols)
        if board[r][c] + diff[r][c] > 0
    )

print(solution([[5,5,5,5,5],[5,5,5,5,5],[5,5,5,5,5],[5,5,5,5,5]],
               [[1,0,0,3,4,4],[1,2,0,2,3,2],[2,1,0,3,1,2],[1,0,1,3,3,1]]))
print(solution([[1,2,3],[4,5,6],[7,8,9]],[[1,1,1,2,2,4],[1,0,0,1,1,2],[2,2,0,2,0,100]]))
''' 2차원 → 1차원으로 바꿔서
import itertools
def solution(board, skill):
answer = 0
row = len(board[1])
new_board=list(itertools.chain(*board))
for type_skill,r1,r2,c1,c2,degree in skill:
start_point=(r1*row)+r2
end_point=(c1*row)+c2
for i in range(start_point,end_point+1):
if type_skill==1:
new_board[i]-=degree
else:
new_board[i]+=degree
for i in range(0,len(new_board)):
if new_board[i]>0:
answer+=1
return answer
''' | BlueScreenMaker/333_Algorithm | 백업/220604~230628/Programmers/파괴되지 않는 건물.py | 파괴되지 않는 건물.py | py | 1,777 | python | en | code | 0 | github-code | 13 |
import csv
import random

dataPath = './train_data/dga-feed.txt'
resultPath = './train_data/binary_training.txt'
dgaTypePath = './conf/dga/dga_type_list.txt'

# DGA domains are marked with 1, whitelist websites are marked with 0.
# Parse the DGA feed: keep "domain,<dga family>" for every feed line.
with open(dataPath, "r") as feed:
    data = feed.read().split('\n')
    for i, line in enumerate(data):
        data[i] = line.split(',')[0] + ',' + line.strip('\n').strip('\r').strip(' ').split('Domain used by ')[1].split(' for ')[0].split(',')[0].split('(')[0].split(' - ')[0].split(' DGA')[0].lower().strip(' ')

# NOTE(review): this handle is opened for append but never written or
# closed — confirm whether the DGA-type list is supposed to be updated here.
f_dga = open(dgaTypePath, "a")

# Append whitelist domains (label 0) until the dataset is balanced against
# the DGA samples.
with open('./white_list_data/white_list.csv') as whiteList:
    for row in csv.reader(whiteList, delimiter=','):
        if row[1] == 'Domain':
            continue
        data.append(row[1] + ',0')
        if len(data) >= 847622 * 2:
            break

random.shuffle(data)

f_result = open(resultPath, "a")
for entry in data:
    f_result.write(entry + "\n")
24534408840 | from ds_templates import test_series as ts
from test_cases import tests
"""
1) Parse through list word by word, sorting each word to obtain a 'key'
2) Use sorted word as key for 'anas' and the value is a list with words within.
3) One full list has been parsed, iterate through anas.values() and append to results list
"""
def group_anagrams(strs: list[str]) -> list[list[str]]:
anas, res = {}, []
for word in strs:
key = ''.join(sorted(word))
anas.setdefault(key, [])
anas[key].append(word)
for lst in anas.values():
res.append(lst)
return res
strs1 = ['stuff', 'fusft', 'ffust', 'cats', 'stac', 'atcs', 'dogg', 'god']
print(group_anagrams(strs1))
| Hintzy/leetcode | Medium/49_group_anagrams/group_anagrams.py | group_anagrams.py | py | 701 | python | en | code | 0 | github-code | 13 |
41420985869 | """
空气质量计算AQI
作者:Yang
功能:AQI计算.
新增功能:读取CSV文件.
新增功能:读取文件,判断格式调取相应的操作,利用OS模块.
新增功能:爬虫-网页访问.
版本:5.0
日期:31/08/2018
"""
import requests
def get_html_text(url):
"""
返回url的文本
"""
r = requests.get(url, timeout=5)
print(r.status_code)
return r.text
def read_city_name(filepath):
f = open(filepath, "r")
# 读取全部内容 ,并以列表方式返回
for line in f.readlines():
line = line.strip('\n')
return line
print(line)
f.close()
def main():
"""
主函数
"""
filepath = 'city_name.txt'
city_pinyin = read_city_name(filepath)
print(city_pinyin)
# city_pinyin = input('请输入城市拼音:')
url = 'http://pm25.in/' + city_pinyin
print(url)
url_text = get_html_text(url)
aqi_div = """<div class="span12 data">
<div class="span1">
<div class="value">
"""
index = url_text.find(aqi_div)
begin_index = index + len(aqi_div)
end_index = begin_index + 2
aqi_val = url_text[begin_index: end_index]
print('空气质量为:{}'.format(aqi_val))
if __name__ == '__main__':
main() | Lighthouse-Yang/python_learning_test | AQI/AQI_5.0.py | AQI_5.0.py | py | 1,374 | python | zh | code | 1 | github-code | 13 |
70095269137 | # def get_url(url):
# # do something 1
# html = get_html(url) # 耗IO的操作,等待网络请求,此处暂停,切换到其他函数
# # parse html
# urls = parse_url(html)
"""
传统的函数调用过程:A->B->C,栈
我们需要一个可以暂停的函数,并且可以在适当的情况下恢复该函数
出现了协成->有多个入口的函数,可以暂停的函数(可以向暂停的地方传入值),生成器yield
"""
def gen_func():
# 可以产出值,可以接收值(调用方传递进来的值)
html = yield "https://baidu.com"
print(html)
yield 2
yield 3
return "bobby"
"""
1.生成器不只是可以产出值,还可以接收值
1.启动生成器的方式有两种,next(),send()
"""
if __name__ == "__main__":
gen = gen_func()
url = next(gen)
# 第一次调用gen的方法且为send时,只能传递None
# 在调用send发送非none值之前,我们必须启动一次生成器方式有两种:1.gen.send(None) 2.next(gen)
html = "vhbn"
print(gen.send(html))
print(url)
# send可以传递值进入生成器内部,同时还可以重启生成器执行下一个yield的位置
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen)) | Zbiang/Python-IO | multi-threaded and multi-process/coroutine.py | coroutine.py | py | 1,306 | python | zh | code | 0 | github-code | 13 |
27152233454 | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
#app_name = "repository"
urlpatterns = [
path('<int:id>', views.index, name='repository'),
path('newRepository', views.newRepository, name='newRepository'),
path('all_repositories', views.all_repositories, name = 'all_repositories'),
path('addRepository/', views.addRepository, name = 'addRepository'),
path('editRepository/', views.editRepository, name = 'editRepository'),
path('transferToEditRepository/<int:id>', views.transferToEditRepository, name='transferToEditRepository'),
path('deleteRepository/<int:id>', views.deleteRepository, name='deleteRepository'),
path('collaborators/<int:id>', views.collaborators, name='collaborators'),
path('<int:id>/<int:branch_id>', views.repo_branch, name='repo_branch'),
path('watchRepository/<int:id>', views.watchRepository, name = 'watchRepository'),
path('watchers/<int:id>', views.watchers, name='watchers'),
path('starRepository/<int:id>', views.starRepository, name = 'starRepository'),
path('stargazers/<int:id>', views.stargazers, name='stargazers'),
path('forkRepository/<int:id>', views.forkRepository, name = 'forkRepository'),
path('forkers/<int:id>', views.forkers, name='forkers'),
path('remove_collaborator/<int:id>/<int:developer_id>', views.remove_collaborator, name='remove_collaborator'),
path('add_collaborator/<int:id>/<int:developer_id>', views.add_collaborator, name='add_collaborator'),
path('repo_developer/<int:id>/<int:developer_id>', views.repo_developer, name='repo_developer'),
path('searchInThisRepo/<int:id>', views.search_in_this_repo, name = 'search_in_this_repo'),
path('searchedRepoIssues/<int:id>', views.searched_repo_issues, name = 'searched_repo_issues'),
path('searchedRepoCommits/<int:id>', views.searched_repo_commits, name = 'searched_repo_commits'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| marijamilanovic/UksGitHub | Uks/repository/urls.py | urls.py | py | 2,022 | python | en | code | 0 | github-code | 13 |
18769405092 | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from orders.models import Order, OrderItem
from .forms import LoginForm, ProfileForm, UserForm, UserRegistrationForm
from .models import Profile
@login_required
def dashboard(request):
user = request.user
profile = get_object_or_404(Profile, user=user.id)
return render(request, 'account/dashboard.html', {'section': 'dashboard', 'user': user, 'profile': profile})
def register(request):
if request.method == "POST":
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
new_user = user_form.save(commit=False)
new_user.set_password(user_form.cleaned_data['password'])
new_user.save()
Profile.objects.create(user=new_user)
return render(request, 'account/register_done.html', {'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request, 'account/register.html', {'user_form': user_form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserForm(instance=request.user, data=request.POST)
profile_form = ProfileForm(instance=request.user.profile, data=request.POST, files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, 'account/profile.html', {'user_form': user_form, 'profile_form': profile_form})
@login_required
def orders(request):
user = request.user
orders = user.orders.filter(user=user.id).values()
profile = get_object_or_404(Profile, user=user.id)
context = {'user': user, 'profile': profile, 'orders': orders}
return render(request, 'account/orders.html', context) | JeffersonRolino/xpiece | account/views.py | views.py | py | 2,056 | python | en | code | 0 | github-code | 13 |
4346851923 | # 13305: 주유소
n = int(input()) # 도시의 수
km = list(map(int, input().split())) # 각 도시 사이 거리 (n-1)개
city = list(map(int, input().split())) # n개 도시
result = km[0] * city[0]
for i in range(1, n-1) :
if city[i-1] > city[i] :
result += km[i] * city[i]
else : # 전것이 다음것보다 더 싸면 그대로 유지
city[i] = city[i-1]
result += km[i] * city[i]
print(result)
| mosePark/Algorithm | 그리디/13305: 주유소.py | 13305: 주유소.py | py | 436 | python | ko | code | 0 | github-code | 13 |
37164760463 | import os
from .curl import (
curl_available,
PycurlTransport,
)
from .requests import requests_multipart_post_available
from .ssh import (
rsync_get_file,
rsync_post_file,
scp_get_file,
scp_post_file,
)
from .standard import UrllibTransport
if curl_available:
from .curl import (
get_file,
post_file,
)
elif requests_multipart_post_available:
from .requests import (
get_file,
post_file,
)
else:
from .poster import (
get_file,
post_file,
)
def get_transport(transport_type=None, os_module=os, transport_params=None):
transport_type = _get_transport_type(transport_type, os_module)
if not transport_params:
transport_params = {}
if transport_type == 'urllib':
transport = UrllibTransport(**transport_params)
else:
transport = PycurlTransport(**transport_params)
return transport
def _get_transport_type(transport_type, os_module):
if not transport_type:
use_curl = os_module.getenv('PULSAR_CURL_TRANSPORT', "0")
# If PULSAR_CURL_TRANSPORT is unset or set to 0, use default,
# else use curl.
if use_curl.isdigit() and not int(use_curl):
transport_type = 'urllib'
else:
transport_type = 'curl'
return transport_type
__all__ = (
'get_transport',
'get_file',
'post_file',
'rsync_get_file',
'rsync_post_file',
'scp_get_file',
'scp_post_file',
)
| galaxyproject/pulsar | pulsar/client/transport/__init__.py | __init__.py | py | 1,489 | python | en | code | 37 | github-code | 13 |
34420981898 | # coding=utf-8
"""Compute the solution of the Day 1: Calorie Counting puzzle."""
# Standard library imports:
from pathlib import Path
# Local application imports:
from aoc_tools import read_puzzle_input
from aoc2022.day_1.tools import ExpeditionSupplies
def compute_solution() -> tuple[int, int]:
"""Compute the answers for the two parts of this day."""
input_file = Path(__file__).parents[1] / "day_1/puzzle_input.txt"
lines = read_puzzle_input(input_file=input_file)
expedition = ExpeditionSupplies(calories_list=lines)
elves = expedition.sort_elves_by_calories()
top_one_calories = elves[0].total_calories
top_three_calories = sum(elf.total_calories for elf in elves[:3])
return top_one_calories, top_three_calories
| JaviLunes/AdventCode2022 | src/aoc2022/day_1/solution.py | solution.py | py | 755 | python | en | code | 0 | github-code | 13 |
30365096821 | from __future__ import print_function, unicode_literals, division
def aufg1():
import ROOT as r
import numpy as np
import math as m
GAMMA = 2.7 - 1
# Datei
datei = r.TFile("NeutrinoMC.root", "RECREATE")
############################
#a
############################
data = np.zeros((4, 1))
tree_a = r.TTree("Signal_MC", "Signal_MC")
tree_a.Branch("Energie", data[0], "x/D")
#Signale generieren: gibt ein TH1D-Objekt zurueck. xmin: untere Grenze, n:anzahl der Ereignisse
def signalereignis_hist(xmin, n):
x_min = np.power(xmin, GAMMA)
x_max = 0
#Zufallsgenerator erzeugen
rnd1 = r.TRandom3(1);
#Histogramm fuer das Signal
histogramm_ort = r.TH1D("histogramm_ort","histogramm_ort", 30, np.log10(xmin), 4)
histogramm_ort.GetXaxis().SetTitle("log(E/TeV)")
histogramm_ort.GetYaxis().SetTitle("Anzahl Ereignisse")
#Zufallszahlen zwischen x_max un x_min erzeugen und in Histogramm speichern
for i in range(n):
data[0] = 1./np.power(x_min+rnd1.Rndm()*(x_max-x_min), 1./GAMMA)
tree_a.Fill()
histogramm_ort.Fill(np.log10(data[0]))
return histogramm_ort
tree_a.Write()
#gewuenschte Anzahl N der Signalereignisse
N = np.int_(1e5)
xmin = 1.0
xmax = 1e6
#Datei
canv_signal = r.TCanvas("canv_signal", "canv_signal", 800, 600)
canv_signal.SetLogy()
signal = signalereignis_hist(xmin, N)
signal.DrawCopy()
canv_signal.SaveAs("Blatt3A1a.pdf")
#signal.Write()
#canv_signal.Write()
#b
#Erzeugung eines neuen Tree
tree_b = r.TTree("Signal_MC_Akzeptanz", "Signal_MC_Akzeptanz")
#Abspeichern der Ergebnisse
tree_b.Branch("Signal_MC_Akzeptanz", data[0], "x/D")
#Berücksichtigung der Akzeptanz mit der Neumannmethode
def akzeptanz_hist(xmin, n):
x_min = np.power(xmin, GAMMA)
x_max = 0
#Zufallsgenerator mit gleichem Seed wie im ersten Aufgabenteil
rnd1 = r.TRandom3(1)
#Zufallsgenerator für die Akzeptanz
rnd2 = r.TRandom3(2)
#Erzeugung eines TH1D Objekts
hist_akzeptanz = r.TH1D("hist_akzeptanz", "hist_akzeptanz", 20, np.log10(xmin), 4)
hist_akzeptanz.GetXaxis().SetTitle("log(E/TeV)")
hist_akzeptanz.GetYaxis().SetTitle("Anzahl Ereignisse")
for i in range(n):
ereignis = 1./np.power(x_min+rnd1.Rndm()*(x_max-x_min), 1./GAMMA) #Generation der Signalevents wie im ersten Teil
y = np.power((1 - m.exp(-1*ereignis/2)),3); #Einsetzen der Events in Akzeptanzfunktion
y_rndm = rnd2.Rndm(); #Erzeugung von gleichverteilten Zahlen
if (y_rndm <= y): #Anwendung des Rückweisungskriteriums
hist_akzeptanz.Fill(np.log10(ereignis))
data[1] = ereignis
tree_b.Fill()
return hist_akzeptanz
tree_b.Write()
#Erzeugung eines Canvas
canv_akzeptanz = r.TCanvas("canv_akzeptanz", "canv_akzeptanz", 800,600)
canv_akzeptanz.SetLogy()
akzeptanz = akzeptanz_hist(xmin, N)
akzeptanz.DrawCopy()
canv_akzeptanz.SaveAs("Blatt3A1b.pdf")
#c
#Standardabweichung
sigma = 0.2
def energiemessung_hist(xmin, n, sigma):
x_min = np.power(xmin, GAMMA)
x_max = 0
rnd1 = r.TRandom3(1)
rnd2 = r.TRandom3(2)
rnd3 = r.TRandom3(3)
hist_energie = r.TH1D("hist_energie", "hist_energie", 20, 0, 5)
hist_energie.GetXaxis().SetTitle("log(Anzahl Hits)")
hist_energie.GetYaxis().SetTitle("Anzahl Ereignisse")
for i in range(n):
ereignis = 1./np.power(x_min + rnd1.Rndm()*(x_max - x_min), 1./GAMMA)
y = np.power((1-m.exp(-1*ereignis/2)),3)
y_rndm = rnd2.Rndm()
if (y_rndm <= y):
while(1): #immer true
v1 = rnd3.Rndm()*2 - 1
v2 = rnd3.Rndm()*2 - 1
q = v1*v1 + v2*v2
if ((q>=1) or (q==0)): #Check ob Rückweisungsbedingung erfüllt ist
continue
else:
z1 = m.sqrt((-2*np.log(q))/q) #Gaussverteilte Zufallszahl
x1 = v1*z1
#Transformation der Gaussverteilten von (0,1) auf (E,0.2E)
x1 = sigma*ereignis*x1 + ereignis
hits = np.int_(10*x1)
if(hits>0):
hist_energie.Fill(np.log10(hits))
break
return hist_energie
canv_energie = r.TCanvas("canv_energie", "canv_energie", 800,600)
canv_energie.SetLogy()
energie = energiemessung_hist(xmin, N, sigma)
energie.DrawCopy()
canv_energie.SaveAs("Blatt3A1c.pdf")
#d
tree_d = r.TTree("Signal_MC_Akzeptanz", "Signal_MC_Akzeptanz")
tree_d.Branch("AnzahlHits", data[1], "b/D")
tree_d.Branch("x", data[2], "x/D")
tree_d.Branch("y", data[3], "y/D")
def ortsmessung_hist(xmin, n, sigma):
x_min = np.power(xmin, 1.7)
x_max = 0
rnd1 = r.TRandom3(1)
rnd2 = r.TRandom3(2)
rnd3 = r.TRandom3(3)
rnd4 = r.TRandom3(4)
hist_ort = r.TH2D("hist_ort", "hist_ort", 100, 0, 10,100,0,10)
hist_ort.GetXaxis().SetTitle("x")
hist_ort.GetYaxis().SetTitle("y")
mittelwert1 = 7.0
mittelwert2 = 3.0
for i in range(n):
ereignis = 1./np.power(x_min + rnd1.Rndm()*(x_max - x_min), 1./GAMMA)
data[0] = ereignis
y = np.power((1-m.exp(-1*ereignis/2)),3)
y_rndm = rnd2.Rndm()
if (y_rndm <= y):
while(1): #immer true, Abbruchbedingung weiter unten
u = rnd3.Rndm()*2 - 1
v = rnd3.Rndm()*2 - 1
q = u*u + v*v
if(q>=1 or q==0):
continue
else:
p = m.sqrt((-2*np.log(q))/q)
x1 = u*p
#Trafo von (0,1) auf (E,0.2E)
x1 = sigma*ereignis*x1 + ereignis
hits = np.int_(10*x1)
#bis hierhin der gleiche Code wie in a)-c)
if (hits>0):
data[1] = hits
sigma_ort = 1./np.log10(hits)
#Fluktuation der Zufallszahlen um die Mittelwerte 7 und 3 herum
x_ort = rnd4.Gaus()*sigma_ort + mittelwert1
data[2] = x_ort
y_ort = rnd4.Gaus()*sigma_ort + mittelwert2
data[3] = y_ort
tree_d.Fill()
hist_ort.Fill(x_ort, y_ort) #Schreibe die Ortskoordinaten in Histogramm
break
return hist_ort
canv_ort = r.TCanvas("canv_ort", "canv_ort", 800,600)
tree_d.Write()
ort = ortsmessung_hist(xmin, N, sigma)
ort.DrawCopy("COL")
canv_ort.SaveAs("Blatt3A1d.pdf")
#ort.Write()
#canv_ort.Write()
#e
treee = r.TTree("Untergrund_MC", "Untergrund_MC")
treee.Branch("Energie", data[0], "a/D")
treee.Branch("untergrund_events", data[1], "b/D")
treee.Branch("x", data[2], "x/D")
treee.Branch("y", data[3], "y/D")
def untergrund_hist(n):
rnd5 = r.TRandom3(5)
sigma1 = 3.
mittelwert1 = 5.
sigma2 = 3.
mittelwert2 = 5.
rho = 0.5
hist_untergrund = r.TH2D("hist_untergrund", "hist_untergrund", 100, 0, 10, 100 , 0, 10)
hist_uenergie = r.TH1D("hist_uenergie","hist_uenergie",20,0,4)
hist_uenergie.GetXaxis().SetTitle("log(Anzahl Hits)");
hist_uenergie.GetYaxis().SetTitle("Anzahl Ereignisse");
for i in range(n):
#zufaellige Anzahl der Hits
untergrund_events = rnd5.Gaus(2,1)
hist_uenergie.Fill(untergrund_events)
x = rnd5.Gaus()
y = rnd5.Gaus()
#Transformation der Zufallszahlen mit den angegebenen Formeln
x1 = m.sqrt(1.- rho*rho)*sigma1*x + rho*sigma1*y + mittelwert1
y1 = sigma2*y + mittelwert2
hist_untergrund.Fill(x1,y1)
data[1] = untergrund_events
data[2] = x
data[3] = y
treee.Fill()
return hist_untergrund, hist_uenergie
untergrund, uenergie = untergrund_hist(100*N)
canv_untergrund = r.TCanvas("canv_untergrund", "canv_untergrund", 800,600)
untergrund.DrawCopy("COL")
canv_untergrund.SaveAs("Blatt3A1e1.pdf")
#untergrund.Write()
#canv_untergrund.Write()
treee.Write()
canv_uenergie = r.TCanvas("canv_uenergie", "canv_uenergie", 800,600)
uenergie.DrawCopy()
canv_uenergie.SaveAs("Blatt3A1e2.pdf")
#uenergie.Write()
#canv_uenergie.Write()
datei.Close()
if __name__ == '__main__':
aufg1()
| chasenberg/smd1516 | sheet3/aufgabe1.py | aufgabe1.py | py | 9,193 | python | de | code | 0 | github-code | 13 |
28989726634 | '''
https://leetcode.com/problems/letter-combinations-of-a-phone-number/
'''
def letterCombinations(A):
letter_pad = {'0':['0'],'1':['1'],'2':['a','b','c'],'3':['d','e','f'],'4':['g','h','i'],
'5':['j','k','l'],'6':['m','n','o'],'7':['p','q','r','s'],'8':['t','u','v'],
'9':['w','x','y','z']}
res = []
n = len(A)
if n == 0:
return []
def _solve(index1,_res):
if len(_res) == n:
res.append(_res)
return
for i in range(index1,n):
digit = letter_pad[A[i]]
for j in range(len(digit)):
_solve(i+1,_res+digit[j])
_solve(0,"")
return res
print(letterCombinations("23")) | riddheshSajwan/data_structures_algorithm | recursion/letterPhone.py | letterPhone.py | py | 719 | python | en | code | 1 | github-code | 13 |
7702145221 | # Author:成为F
# -*- codeing = utff-8 -*-
# @Time : 2020/11/7 19:52
# @Author : 成为F
# @File : 正则1.py
# @Software : PyCharm
import requests
import re
import os
if __name__ == '__main__':
headers = {
'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36'
}
url = 'https://www.qiushibaike.com/imgrank/page/%d/'
for pagenum in range(1,3):
new_url = format(url%pagenum)
page_text = requests.get(url=new_url,headers=headers).text
#正则表达式寻找图片url内容
ex = '<div class="thumb">.*?<img src="(.*?)" alt=.*?</div>'
img_src_list = re.findall(ex,page_text,re.S)
#print(img_src_list)
#创建文件夹保存图片
if not os.path.exists('./正则1图片'):
os.mkdir('./正则1图片')
for src in img_src_list:
src = 'https:'+src
img_data = requests.get(url=src,headers=headers).content
#生成图片名称
img_name = src.split('/')[-1]
#图片地址
img_path = './正则1图片/'+img_name
with open(img_path,'wb')as fp:
fp.write(img_data)
print(img_name,'over')
| pale-F/Reptile | pythin/venv/str/数据解析/正则1.py | 正则1.py | py | 1,299 | python | en | code | 0 | github-code | 13 |
8573240044 | # compose_flask/app.py
from flask import Flask
from redis import Redis
app = Flask(__name__)
redis = Redis(host='redis', port=6379)
@app.route('/')
def hello():
redis.incr('hits')
counter = redis.get('hits').decode('utf-8')
return f"This Compose/Flask demo has been viewed {counter} time(s)."
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| EekaMau5/compose_flask | app.py | app.py | py | 377 | python | en | code | 0 | github-code | 13 |
18120130656 | import sys, os, subprocess
from characters import Characters
# from board import Board
#mario cannot go out of window!!!!!!!!!!!!!!!!!!
class Mario(Characters):
def __init__(self, char, height, width):
Characters.__init__(self, char, height, width)
self.matrix = [['(', '^', '^', ')'], ['/', ']', '[', '\\']]
# self.char = "Mario"
self.height = height
self.width = width
self.lives = 3
self.dx = 0
def FindDx(self, board, y):
if self.y <= board.frame_width//2:
self.dx = 0
elif self.y >= 354:
self.dx = self.y - 400
else:
self.dx = self.y - (board.frame_width//2)
return self.dx
def checkDeath(self, enemy, pit1, pit2, board):
for e in enemy:
if (self.y == e.y+4 or self.y+4 == e.y) and (self.x == e.x or self.x == e.x+1 or self.x==e.x-1):
self.lives -= 1
self.NewPosition(board,26,1)
for p in pit1:
if (self.y == p.y) and (self.x+2 == p.x):
self.lives -= 1
self.NewPosition(board,26,1)
for p2 in pit2:
if (self.y == p2.y) and (self.x+2 == p2.x):
self.lives -= 1
self.NewPosition(board,26,1)
if self.lives == 0:
self.destroy(board)
def destroy(self, board):
self.matrix = [['(', 'X', 'X', ')'], [' ', ']', '[', ' ']]
board.place (self, self.x, self.y)
os.system('clear')
subprocess.Popen(['osascript', './stopscript'])
print(board.returnStringBoard(self))
print("SORRY, YOU HAVE DIED.\nGAME OVER\n")
sys.exit() | hellomasaya/Mario-game | mario.py | mario.py | py | 1,686 | python | en | code | 0 | github-code | 13 |
26414802042 | from ursina import *
import Game
from ...Test import Test
from ...TestTypes import TestTypes
from Overlays.Notification import Notification
from Content.Enemies.TestEnemy import TestEnemy
from Content.Characters.TestCharacter import TestCharacter
from Content.Weapons.Knife.Knife import Knife
class Fight(): # just a name class
pass
class FightTest(Test):
def __init__(self):
super().__init__(Fight, TestTypes.NotScreenTest)
self.on_load += self._load
self.on_unload += self._unload
self._player = None
self._player: TestCharacter
self._enemy = None
self._enemy: TestEnemy
def _load(self):
def set_scene():
self._player = TestCharacter(position=(0, -2.5))
self._player.apply_effect(Game.content_manager.get_effect("Stun"))
self._player.scale = (1.5, 1.5)
self._player.swap_weapon(Knife())
self._enemy = TestEnemy(position=(0, 0))
self._enemy.apply_effect(Game.content_manager.get_effect("Stun"))
self._enemy.scale = (1.5, 1.5)
self._player.spawn(), self._enemy.spawn()
def player_attack():
if self._player:
self._player.attack((0, 1))
def enemy_attack():
if self._enemy:
self._enemy.attack((0, -1))
def randomize_weapons():
random_weapon = random.choice(Game.content_manager.weapons)().name
Game.notification_manager.add_notification(Notification(f"Got {random_weapon}", color=color.blue))
self._player.swap_weapon(Game.content_manager.get_weapon(random_weapon))
self._enemy.swap_weapon(Game.content_manager.get_weapon(random_weapon))
self.make_button("Set Scene", set_scene)
self.get_button(index=0).on_click()
self.make_button("Player Attack", player_attack)
self.make_button("Enemy Attack", enemy_attack)
self.make_button("Random Weapon", randomize_weapons)
def _unload(self):
destroy(self._player)
destroy(self._enemy)
| GDcheeriosYT/Gentrys-Quest-Ursina | Screens/Testing/TestingCategories/Gameplay/FightTest.py | FightTest.py | py | 2,100 | python | en | code | 1 | github-code | 13 |
29033393404 | #data preprocessing
#import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder_independantvar=LabelEncoder()
dataset=pd.read_excel('Friesian.xlsx')
#extracting Feature columns that is independant variable matrix
#we could use -1 instead of 3 same result will come
dataset.head()
for column in dataset.columns:
if dataset[column].dtype == type(object):
dataset[column] = labelencoder_independantvar.fit_transform(dataset[column])
# =============================================================================
independantFeatures=dataset.iloc[:, :-1].values
depandantvar=dataset.iloc[:,6].values
# #special case for showing missing nan vlue
# np.set_printoptions(threshold=np.nan)
# #handling Missing value by importing Imputer from SKLern
# from sklearn.preprocessing import Imputer
# imputer=Imputer(missing_values='NaN',strategy='mean',axis=0)
# myimputer=imputer.fit(independantFeatures[:, 1:3])
# =============================================================================
#now transform the missingvalue with columns mean value and replace
#with nan value
# =============================================================================
# independantFeatures[:, 1:3]=myimputer.transform(independantFeatures[:, 1:3])
#
# #hadling categorical varilbe encoding
#this will fit and trnsform encoded value to independant features country column
independantFeatures[:,0]=labelencoder_independantvar.fit_transform(independantFeatures[:,0])
#print(list(independentFeatures))
#onehotencoder_independantvar=OneHotEncoder(categorical_features=[0])
#independantFeatures=onehotencoder_independantvar.fit_transform(independantFeatures).toarray()
# =============================================================================
# dealing with dependant variable categorical data
# =============================================================================
labelencoder_dependantvar=LabelEncoder()
depandantvar=labelencoder_dependantvar.fit_transform(depandantvar)
#print(list(depandantvar))
#
# =============================================================================
#splitting the dataset into Training set and Test set
from sklearn.cross_validation import train_test_split
independantVartrain,independantVartest,dependantVartrain,dependantVartest=train_test_split(independantFeatures,depandantvar, test_size=0.2,random_state=0)
#Feature scaling
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn import svm
scIvar=StandardScaler()
independantVartrain=scIvar.fit_transform(independantVartrain)
independantVartest=scIvar.fit_transform(independantVartest)
print(dependantVartrain)
dependantVartrain = np.reshape(dependantVartrain, (-1, 2))
dependantVartest = np.reshape(dependantVartest, (-1, 2))
print(dependantVartrain)
dependantVartrain=scIvar.fit_transform(dependantVartrain)
dependantVartest=scIvar.transform(dependantVartest)
dependantVartrain = np.reshape(dependantVartrain, (-1, 1))
dependantVartest = np.reshape(dependantVartest, (-1, 1))
print(dependantVartrain)
print(independantVartrain)
import statsmodels.api as sm
import matplotlib.pyplot as plt
est = sm.OLS(dependantVartrain, independantVartrain).fit()
print(est.summary())
#print(len(independantVartrain), len(dependantVartrain))
clf = LinearRegression()
#independantVartrain = np.argmax(independantVartrain, axis=1)
#independantVartest = np.argmax(independantVartest, axis=1)
clf.fit(dependantVartrain, independantVartrain)
confidence = clf.score(dependantVartest, independantVartest)
print(confidence)
print(dependantVartest, independantVartest)
print(dependantVartrain, independantVartrain)
clf = svm.SVR(kernel='linear')
independantVartrain = np.argmax(independantVartrain, axis=1)
independantVartest = np.argmax(independantVartest, axis=1)
clf.fit(dependantVartrain, independantVartrain)
confidence1 = clf.score(dependantVartest, independantVartest)
print(confidence1)
clf = svm.SVR(kernel='poly')
#independantVartrain = np.argmax(independantVartrain, axis=1)
#independantVartest = np.argmax(independantVartest, axis=1)
clf.fit(dependantVartrain, independantVartrain)
confidence2 = clf.score(dependantVartest, independantVartest)
print(confidence2)
clf = svm.SVR(kernel='rbf')
#independantVartrain = np.argmax(independantVartrain, axis=1)
#independantVartest = np.argmax(independantVartest, axis=1)
clf.fit(dependantVartrain, independantVartrain)
confidence3 = clf.score(dependantVartest, independantVartest)
print(confidence3)
clf = svm.SVR(kernel='sigmoid')
#independantVartrain = np.argmax(independantVartrain, axis=1)
#independantVartest = np.argmax(independantVartest, axis=1)
clf.fit(dependantVartrain, independantVartrain)
confidence4 = clf.score(dependantVartest, independantVartest)
print(confidence4)
#for k in ['linear','poly','rbf','sigmoid']:
# clf = svm.SVR(kernel=k)
# independantVartrain = np.argmax(independantVartrain, axis=1)
# independantVartest = np.argmax(independantVartest, axis=1)
# clf.fit(dependantVartrain, independantVartrain)
# confidence1 = clf.score(dependantVartest, independantVartest)
# print(k,confidence1)
| labibmasud251/SVR | mydataprocess.py | mydataprocess.py | py | 5,241 | python | en | code | 0 | github-code | 13 |
42520625781 | #Saltelli algorithm for computing firs and total order sensitivity indices
import numpy as np
import chaospy as cp
from numpy.core.fromnumeric import size
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, './Assignment1')
from functions import c
import parameters as par
x = par.x
t = par.t
fig, axes = plt.subplots(nrows = len(x), ncols = len(t))
fig.suptitle('Sobol indices', fontsize=20)
width = 0.3
axisLabelFontSize = 10
#fig2, axes2 = plt.subplots(nrows = len(x), ncols = len(t))
#fig2.tight_layout()
file = open('output2.txt', 'w')
#for N in [1000, 10000, 100000]: #sample size
N = 2**16
print("Sample size =", N)
print()
file.write("Sample size = ")
file.write(str(N))
file.write('\n')
file.write('\n')
file.write('\n')
for distIndex, distance in enumerate(x):
for timeIndex, timestep in enumerate(t):
# Get Matrix A
n = cp.Uniform(par.nRange[0], par.nRange[1])
D = cp.Uniform(par.DRange[0], par.DRange[1])
q = cp.Uniform(par.qRange[0], par.qRange[1])
Lambda = cp.Uniform(par.LambdaRange[0], par.LambdaRange[1])
jointdist = cp.J(n, D, q, Lambda)
A = np.transpose(jointdist.sample(size = N))
#transpose, so that it's the same as in the algorithm description
#print(A.shape)
#print(np.max(A, axis = 0))
#print(np.min(A, axis = 0))
# Get matrix B
n2 = cp.Uniform(par.nRange[0], par.nRange[1])
D2 = cp.Uniform(par.DRange[0], par.DRange[1])
q2 = cp.Uniform(par.qRange[0], par.qRange[1])
Lambda2 = cp.Uniform(par.LambdaRange[0], par.LambdaRange[1])
jointdist2 = cp.J(n2, D2, q2, Lambda2)
B = np.transpose(jointdist2.sample(size = N))
#https://stackoverflow.com/questions/3059395/numpy-array-assignment-problem
Cn = B.copy()
CD = B.copy()
Cq = B.copy()
CLambda = B.copy()
Cn[:,0] = A[:,0]
CD[:,1] = A[:,1]
Cq[:,2] = A[:,2]
CLambda[:,3] = A[:,3]
# Get concentrations
yA = c(distance, timestep, par.M, A[:,0], A[:,1], A[:,2], A[:,3])
yB = c(distance, timestep, par.M, B[:,0], B[:,1], B[:,2], B[:,3])
yCn = c(distance, timestep, par.M, Cn[:,0], Cn[:,1], Cn[:,2], Cn[:,3])
yCD = c(distance, timestep, par.M, CD[:,0], CD[:,1], CD[:,2], CD[:,3])
yCq = c(distance, timestep, par.M, Cq[:,0], Cq[:,1], Cq[:,2], Cq[:,3])
yCLambda = c(distance, timestep, par.M, CLambda[:,0], CLambda[:,1], CLambda[:,2], CLambda[:,3])
#We need nicer notations
print("x = ", distance, "time = ", timestep)
file.write("x = ")
file.write(str(distance))
file.write(" m")
file.write(" time = ")
file.write(str(timestep))
file.write(" days")
file.write('\n')
file.write('\n')
# First order sobol
file.write("First order Sobol indices")
file.write('\n')
print("First order Sobol indices")
Sn = (np.inner(yA, yCn) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("n: ")
file.write(str(Sn))
file.write('\n')
print("n: ", Sn)
SD = (np.inner(yA, yCD) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("D: ")
file.write(str(SD))
file.write('\n')
print("D: ", SD)
Sq = (np.inner(yA, yCq) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("q: ")
file.write(str(Sq))
file.write('\n')
print("q: ", Sq)
SLambda = (np.inner(yA, yCLambda) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("Lambda: ")
file.write(str(SLambda))
file.write('\n')
file.write('\n')
print("Lambda: ", SLambda)
file.write("Total Sobol indices")
file.write('\n')
print("Total Sobol indices")
STn = 1 - (np.inner(yB, yCn) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("n: ")
file.write(str(STn))
file.write("\n")
print("n: ", STn)
STD = 1 - (np.inner(yB, yCD) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("D: ")
file.write(str(STD))
file.write("\n")
print("D: ", STD)
STq = 1 - (np.inner(yB, yCq) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("q: ")
file.write(str(STq))
file.write("\n")
print("q: ", STq)
STLambda = 1 - (np.inner(yB, yCLambda) / N - np.mean(yA) * np.mean(yB)) / (np.mean(yA ** 2) - np.mean(yA) * np.mean(yB))
file.write("LAmbda: ")
file.write(str(STLambda))
file.write("\n")
file.write("\n")
print("Lambda: ", STLambda)
print()
#https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
labels = ['n', 'D', 'q', 'Lambda']
sobol = [Sn, SD, Sq, SLambda]
total_sobol = [STn, STD, STq, STLambda]
l = np.arange(len(labels))
if np.isnan(Sn) == True:
axes[distIndex, timeIndex].bar(l - width / 2, [0, 0, 0, 0], width, label = 'First order')
axes[distIndex, timeIndex].bar(l + width / 2, [0, 0, 0, 0], width, label = 'Total')
axes[distIndex, timeIndex].set_title('x = {}, t = {}'.format(distance, timestep), fontsize = axisLabelFontSize)
axes[distIndex, timeIndex].set_xticks(l)
axes[distIndex, timeIndex].set_xticklabels(labels)
axes[distIndex, timeIndex].set_ylim([0,1])
else:
axes[distIndex, timeIndex].bar(l - width / 2, sobol, width, label = 'First order')
axes[distIndex, timeIndex].bar(l + width / 2, total_sobol, width, label = 'Total')
axes[distIndex, timeIndex].set_title('x = {}, t = {}'.format(distance, timestep), fontsize = axisLabelFontSize)
axes[distIndex, timeIndex].set_xticks(l)
axes[distIndex, timeIndex].set_xticklabels(labels)
axes[distIndex, timeIndex].set_ylim([0,1])
fig.tight_layout()
axes[0, 2].legend(loc=(0.70,1.1))
plt.show()
file.close()
#Reference: https://en.wikipedia.org/wiki/Variance-based_sensitivity_analysis
# ToDo: clean up code
# ToDo: compare to salib library
| Petru289/Uncertainty-Quantification-in-Hydrology | Assignment2/sobol.py | sobol.py | py | 6,566 | python | en | code | 0 | github-code | 13 |
8838600278 | import numpy as np
class PCA:
    """Principal component analysis via eigendecomposition of the covariance.

    Attributes after construction:
        transfer: (dimension, component) projection matrix (real part of the
                  eigenvectors with the largest eigenvalues).
        result:   (total, component) projection of the mean-centred data.
    """
    def __init__(self, data, component=1):
        self.data = data
        self.component = component
        self.total, self.dimension = data.shape
        if self.component >= self.total or self.component >= self.dimension:
            # Raising a plain string is a TypeError in Python 3; raise a
            # proper exception instead.
            raise ValueError("Invalid component")
        self.transfer = None
        self.result = None
        self.train()

    def train(self):
        """Fit: project the mean-centred data onto the top eigenvectors."""
        mean = np.mean(self.data, axis=0)
        cov = np.cov(self.data - mean, rowvar=False)
        value, vector = np.linalg.eig(cov)
        order = np.argsort(value)          # eigenvalues in ascending order
        target = order[-self.component:]   # indices of the `component` largest
        # NOTE: the selected columns are in ascending eigenvalue order;
        # callers relying on column layout should be aware of this.
        self.transfer = np.real(vector[:, target])
        self.result = np.dot(self.data - mean, self.transfer)
if __name__ == "__main__":
    # Smoke test: project 10 random 3-D points onto the first principal axis.
    dataset = np.random.random((10, 3))
    model = PCA(dataset, component=1)
    result = model.result
    print(dataset)
    print(result)
| xxyQwQ/awesome-AI | AI1602 人工智能问题求解与实践/Project/Experiment/Algorithm/PCA.py | PCA.py | py | 893 | python | en | code | 8 | github-code | 13 |
5400099904 | # %% ####################
import torch
# Scalar tensor (0-d).
# value
t1 = torch.tensor(4.)
# 1-D tensor (mixed int/float literals are promoted to float).
# vector
t2 = torch.tensor([1., 2, 3, 4])
# 2-D tensor.
# matrix
t3 = torch.tensor([[5, 6],
[7, 8],
[9, 10]])
# size of tensor
print(t3.shape)
# %% ####################
# Autograd demo: y = w*x + b, gradients flow to w and b only.
x = torch.tensor(3.)
w = torch.tensor(4., requires_grad=True)
b = torch.tensor(5., requires_grad=True)
y = w * x + b
# calculate gradient
y.backward()
# Display gradients (x.grad is None: requires_grad was not set on x).
print('dy/dx:', x.grad)
print('dy/dw:', w.grad)
print('dy/db:', b.grad)
# %% ####################
# full() broadcasts a constant; cat() stacks along dim 0 (shapes must match on dim 1).
t6 = torch.full((3,2), 42)
print(t6)
t7 = torch.cat((t3, t6))
print(t7)
# %% ####################
# NumPy interop: from_numpy shares memory with the ndarray.
import numpy as np
x = np.array([[1, 2], [3, 4.]])
y = torch.from_numpy(x)
y
| a23956491z/deep-learning-research | python/pytorch-practice/basic.py | basic.py | py | 715 | python | en | code | 0 | github-code | 13 |
20293400484 | import contextlib
import os
from typing import Dict
from chaoslib.exceptions import InterruptExecution
from msrest.exceptions import AuthenticationError
from msrestazure.azure_active_directory import AADMixin
from chaosazure.auth.authentication import ServicePrincipalAuth, TokenAuth
AAD_TOKEN = "aad_token"
SERVICE_PRINCIPAL = "service_principal"
@contextlib.contextmanager
def auth(secrets: Dict) -> AADMixin:
    """
    Create Azure authentication client from a provided secrets.

    Service principal and token based auth types are supported. Token
    based auth does not currently support refresh token functionality.

    Type of authentication client is determined based on passed secrets.
    For example, secrets that contain a `client_id`, `client_secret` and
    `tenant_id` will create a ServicePrincipalAuth client

    ```python
    {
        "client_id": "AZURE_CLIENT_ID",
        "client_secret": "AZURE_CLIENT_SECRET",
        "tenant_id": "AZURE_TENANT_ID"
    }
    ```

    If you are not working with Public Global Azure, e.g. China Cloud
    you can provide a `msrestazure.azure_cloud.Cloud` object. If omitted the
    Public Cloud is taken as default. Please refer to msrestazure.azure_cloud

    ```python
    {
        "client_id": "xxxxxxx",
        "client_secret": "*******",
        "tenant_id": "@@@@@@@@@@@",
        "cloud": "msrestazure.azure_cloud.Cloud"
    }
    ```

    If the `client_secret` is not provided, then token based credentials is
    assumed and an `access_token` value must be present in the `secrets` object
    and updated when the token expires.

    Using this function goes as follows:

    ```python
    with auth(secrets) as cred:
        subscription_id = configuration.get("subscription_id")

        resource_client = ResourceManagementClient(cred, subscription_id)
        compute_client = ComputeManagementClient(cred, subscription_id)
    ```

    Again, if you are not working with Public Azure Cloud,
    and you set azure_cloud in secret,
    this will pass one more parameter `base_url` to the above function.

    ```python
    with auth(secrets) as cred:
        cloud = cred.get('cloud')
        client = ComputeManagementClient(
            credentials=cred, subscription_id=subscription_id,
            base_url=cloud.endpoints.resource_manager)
    ```

    """
    # No input validation needed:
    # 1) Either no secrets are passed at all - chaostoolkit-lib
    #    will handle it for us *or*
    # 2) Secret arguments are partially missing or invalid - we
    #    rely on the ms azure library
    yield __create(secrets)
##################
# HELPER FUNCTIONS
##################
def __create(secrets: Dict) -> AADMixin:
    """Instantiate the matching auth backend and build Azure credentials.

    Raises InterruptExecution with the provider's error description when
    authentication fails.
    """
    factories = {
        SERVICE_PRINCIPAL: ServicePrincipalAuth,
        AAD_TOKEN: TokenAuth,
    }
    _authentication = factories[__authentication_type(secrets)]()
    try:
        return _authentication.create(secrets)
    except AuthenticationError as e:
        msg = e.inner_exception.error_response.get('error_description')
        raise InterruptExecution(msg)
def __authentication_type(secrets: dict) -> str:
    """Decide the auth flavour from the secrets, falling back to env vars."""
    if secrets.get('client_secret'):
        return SERVICE_PRINCIPAL
    if secrets.get('access_token'):
        return AAD_TOKEN
    if os.getenv("AZURE_CLIENT_SECRET"):
        return SERVICE_PRINCIPAL
    if os.getenv("AZURE_ACCESS_TOKEN"):
        return AAD_TOKEN
    raise InterruptExecution(
        "Authentication to Azure requires a"
        " client secret or an access token")
| chaostoolkit-incubator/chaostoolkit-azure | chaosazure/auth/__init__.py | __init__.py | py | 3,679 | python | en | code | 22 | github-code | 13 |
39277666506 | from __future__ import division
import numpy as np
from scipy.optimize import minimize
# ##############################################################################
# LoadData takes the file location for the yacht_hydrodynamics.data and returns
# the data set partitioned into a training set and a test set.
# the X matrix, deal with the month and day strings.
# Do not change this function!
# ##############################################################################
def loadData(df):
    """Load the yacht dataset, standardise features/target, 1:3 test/train split.

    Returns (X_train, y_train, X_test, y_test). Left as-is per the header
    banner ("Do not change this function!").
    """
    data = np.loadtxt(df)
    Xraw = data[:, :-1]
    # The regression task is to predict the residuary resistance per unit weight of displacement
    yraw = (data[:, -1])[:, None]
    X = (Xraw - Xraw.mean(axis=0)) / np.std(Xraw, axis=0)
    y = (yraw - yraw.mean(axis=0)) / np.std(yraw, axis=0)
    ind = range(X.shape[0])
    test_ind = ind[0::4] # take every fourth observation for the test set
    # NOTE(review): list(set(...)) has no guaranteed order; for small ints
    # CPython iterates them in value order, but this is not a language
    # guarantee — TODO confirm ordering does not matter downstream.
    train_ind = list(set(ind) - set(test_ind))
    X_test = X[test_ind]
    X_train = X[train_ind]
    y_test = y[test_ind]
    y_train = y[train_ind]
    return X_train, y_train, X_test, y_test
# ##############################################################################
# Returns a single sample from a multivariate Gaussian with mean and cov.
# ##############################################################################
def multivariateGaussianDraw(mean, cov):
    """Draw one sample from N(mean, cov) via the Cholesky factor of cov."""
    chol = np.linalg.cholesky(cov)                  # cov = chol @ chol.T
    noise = np.random.standard_normal(len(mean))    # iid N(0, 1) vector
    # Affine transform of white noise: mean + L z ~ N(mean, L L^T).
    return mean + np.inner(chol, noise)
# ##############################################################################
# RadialBasisFunction for the kernel function
# k(x,x') = s2_f*exp(-norm(x,x')^2/(2l^2)). If s2_n is provided, then s2_n is
# added to the elements along the main diagonal, and the kernel function is for
# the distribution of y,y* not f, f*.
# ##############################################################################
class RadialBasisFunction():
    """Squared-exponential (RBF) kernel with log-parameterised hyperparameters.

    params = [ln sigma_f, ln length_scale, ln sigma_n]; the natural-scale
    values sigma2_f, length_scale and sigma2_n are kept in sync.
    """
    def __init__(self, params):
        # __init__ previously duplicated setParams line-for-line; delegate.
        self.setParams(params)

    def setParams(self, params):
        """Update the log-hyperparameters and their natural-scale values."""
        self.ln_sigma_f = params[0]
        self.ln_length_scale = params[1]
        self.ln_sigma_n = params[2]

        self.sigma2_f = np.exp(2 * self.ln_sigma_f)
        self.sigma2_n = np.exp(2 * self.ln_sigma_n)
        self.length_scale = np.exp(self.ln_length_scale)

    def getParams(self):
        """Return the log-hyperparameters [ln sigma_f, ln l, ln sigma_n].

        (The previous debug print was removed.)
        """
        return np.array([self.ln_sigma_f, self.ln_length_scale, self.ln_sigma_n])

    def getParamsExp(self):
        """Return the natural-scale hyperparameters [sigma2_f, l, sigma2_n].

        (The previous debug print showed the *log* values while the method
        returned the exponentiated ones — removed as misleading.)
        """
        return np.array([self.sigma2_f, self.length_scale, self.sigma2_n])

    # ##########################################################################
    # covMatrix computes the covariance matrix for the provided matrix X using
    # the RBF. If two matrices are provided, for a training set and a test set,
    # then covMatrix computes the covariance matrix between all inputs in the
    # training and test set.
    # ##########################################################################
    def covMatrix(self, X, Xa=None):
        if Xa is not None:
            # Stack training inputs above test inputs (same layout as before).
            X = np.vstack((X, Xa))

        n = X.shape[0]
        # Vectorised pairwise squared distances replace the former O(n^2)
        # Python double loop; numerically equivalent.
        sq_dists = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
        covMat = self.sigma2_f * np.exp(-sq_dists / (2.0 * self.length_scale ** 2))

        # If additive Gaussian noise is provided, this adds the sigma2_n along
        # the main diagonal. So the covariance matrix will be for [y y*]. If
        # you want [y f*], simply subtract the noise from the lower right
        # quadrant.
        if self.sigma2_n is not None:
            covMat += self.sigma2_n * np.identity(n)

        # Return computed covariance matrix
        return covMat
class GaussianProcessRegression():
    """GP regression with a caller-supplied kernel `k` (see RadialBasisFunction)."""
    def __init__(self, X, y, k):
        self.X = X
        self.n = X.shape[0]
        self.y = y
        self.k = k
        self.K = self.KMat(self.X)

    # ##########################################################################
    # Recomputes the covariance matrix and the inverse covariance
    # matrix when new hyperparameters are provided.
    # ##########################################################################
    def KMat(self, X, params=None):
        if params is not None:
            self.k.setParams(params)
        K = self.k.covMatrix(X)
        self.K = K
        return K

    # ##########################################################################
    # Computes the posterior mean of the Gaussian process regression and the
    # covariance for a set of test points.
    # NOTE: This should return predictions using the 'clean' (not noisy) covariance
    # ##########################################################################
    def predict(self, Xa):
        mean_fa = np.zeros((Xa.shape[0], 1))
        cov_fa = np.zeros((Xa.shape[0], Xa.shape[0]))
        # Joint covariance over [train, test] inputs (noise on the diagonal).
        VCV = self.k.covMatrix(self.X, Xa)
        # the final matrix is y f* (ie: we have noisy training observations and non noisy test predictions)
        if self.k.sigma2_n is not None:
            # Strip the noise term from the test/test quadrant's diagonal.
            for i in range(self.n, Xa.shape[0] + self.n):
                VCV[i][i] = VCV[i][i] - self.k.sigma2_n
        # decompose the conditional VCV matrix
        K_noisy = VCV[0:self.n, 0:self.n]
        K_ax = VCV[self.n:, 0:self.n]
        K_aa = VCV[self.n:, self.n:]
        K_xa = VCV[0:self.n, self.n:]
        # Standard GP posterior; NOTE(review): K_noisy is inverted twice —
        # the inverse (or a Cholesky solve) could be computed once.
        mean_fa = np.dot(np.dot(K_ax, np.linalg.inv(K_noisy)), self.y)
        cov_fa = K_aa - np.dot(np.dot(K_ax, np.linalg.inv(K_noisy)), K_xa)
        # Return the mean and covariance
        return mean_fa, cov_fa

    # ##########################################################################
    # Return negative log marginal likelihood of training set. Needs to be
    # negative since the optimiser only minimises.
    # ##########################################################################
    def logMarginalLikelihood(self, params=None):
        if params is not None:
            K = self.KMat(self.X, params)
        # NLML = 1/2 y^T K^-1 y + 1/2 log|K| + n/2 log(2 pi)
        # breaking this in to two lines for length
        t1 = .5 * np.dot(np.dot(self.y.transpose(), np.linalg.inv(self.K)), self.y) + .5 * np.log(np.linalg.det(self.K))
        lml = t1 + .5 * self.n * np.log(np.pi * 2)
        return lml

    # ##########################################################################
    # Computes the gradients of the negative log marginal likelihood wrt each
    # hyperparameter.
    # ##########################################################################
    def gradLogMarginalLikelihood(self, params=None):
        if params is not None:
            K = self.KMat(self.X, params)

        old_params = self.k.getParamsExp()
        # K_bar: noise-free kernel matrix (K minus sigma2_n on the diagonal).
        K_bar = self.K - (old_params[2] * np.identity(self.n))
        # reduce K to get the second term from the chain rule
        # NOTE(review): recovers -d^2/(2 l^2) via elementwise logs; this
        # presumes every K_bar entry is strictly positive — TODO confirm.
        K_2 = np.log(K_bar) - np.log(old_params[0])
        K_3 = -1 * K_2 * 2 * ((old_params[1]) ** 2)
        # each of the following is dK/dtheta (w.r.t. the *log* parameters)
        dK_sf = 2 * K_bar
        dK_l = K_bar * K_3
        dK_sn = old_params[2] * 2 * np.identity(self.n)
        # alpha = (K^-1 y)(K^-1 y)^T, used in the trace-form gradient.
        alpha = np.dot(np.dot(np.linalg.inv(self.K), self.y), ((np.dot(np.linalg.inv(self.K), self.y)).transpose()))

        def grad_theta(dk_dTheta, a, K1):
            # dNLML/dtheta = -1/2 tr[(alpha - K^-1) dK/dtheta]
            return (-1 / 2) * (np.trace(np.dot(a - np.linalg.inv(K1), dk_dTheta)))

        grad_ln_sigma_f = grad_theta(dK_sf, alpha, self.K)
        grad_ln_length_scale = grad_theta(dK_l, alpha, self.K)
        grad_ln_sigma_n = grad_theta(dK_sn, alpha, self.K)
        gradients = np.array([grad_ln_sigma_f, grad_ln_length_scale, grad_ln_sigma_n])
        # Return the gradients
        return gradients

    # ##########################################################################
    # Computes the mean squared error between two input vectors.
    # ##########################################################################
    def mse(self, ya, fbar):
        ylen = ya.shape[0]
        mse = 0
        for i in range(ylen):
            mse += (ya[i] - fbar) ** 2
        # NOTE(review): debug prints left in place to preserve output.
        print(mse)
        print(mse / ylen)
        m = mse / ylen
        return m[0]

    # ##########################################################################
    # Computes the mean standardised log loss.
    # ##########################################################################
    def msll(self, ya, fbar, cov):
        ylen = ya.shape[0]
        msll = 0
        # NOTE(review): uses cov[0][0] as the predictive variance for *every*
        # test point; the standard MSLL uses cov[i][i] per point — TODO confirm.
        sig = cov[0][0] + self.k.getParamsExp()[2]
        for i in range(ylen):
            print(ya[i])
            diff = ya[i] - fbar
            msll += (.5 * np.log(2 * np.pi * sig)) + (diff ** 2) / (2 * sig)
        res = (1 / ylen) * msll
        return res[0]

    # ##########################################################################
    # Minimises the negative log marginal likelihood on the training set to find
    # the optimal hyperparameters using BFGS.
    # ##########################################################################
    def optimize(self, params=None, disp=True):
        # NOTE(review): the `params` argument is ignored; the start point is
        # hard-coded to [0.5 ln 1, ln 0.1, 0.5 ln 0.5].
        p = np.array([.5 * np.log(1), np.log(.1), .5 * np.log(.5)])
        res = minimize(self.logMarginalLikelihood, p, method='BFGS', jac=self.gradLogMarginalLikelihood,
                       options={'disp': disp})
        print(res)
        print(res.x)
        return res.x
if __name__ == '__main__':
    # Fixed seed so marking/tests are reproducible.
    np.random.seed(42)

    ##########################
    # You can put your tests here - marking
    # will be based on importing this code and calling
    # specific functions with custom input.
    ##########################
| cmw2196/fall_sem_imperial | gauss_process_cw.py | gauss_process_cw.py | py | 10,404 | python | en | code | 0 | github-code | 13 |
38889768693 | #! python3
#Write a program that finds all files
#with a given prefix in a single folder and locates any gaps in the numbering
#Have the program rename all the later files to close this gap.
import os,shutil,re
def FindFile(prefix,folder):
    """Close numbering gaps in files named <prefix>00<N><suffix> inside *folder*.

    Repeatedly renames file N to N-1 whenever N-1 is missing, until a full
    pass finds no gap. NOTE(review): renames files on disk via shutil.move.
    """
    #get the absolute path of the folder
    folder=os.path.abspath(folder)
    #create regular expression depending on the prefix
    #First regex: only captures up to the percent part
    #Second regex: exactly 3 digits, the first 2 are always 0
    reGex_2=re.compile(r"""^("""+prefix+""") #the prefix
                       00 #2 so khong o dau
                       (\d+) #the number
                       (.*?)$ #every word after the number
                       """,re.VERBOSE)
    #Only reGex_2 is used because reGex_1 would need too many ifs
    while True:
        check=1
        for filename in os.listdir(folder):
            mo=reGex_2.search(filename)
            if mo==None:
                continue
            fprefix=mo.group(1)
            number=int(mo.group(2))
            if number==1:
                continue  # file 1 has no predecessor to check
            # Candidate name of the preceding file in the sequence.
            pre_file=fprefix+'00'+str(number-1)+mo.group(3)
            if not os.path.exists(os.path.join(folder,pre_file)):
                check=0  # a gap was found; another pass will be needed
                _font=os.path.join(folder,filename)
                _end=os.path.join(folder,pre_file)
                print('Changing name '+_font+' to '+_end)
                shutil.move(_font,_end)
        if check ==1:
            break
FindFile('spam','Superfolder') | DrakeChow3/Stupid-stuff | Script1/PreFix.py | PreFix.py | py | 1,555 | python | en | code | 0 | github-code | 13 |
1067217289 | from flask import redirect,Flask, render_template, request, url_for
import sqlite3
import datetime
app = Flask(__name__)
def add_database(fro, to, f_credit, t_credit, amount):
    """Persist a credit transfer atomically.

    Updates both users' balances and logs the transfer with today's date.
    Returns "Success" on commit, or "error in insert operation" on failure.

    The original committed after each statement, so a failure in the final
    insert left the balance updates permanently applied (rollback was a
    no-op); it also leaked the connection on the error path.
    """
    con = sqlite3.connect('database.db')
    try:
        cur = con.cursor()
        cur.execute("UPDATE users SET credit=? WHERE email=?", [int(f_credit), fro])
        cur.execute("UPDATE users SET credit=? WHERE email=?", [int(t_credit), to])
        date_string = datetime.datetime.now().strftime('%Y-%m-%d')
        cur.execute("insert into credit values(?,?,?,?)", [fro, to, int(amount), date_string])
        con.commit()  # single commit: all three statements succeed or none persist
        return "Success"
    except Exception:
        con.rollback()
        return "error in insert operation"
    finally:
        con.close()
@app.route('/')
def index():
    """Landing page."""
    return render_template('index.html')

@app.route('/resume')
def resume():
    """Static resume page."""
    return render_template('resume.html')

@app.route('/credit')
def credit():
    """Credit-transfer dashboard."""
    return render_template('main.html')
@app.route('/view_all_user')
def view_all_user():
    """Render every row of the users table."""
    con = sqlite3.connect('database.db')
    cur = con.cursor()
    cur.execute('select * from users')
    a = cur.fetchall()
    con.close()
    return render_template('view_all_user.html', result=a)
@app.route('/credit_transfer')
def credit_transfer():
    """Show the transfer form, populated with every user email."""
    con = sqlite3.connect('database.db')
    cur = con.cursor()
    cur.execute("select email from users")
    a = cur.fetchall()
    con.close()
    return render_template('credit_transfer.html', result=a)
@app.route('/credit_transfer_submit', methods=['POST', 'GET'])
def credit_transfer_submit():
    """Validate a transfer request and delegate persistence to add_database.

    NOTE(review): cursor.fetchall() returns a (possibly empty) list, never
    None, so every `if a is None` branch below is dead; an unknown sender
    would instead raise IndexError at `a[0][0]`. The connection is also
    never closed on the POST paths.
    """
    con = sqlite3.connect('database.db')
    cur = con.cursor()
    if request.method == 'POST':
        fro = request.form['from']
        to = request.form['to']
        amount = request.form['amount']
        if fro == to:
            cur.execute("select email from users")
            a = cur.fetchall()
            return render_template('credit_transfer.html', result=a, fro=fro, to=to, amount=amount,
                                   message="Sender and reciever must be different")
        if int(amount) > 10000:
            cur.execute("select email from users")
            a = cur.fetchall()
            return render_template('credit_transfer.html', result=a, fro=fro, to=to, amount=amount,
                                   message="Amount must be smaller than 10000")
        cur.execute("select credit from users where email=?", [fro])
        a = cur.fetchall()
        if a is None:  # NOTE(review): dead branch, see docstring
            cur.execute("select email from users")
            a1 = cur.fetchall()
            return render_template('credit_transfer.html', result=a1, fro=fro, to=to, amount=amount,
                                   message="Unknown error")
        if int(a[0][0]) < int(amount):
            cur.execute("select email from users")
            a1 = cur.fetchall()
            return render_template('credit_transfer.html', result=a1, fro=fro, to=to, amount=amount,
                                   message="insufficient balance")
        f_credit = int(a[0][0])
        cur.execute("select credit from users where email=?", [to])
        a = cur.fetchall()
        if a is None:  # NOTE(review): dead branch; message also misleading
            cur.execute("select email from users")
            a1 = cur.fetchall()
            return render_template('credit_transfer.html', result=a1, fro=fro, to=to, amount=amount,
                                   message="insufficient balance")
        t_credit = int(a[0][0])
        f_credit = f_credit - int(amount)
        t_credit = t_credit + int(amount)
        con.commit()
        return add_database(fro,to,f_credit,t_credit,amount)
        return redirect(url_for('credit_transfer_submit'))  # NOTE(review): unreachable
    # GET fall-through: re-render the form (message is shown unconditionally).
    cur.execute("select email from users")
    a = cur.fetchall()
    con.close()
    return render_template('credit_transfer.html', result=a, message="Successfully Transferred credit")
@app.route('/search',methods=['POST','GET'])
def search():
    """Search users by exact email or name match.

    NOTE(review): fetchall() never returns None, so the `notfound` branch is
    dead — an empty result list is rendered as `message=[]` instead. The GET
    path renders message="error". The connection is never closed.
    """
    if request.method=='POST':
        search=request.form['search']
        con = sqlite3.connect('database.db')
        cur = con.cursor()
        # Parameterised query: safe against SQL injection.
        cur.execute('select *from users where email=? or name=?',[search,search])
        a=cur.fetchall()
        if a is None:  # NOTE(review): dead branch, see docstring
            return render_template('main.html', notfound="Norecord found")
        else:
            return render_template('main.html', message=a)
    return render_template('main.html',message="error")
@app.route('/login', methods=[ 'POST', 'GET' ])
def login():
    """Proxy a username/password login to the Hasura auth endpoint.

    Expects a JSON body {"data": {"username": ..., "password": ...}} and
    returns the upstream response body unchanged.
    """
    # `json` and `requests` were never imported at module level, so this
    # route raised NameError at request time; import them here.
    import json
    import requests
    content = request.get_json()
    js = json.loads(json.dumps(content))  # round-trip keeps the original behaviour
    # This is the url to which the query is made
    url = "https://auth.octagon58.hasura-app.io/v1/login"
    # This is the json payload for the query
    requestPayload = {
        "provider": "username",
        "data": {
            "username": js[ 'data' ][ 'username' ],
            "password": js[ 'data' ][ 'password' ]
        }
    }
    # Setting headers
    headers = {
        "Content-Type": "application/json"
    }
    # Make the query and store response in resp
    resp = requests.request("POST", url, data=json.dumps(requestPayload), headers=headers)
    return resp.content
if __name__ == "__main__":
    # Run the Flask development server.
    app.run()
| manishkumar212111/sparks_internship | app.py | app.py | py | 5,222 | python | en | code | 0 | github-code | 13 |
26771122238 | from .controltypes import Types
from .curtain import Curtain
class MinimunVentilation(object):
    """Timed minimum-ventilation cycle for a curtain (times in seconds).

    State machine: INITIAL -> OPENING (abrefecha s) -> WAIT_OPEN (aberto s)
    -> CLOSING (abrefecha s) -> WAIT_CLOSING (fechado s) -> INITIAL.
    fsm() is expected to be called once per second; every transition and
    setter logs to stdout. (Class name keeps the original "Minimun" spelling
    because callers reference it.)
    """
    def __init__(self):
        self._abrefecha = 30                     # open/close travel time
        self._aberto = 40                        # dwell time while open
        self._fechado = 180                      # dwell time while closed
        self._limite = 30                        # limit (unused here)
        self._state = Types.VM_INITIAL_STATE
        self._time = 0                           # seconds spent in current state
        self._curtain = Curtain()

    @property
    def abrefecha(self):
        return self._abrefecha

    @abrefecha.setter
    def abrefecha(self, value):
        self._abrefecha = int(value)
        print("MinimunVentilation - Changed abrefecha to ",self._abrefecha)

    @property
    def aberto(self):
        return self._aberto

    @aberto.setter
    def aberto(self, value):
        self._aberto = int(value)
        print("MinimunVentilation - Changed aberto to ",self._aberto)

    @property
    def fechado(self):
        return self._fechado

    @fechado.setter
    def fechado(self, value):
        self._fechado = int(value)
        print("MinimunVentilation - Changed fechado to ",self._fechado)

    @property
    def limite(self):
        return self._limite

    @limite.setter
    def limite(self, value):
        self._limite = int(value)
        print("MinimunVentilation - Changed limite to ",self._limite)

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self._state = value
        print("MinimunVentilation - Changed state to",self._state)

    def fsm(self):
        """Advance the cycle by one tick (call once per second)."""
        if self._state == Types.VM_INITIAL_STATE:
            # Kick off a new cycle: start opening the curtain.
            print("MinimunVentilation - Start to open!")
            self._state = Types.VM_OPENING
            self._time = 0
            self._curtain.status = Types.OPENING
        elif self._state == Types.VM_OPENING:
            # Curtain travelling open; one unit of aperture per tick.
            print("MinimunVentilation - Opening!")
            self._time += 1
            print("MinimunVentilation - Passed %s seconds" % self._time)
            self._curtain.abertura += 1
            if self._time >= self._abrefecha:
                self._state = Types.VM_WAIT_OPEN
                self._time = 0
                self._curtain.status = Types.STOPPED
        elif self._state == Types.VM_WAIT_OPEN:
            # Hold fully open for `aberto` seconds.
            print("MinimunVentilation - Waiting openned!")
            self._time += 1
            print("MinimunVentilation - Passed %s seconds" % self._time)
            if self._time >= self._aberto:
                self._state = Types.VM_CLOSING
                self._time = 0
                self._curtain.status = Types.CLOSING
        elif self._state == Types.VM_CLOSING:
            # Curtain travelling closed; aperture decreases each tick.
            print("MinimunVentilation - Closing!")
            self._time += 1
            print("MinimunVentilation - Passed %s seconds" % self._time)
            self._curtain.abertura -= 1
            if self._time >= self._abrefecha:
                self._state = Types.VM_WAIT_CLOSING
                self._time = 0
                self._curtain.status = Types.STOPPED
        elif self._state == Types.VM_WAIT_CLOSING:
            # Hold fully closed for `fechado` seconds, then restart the cycle.
            print("MinimunVentilation - Waiting closed!")
            self._time += 1
            print("MinimunVentilation - Passed %s seconds" % self._time)
            if self._time >= self._fechado:
                self._state = Types.VM_INITIAL_STATE
                self._time = 0
                self._curtain.status = Types.STOPPED
| gitandlucsil/curtain_temperature_control | models/minimum_ventilation.py | minimum_ventilation.py | py | 3,294 | python | en | code | 0 | github-code | 13 |
25714482062 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import turtle
# Sierpinski Set
class SierpinskiSet:
    """Draw a Sierpinski triangle with turtle graphics.

    depth controls the recursion level; generate() opens a turtle window
    and blocks in its main loop.
    """
    def __init__(self, depth=5):
        self.depth = depth

    def draw_sierpinski(self, t, length, depth):
        """Recursively draw a Sierpinski triangle of side *length* at *depth*."""
        if depth == 0:
            # Base case: one solid equilateral triangle.
            for _ in range(3):
                t.forward(length)
                t.left(120)
        else:
            # Three half-size sub-triangles: bottom-left, bottom-right, top.
            self.draw_sierpinski(t, length / 2, depth - 1)
            t.forward(length / 2)
            self.draw_sierpinski(t, length / 2, depth - 1)
            t.backward(length / 2)
            t.left(60)
            t.forward(length / 2)
            t.right(60)
            self.draw_sierpinski(t, length / 2,
                                 depth - 1)
            t.left(60)
            t.backward(length / 2)
            t.right(60)

    def generate(self):
        """Open a turtle window and draw the fractal (blocks in mainloop)."""
        window = turtle.Screen()
        t = turtle.Turtle()
        t.speed(0)  # fastest drawing speed
        t.penup()
        t.goto(-200, 200)
        t.pendown()
        self.draw_sierpinski(t, 400, self.depth)
window.mainloop() | RickysChocolateBox/artificial_brain | ProtoBrainModel/SierpinskiSetclass.py | SierpinskiSetclass.py | py | 1,031 | python | en | code | 0 | github-code | 13 |
9087635290 | #https://www.acmicpc.net/problem/16194
#백준 16194번 카드 구매하기 2 (DP)
#import sys
#input = sys.stdin.readline
# cards[i] = minimum cost to buy exactly i cards; cards[0] = 0 sentinel.
n = int(input())
cards = [0]+list(map(int, input().split()))
for i in range(1,n+1):
    # Split i into j + (i-j); j from i//2 avoids checking symmetric splits twice.
    for j in range(i//2,i):
        cards[i] = min(cards[i], cards[i-j]+cards[j])
print(cards[n])
#속도면에서 개선하기 위해서는 불필요한 부분은 생략해야 한다 | MinsangKong/DailyProblem | 07-03/3-2.py | 3-2.py | py | 402 | python | ko | code | 0 | github-code | 13 |
27187945803 | import sys
from collections import deque
input = sys.stdin.readline
def bfs(s):
    """Shortest path in an N x M maze allowing up to K wall breaks.

    s = [row, col, walls_left]; state space is (row, col, remaining breaks).
    Returns path length (cells visited, start counts as 1) or -1 if unreachable.
    Reads module-level globals N, M, K and maze.
    """
    queue = deque()
    queue.append(s)
    # visited[i][j][k] = BFS distance when reaching (i, j) with k breaks left.
    visited = [[[0] * (K + 1) for _ in range(M)] for _ in range(N)]
    visited[s[0]][s[1]][K] = 1
    while queue:
        n = queue.popleft()
        di, dj = [0, 1, 0, -1], [1, 0, -1, 0]
        if n[0] == N - 1 and n[1] == M - 1:
            return visited[n[0]][n[1]][n[2]]
        for k in range(4):
            ni, nj = n[0] + di[k], n[1] + dj[k]
            if 0 <= ni < N and 0 <= nj < M :
                if maze[ni][nj] == 0 and visited[ni][nj][n[2]] == 0:
                    # Free cell: move without spending a break.
                    queue.append([ni, nj, n[2]])
                    visited[ni][nj][n[2]] = visited[n[0]][n[1]][n[2]] + 1
                elif maze[ni][nj] == 1 and visited[ni][nj][n[2] - 1] == 0:
                    # Wall: pass through by spending one break, if any remain.
                    if n[2] > 0:
                        queue.append([ni, nj, n[2] - 1])
                        visited[ni][nj][n[2] - 1] = visited[n[0]][n[1]][n[2]] + 1
    return -1


N, M, K = map(int, input().split())
maze = [list(map(int, input().strip())) for _ in range(N)]
print(bfs([0, 0, K]))
| Nam4o/Algorithm | 백준/Gold/14442. 벽 부수고 이동하기 2/벽 부수고 이동하기 2.py | 벽 부수고 이동하기 2.py | py | 1,145 | python | en | code | 1 | github-code | 13 |
42798354745 | import argparse, os, subprocess, string, sys
from elftools.elf.elffile import ELFFile
def parse_args():
    """Parse CLI options; --datadir (required) points at the benchmark root."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--datadir', required=True)
    return parser.parse_args()
# -------------------------------->%
POSITIVES = [0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
NEGATIVES = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45]

def enumerate_params(args):
    """Return every (positive, negative) threshold pair; *args* is unused."""
    return [(pos, neg) for pos in POSITIVES for neg in NEGATIVES]
def load_binlist(args):
    """Read <datadir>/binlist.txt; each line is a comma-separated binary spec."""
    list_path = os.path.join(args.datadir, 'binlist.txt')
    with open(list_path) as handle:
        return [entry.strip().split(',') for entry in handle]
# -------------------------------->%
def is_valid_res(path):
    """A result file is valid when it exists and is non-empty."""
    return os.path.exists(path) and os.path.getsize(path) > 0
def load_time(time_path):
    """Parse the single float stored in *time_path*."""
    with open(time_path) as handle:
        return float(handle.read().strip())
# Out-of-scope
# Out-of-scope
def load_oos_ranges(bin_path):
    """Collect (start, end) address ranges of out-of-scope ELF sections.

    Covers .plt, .plt.sec, .init and .fini when present, in that order.
    The original opened the binary without ever closing it (fd leak) and
    repeated the same stanza four times; both fixed here.
    """
    oos_sections = ('.plt', '.plt.sec', '.init', '.fini')
    ranges = []
    with open(bin_path, 'rb') as f:
        elf = ELFFile(f)
        for name in oos_sections:
            section = elf.get_section_by_name(name)
            if section is not None:
                saddr = section['sh_addr']
                ranges.append((saddr, saddr + section['sh_size']))
    return ranges
def is_out_of_scope(oos_ranges, addr):
    """Return True when *addr* lies in any half-open [start, end) range."""
    return any(start <= addr < end for start, end in oos_ranges)
def load_result(args, conf, bin_path, res_path):
    """Parse detected addresses from *res_path*, dropping out-of-scope ones.

    Returns (addresses, has_error); has_error becomes True at the first line
    that is not valid hex, in which case an empty set is returned.
    """
    oos_ranges = load_oos_ranges(bin_path)
    result = set()
    has_error = False
    with open(res_path) as f:
        for line in f.readlines():
            line = line.strip()
            # line[2:] skips an assumed '0x' prefix — TODO confirm the
            # result-file format always carries one.
            has_error = not set(line[2:]).issubset(string.hexdigits)
            if has_error:
                break
            addr = int(line, 16)
            is_oos = is_out_of_scope(oos_ranges, addr)
            if is_oos:
                continue
            result.add(addr)
    if has_error:
        return set(), True
    else:
        return result, False
def load_gt(gt_path):
    """Load ground-truth addresses: one hex literal per line, as a set."""
    with open(gt_path) as handle:
        return {int(entry.strip(), 16) for entry in handle}
def accumulate_result(args, pos, neg, b, accuracy):
    """Fold one binary's result into accuracy[(pos, neg)] = [nbin, tp, fp, fn].

    NOTE(review): `assert not has_error` aborts the whole report on a single
    malformed result file (and is stripped under `python -O`).
    """
    pkg, arch, comp, pie, opt, name = b
    conf = pkg, arch, comp, pie, opt
    bin_path = os.path.join(args.datadir, 'bench', 'bin', pkg, arch, comp, pie, opt, name)
    gt_path = os.path.join(args.datadir, 'bench', 'gt', pkg, arch, comp, pie, opt, name)
    res_path = os.path.join(args.datadir, 'results', 'param_%.2f_%.2f' % (pos, neg), pkg, arch, comp, pie, opt, name)
    if (pos, neg) not in accuracy:
        accuracy[(pos, neg)] = [0, 0, 0, 0] # nbin, tp, fp, fn
    result, has_error = load_result(args, conf, bin_path, res_path)
    assert not has_error
    gt = load_gt(gt_path)
    # Set algebra against ground truth: hits, spurious, missed.
    tp = len(result & gt)
    fp = len(result - gt)
    fn = len(gt - result)
    accuracy[(pos, neg)][0] += 1
    accuracy[(pos, neg)][1] += tp
    accuracy[(pos, neg)][2] += fp
    accuracy[(pos, neg)][3] += fn
def show_summary(args, params):
    """Print precision/recall/F1 (percent) for each (pos, neg) pair.

    NOTE(review): binlist.txt is re-read once per parameter pair although it
    never changes (could be hoisted); prec/rec of 0 would raise
    ZeroDivisionError; values are rounded to 2 decimals but printed with
    %.3f, so the last printed digit is always 0.
    """
    accuracy = {}
    for pos, neg in params:
        bin_list = load_binlist(args)
        for b in bin_list:
            accumulate_result(args, pos, neg, b, accuracy)
    for pos, neg in params:
        nbins, tp, fp, fn = accuracy[(pos, neg)]
        prec = tp / (tp + fp)
        rec = tp / (tp + fn)
        # F1 as the harmonic mean of precision and recall.
        f1 = 1 / (((1 / prec) + (1 / rec)) / 2)
        prec = round(prec * 100, 2)
        rec = round(rec * 100, 2)
        f1 = round(f1 * 100, 2)
        print('%.2f - %.2f: %.3f %.3f %.3f' % (pos, neg, prec, rec, f1))
# -------------------------------->%
def main(args):
    """Entry point: evaluate accuracy for every hyper-parameter pair."""
    params = enumerate_params(args)
    show_summary(args, params)

# -------------------------------->%

if __name__ == '__main__':
    args = parse_args()
    main(args)
| B2R2-org/FunProbe | param/report.py | report.py | py | 4,173 | python | en | code | 3 | github-code | 13 |
17938824746 | import asyncio
from io import BytesIO
import discord
import pandas as pd
from plotly import express as px
from economytrack.abc import MixinMeta
class PlotGraph(MixinMeta):
    # Mixin that renders a time-series DataFrame into a Discord file attachment.
    async def get_plot(self, df: pd.DataFrame, y_label: str) -> discord.File:
        """Render the plot off the event loop (asyncio.to_thread needs 3.9+)."""
        return await asyncio.to_thread(self.make_plot, df, y_label)

    @staticmethod
    def make_plot(df: pd.DataFrame, y_label: str) -> discord.File:
        """Build a dark-themed line chart of *df* and wrap the PNG bytes."""
        fig = px.line(
            df,
            template="plotly_dark",
            labels={"ts": "Date", "value": y_label},
        )
        fig.update_xaxes(tickformat="%I:%M %p\n%b %d %Y")
        fig.update_yaxes(tickformat="si")  # SI-prefixed y ticks (1k, 1M, ...)
        fig.update_layout(
            showlegend=False,
        )
        # to_image requires the kaleido engine to be installed.
        bytefile = fig.to_image(format="png", width=800, height=500, scale=1)
        buffer = BytesIO(bytefile)
        buffer.seek(0)
        file = discord.File(buffer, filename="plot.png")
        return file
| vertyco/vrt-cogs | economytrack/graph.py | graph.py | py | 927 | python | en | code | 33 | github-code | 13 |
70624498258 | import re
class FilterMiddleware(object):
    """Flatten JSON:API-style `filter[key]=value` query params into `key=value`.

    Copies request.GET (QueryDicts are immutable per request), strips every
    `filter[...]` key and re-inserts it under the bare inner name.
    """
    # Compiled once at class level instead of per key.
    _FILTER_RE = re.compile(r'filter\[(.*?)\].*')

    def process_request(self, request):
        request.GET = request.GET.copy()
        filters_dict = {}
        removed_keys = []
        for key in request.GET:
            if key.startswith('filter['):
                match = self._FILTER_RE.match(key)
                if match is None:
                    # Malformed key such as 'filter[broken' (no closing
                    # bracket): previously this crashed with AttributeError
                    # on match.group; leave it untouched instead.
                    continue
                filters_dict[match.group(1)] = request.GET[key]
                removed_keys.append(key)
        # Mutate after iteration to avoid changing the dict while looping.
        for removed_key in removed_keys:
            request.GET.pop(removed_key)
        request.GET.update(filters_dict)
        return None
| anehx/anonboard-backend | jsonapi/middleware.py | middleware.py | py | 604 | python | en | code | 0 | github-code | 13 |
43813281073 | #这是datacleaning的完整运行文件,先仅限于2014-11-30.txt这一个文件,测试一下速度
#bisai2excel即便是单场比赛也有5240次变盘,写入excel非常缓慢
#即便转成json,单场比赛写入后的文件居然有400M,因为拆分后每张表的keys都要重复一遍,这样就变得很大
#应该想办法把数据缩小,比如看能不能用多维数组之类的
#尝试用xarray然后以netCDF格式(.nc)存储,但是出了个问题,float object has no attribute 'encode'
#可能是因为数据集里有缺失值nan被当做字符串了,但是后面又有浮点数据,所以出错。
#只要设一个frametime作为索引,然后把所有的表连起来就可以了,这样就可以存在一个csv里————20200621
#当前的速度是一天比赛的数据预处理需要668秒,但是数据大小是2.3G,下一步是改用netCDF格式,以及去除一些无用的部分数据,看是否速度提升体积下降————20200621
#下一步是可能搞一个双索引(urlnum,frametime),然后一天的比赛都存在一个dataframe里————20200621
#单纯的netCDF,没有sparse,并不会使体积减小。暂时只能通过删数据来缩小体积
#另外还有一种方案,就是在训练中动态处理数据并提交,可以省空间
#把gailv等于fanhuanlv除以peilv,所以把这个删掉
# gevent monkey-patching must run before the other imports take effect.
from gevent import monkey;monkey.patch_all()
import gevent
import re
import datetime
import pandas as pd
import time
import csv
start=time.time()
'''先转成csv'''
# Read the single-line raw dump for one day of matches.
f=open('D:\\data\\20141130-20160630\\2014-11-30.txt','r')
line=f.readline()
f.close()# remember to close the file
# Split the one-line dump into a list of '{...}' record strings with a regex.
datalist=re.findall('{.*?}',line)
keys=['date', 'urlnum', 'league', 'cid', 'zhudui', 'kedui', 'companyname', 'resttime', 'fanhuanlv',
'peilv_host', 'peilv_fair', 'peilv_guest',
'gailv_host', 'gailv_fair', 'gailv_guest',
'kailizhishu_host', 'kailizhishu_fair', 'kailizhishu_guest']
df=pd.DataFrame(columns=keys)# lay out the columns first
def str2dict(str):#讲datalist中的单个元素转换插入dataframe的函数
dic=eval(str)
del dic['timestamp']
dic['peilv_host']=dic['peilv'][0]
dic['peilv_fair']=dic['peilv'][1]
dic['peilv_guest']=dic['peilv'][2]
del dic['peilv']
del dic['gailv']
dic['kailizhishu_host']=dic['kailizhishu'][0]
dic['kailizhishu_fair']=dic['kailizhishu'][1]
dic['kailizhishu_guest']=dic['kailizhishu'][2]
del dic['kailizhishu']
return dic
dictlist=list(map(str2dict,datalist))#把datalist变成了字典形式的dictlist,即每个元素都是字典
with open('D:\\data_csv\\2014-11-30.csv','w',newline='') as f:
w=csv.DictWriter(f,keys)
w.writeheader()
w.writerows(dictlist)
'''再拆成单个比赛,写入文件夹'''
df=pd.read_csv('D:\\data_csv\\2014-11-30.csv',encoding='GBK')
def bisaiquery(num):
bisai=df.loc[lambda dfa:dfa.urlnum==num]
return bisai
urlnumlist=list(df['urlnum'].value_counts().index)#获得比赛列表
bisailist=list(map(bisaiquery,urlnumlist))#获得由各个比赛的dataframe组成的表
def bisai2csv(bisai):#把单场比赛转换成csv文件
urlnum=str(bisai.urlnum.values[0])
resttimelist=list(bisai.resttime.value_counts().sort_index(ascending=False).index)#获得该场比赛的变盘列表并排序
dfdict=list()
for i in resttimelist:
df=bisai.loc[lambda bisai:bisai.resttime>=i]
df['frametime']=i
df=df.set_index('frametime')
dfdict.append(df.drop_duplicates('cid',keep='last'))
newdict=pd.concat(dfdict)#一个新的
newdict.to_csv('D:\\data_csv\\'+urlnum+'.csv')
def coprocess(bisailist):#用协程的方式并发写入
ge = list()
for i in bisailist:
ge.append(gevent.spawn(bisai2csv,i))
gevent.joinall(ge)
coprocess(bisailist)
end=time.time()
print('耗时'+str(end-start)+'秒')
| Utschie/ML_Monitoring_Trade | Apocalypse/datacleaning_1.0.py | datacleaning_1.0.py | py | 4,155 | python | zh | code | 0 | github-code | 13 |
10336343860 | #!/usr/bin/env python3
import argparse
import asyncio
import logging
import socket
import socketserver
import threading
from db_operation import *
from database import db
from flask import g
def get_db():
if 'db' not in g:
g.db = connect_to_database()
return g.db
LOG = logging.getLogger(logging.basicConfig())
LOG.setLevel(logging.DEBUG)
ELECTION = b"vote"
HEALTHCHECK = b"ok?"
VICTORY = b"vctr"
WHOISLEADER = b"ldr?"
TRANSACTION = b"action"
ACK = b"ack"
NACK = b"nack"
CLIENT_TIMEOUT = 1
class Process(object):
"""
Process represents one leader-elected process. It has an ID and knows about a number of peers.
In its initial state:
- it is not leader
- it does not know who the leader is
- it is not in the process of electing a new leader
"""
def __init__(self, id, peers, qw=3, qr=1):
self.id = id
self.peers = peers
self.election = False
self.leader_id = None
self.multicaster = Multicaster()
self.qw = qw
self.qr = qr
def am_leader(self):
"""
am_leader returns True if a leader ID is known and the leader ID is my own ID.
Otherwise, returns False.
"""
if self.leader_id is None:
return False
return self.leader_id == self.id
def handle_request_vote(self, *args):
"""
handle_request_vote is the callback function invoked when someone requests a leader election.
"""
if self.election:
LOG.error("already doing an election!")
return NACK
LOG.info("doing an election")
thread = threading.Thread(target=self.perform_election)
thread.daemon = True
thread.start()
return ACK
def handle_request_healthcheck(self, *args):
"""
handle_request_healthcheck is the callback function invoked when someone asks if we are alive.
"""
return ACK
def handle_request_victory(self, *args):
"""
handle_request_victory is the callback function invoked when a leader is elected
"""
victor = int(args[0])
if victor < self.id:
return NACK # do not acknowledge leadership of filthy peasants
self.leader_id = victor
return ACK
def handle_request_leader(self, *args):
"""
handle_request_leader is the callback function invoked when someone asks who the leader is
"""
if self.leader_id is None:
return None
return ("%s:%d" % self.peers[self.leader_id]).encode("utf-8")
def perform_election(self):
"""
perform_election is invoked when we want to perform leader election
"""
if self.election:
return
self.election = True
# optimistically assume that we can be the leader initially
can_be_leader = True
try:
# notify all peers with a higher id
notify_peers = self.peers[self.id + 1 :]
if not notify_peers:
# it's up to us to take the mantle
return
responses = self.multicaster.multisend(ELECTION, notify_peers)
if any(e is not None and e == ACK for e in responses):
can_be_leader = False
finally:
if can_be_leader:
self.assume_leadership()
self.election = False
def assume_leadership(self):
"""
assume_leadership is invoked when we determine we may be the leader
"""
msg = VICTORY + b" %d" % self.id
other_peers = self.peers[: self.id] + self.peers[self.id + 1 :]
responses = self.multicaster.multisend(msg, other_peers)
LOG.debug("assume_leadership: responses: %s", responses)
num_acked = len(list(filter(lambda r: r is not None and r == ACK, responses)))
num_required = self.qw - 1
if num_acked < num_required:
LOG.warning("insufficient peers acked (wanted %d, got %d): not asssuming leadership", num_required, num_acked)
else:
self.leader_id = self.id
def handle_room_update_message(self, room):
"""
handle_room_update_message handles the message from other peers. Should implement 2-phase protocol.
- Check if leader; True --> Start 2 phase protocol
False --> do nothing.
:param room: Message from other peers. Assumed to be room number for now.
"""
LOG.info("Update on Room %d", int(room))
# Insert Database update operation here.
try:
# db = get_db()
# check = update_db(int(room), db) #Currently not working..
check = 1 #Placeholder
if check == 1:
successful = True
if successful:
LOG.info("Made successful update on room %d", int(room))
return ACK
else:
LOG.info("Unsuccessful update on room %d", int(room))
return NACK
except Exception as e:
LOG.error("got exception: " + str(e))
def run(self):
callbacks = {
ELECTION: self.handle_request_vote,
HEALTHCHECK: self.handle_request_healthcheck,
VICTORY: self.handle_request_victory,
WHOISLEADER: self.handle_request_leader,
TRANSACTION: self.handle_room_update_message,
}
hostport = self.peers[self.id]
with socketserver.TCPServer(hostport, Handler(callbacks)) as server:
try:
LOG.info("listening on {}:{}".format(*hostport))
server.serve_forever()
except KeyboardInterrupt as e:
LOG.critical("user sent keyboard interrupt, shutting down")
except Exception as e:
LOG.error("got exception: " + str(e))
class Handler(socketserver.BaseRequestHandler):
"""
Handler handles incoming messages and calls its relevant callback function.
"""
def __init__(self, callbacks):
self.callbacks = callbacks
def __call__(self, request, client_address, server):
h = Handler(self.callbacks)
socketserver.BaseRequestHandler.__init__(h, request, client_address, server)
def handle(self):
msg = self.request.recv(1024).strip()
LOG.debug("got {} from {}:{}".format(msg, *self.client_address))
if not msg:
return
verb = msg.split(b" ")[0]
args = msg.split(b" ")[1:]
if verb not in self.callbacks:
LOG.error("no idea what to do with {} {}".format(verb, args))
return
cbfunc = self.callbacks[verb]
resp = cbfunc(*args)
if resp:
self.request.sendall(resp)
class Multicaster(object):
"""
Multicaster handles sending messages to multiple peers simultaneously.
"""
def multisend(self, msg, peers):
"""
multisend sends msg to all peers simultaneously
"""
tasks = []
for peer in peers:
tasks.append(self.asend(msg, peer))
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
results = loop.run_until_complete(asyncio.gather(*tasks))
if isinstance(results, list):
return results
else:
return [results]
finally:
loop.stop()
async def asend(self, msg, peer):
"""
asend is an async wrapper for send.
"""
return self.send(msg, peer)
def send(self, msg, peer):
"""
send sends msg to peer
"""
host, port = peer
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(CLIENT_TIMEOUT)
try:
s.connect((host, port))
s.sendall(msg)
LOG.debug("sent {} to {}:{}".format(msg, host, port))
resp = s.recv(1024).strip()
LOG.debug("got {} from {}:{}".format(resp, host, port))
return resp
except socket.timeout:
LOG.error("timeout sending {} to {}:{}".format(msg, host, port))
return None
except ConnectionRefusedError:
LOG.error("error connecting to {}:{}".format(host, port))
return None
def parse_hostport(hostport_str):
"""
parse_hostport transforms a string "host:port" to a tuple of (host:str, port:int)
Example:
parse_hostport("localhost:12345") -> ("localhost", 12345)
"""
host, port_str = hostport_str.split(":")
return host.strip(), int(port_str.strip())
def main():
desc = '''
module for inter-process communication and leader election.
Sample usage follows:
# launch one instance binding to localhost:9990 (peers[0])
$ booking/ipc.py --id 0 --peers localhost:9990 localhost:9991 &
[1] 5313
INFO:root:listening on localhost:9990
# launch another process bindding to localhost:9991 (peers[1])
$ booking/ipc.py --id 1 --peers localhost:9990 localhost:9991 &
[2] 5476
INFO:root:listening on localhost:9991
# if we ask 0 who the leader is, it doesn't know
$ echo 'ldr?' | nc localhost 9990
DEBUG:root:got b'ldr?' from 127.0.0.1:48960
# we can ask 0 to initiate an election
$ echo 'vote' | nc localhost 9990
DEBUG:root:got b'vote' from 127.0.0.1:48966
INFO:root:doing an election
ackDEBUG:asyncio:Using selector: EpollSelector
# 0 asks 1 to vote
DEBUG:root:sent b'vote' to localhost:9991
DEBUG:root:got b'vote' from 127.0.0.1:47098
# 1 initiates an election
INFO:root:doing an election
DEBUG:root:got b'ack' from localhost:9991
DEBUG:asyncio:Using selector: EpollSelector
# 1 has no higher peer so declares itself victor
DEBUG:root:sent b'vctr 1' to localhost:9990
DEBUG:root:got b'vctr 1' from 127.0.0.1:48970
DEBUG:root:got b'ack' from localhost:9990
# now 0 knows who the leader is
$ echo 'ldr?' | nc localhost 9990
DEBUG:root:got b'ldr?' from 127.0.0.1:48972
localhost:9991
'''
parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--id", type=int, default=0)
parser.add_argument("--peers", type=str, nargs="+", default=["localhost:9999"])
parser.add_argument("--quorum_write", type=int, default=3)
parser.add_argument("--quorum_read", type=int, default=1)
args = parser.parse_args()
socketserver.TCPServer.allow_reuse_address = True
peers = list(map(parse_hostport, args.peers))
p = Process(args.id, peers, args.quorum_write, args.quorum_read)
p.run()
if __name__ == "__main__":
main()
| johnstcn/cs7ns6groupF | booking/ipc.py | ipc.py | py | 10,822 | python | en | code | 1 | github-code | 13 |
17055791894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MemberCardTemplateAuxiliaryItem import MemberCardTemplateAuxiliaryItem
from alipay.aop.api.domain.MemberCardTemplateHeaderConfig import MemberCardTemplateHeaderConfig
from alipay.aop.api.domain.MemberCardTemplateOperationItem import MemberCardTemplateOperationItem
from alipay.aop.api.domain.MemberCardTemplateSecondaryItem import MemberCardTemplateSecondaryItem
class MemberCardTemplateConfig(object):
def __init__(self):
self._auxiliary_item_list = None
self._header = None
self._operation_item_list = None
self._secondary_item_list = None
@property
def auxiliary_item_list(self):
return self._auxiliary_item_list
@auxiliary_item_list.setter
def auxiliary_item_list(self, value):
if isinstance(value, list):
self._auxiliary_item_list = list()
for i in value:
if isinstance(i, MemberCardTemplateAuxiliaryItem):
self._auxiliary_item_list.append(i)
else:
self._auxiliary_item_list.append(MemberCardTemplateAuxiliaryItem.from_alipay_dict(i))
@property
def header(self):
return self._header
@header.setter
def header(self, value):
if isinstance(value, MemberCardTemplateHeaderConfig):
self._header = value
else:
self._header = MemberCardTemplateHeaderConfig.from_alipay_dict(value)
@property
def operation_item_list(self):
return self._operation_item_list
@operation_item_list.setter
def operation_item_list(self, value):
if isinstance(value, list):
self._operation_item_list = list()
for i in value:
if isinstance(i, MemberCardTemplateOperationItem):
self._operation_item_list.append(i)
else:
self._operation_item_list.append(MemberCardTemplateOperationItem.from_alipay_dict(i))
@property
def secondary_item_list(self):
return self._secondary_item_list
@secondary_item_list.setter
def secondary_item_list(self, value):
if isinstance(value, list):
self._secondary_item_list = list()
for i in value:
if isinstance(i, MemberCardTemplateSecondaryItem):
self._secondary_item_list.append(i)
else:
self._secondary_item_list.append(MemberCardTemplateSecondaryItem.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.auxiliary_item_list:
if isinstance(self.auxiliary_item_list, list):
for i in range(0, len(self.auxiliary_item_list)):
element = self.auxiliary_item_list[i]
if hasattr(element, 'to_alipay_dict'):
self.auxiliary_item_list[i] = element.to_alipay_dict()
if hasattr(self.auxiliary_item_list, 'to_alipay_dict'):
params['auxiliary_item_list'] = self.auxiliary_item_list.to_alipay_dict()
else:
params['auxiliary_item_list'] = self.auxiliary_item_list
if self.header:
if hasattr(self.header, 'to_alipay_dict'):
params['header'] = self.header.to_alipay_dict()
else:
params['header'] = self.header
if self.operation_item_list:
if isinstance(self.operation_item_list, list):
for i in range(0, len(self.operation_item_list)):
element = self.operation_item_list[i]
if hasattr(element, 'to_alipay_dict'):
self.operation_item_list[i] = element.to_alipay_dict()
if hasattr(self.operation_item_list, 'to_alipay_dict'):
params['operation_item_list'] = self.operation_item_list.to_alipay_dict()
else:
params['operation_item_list'] = self.operation_item_list
if self.secondary_item_list:
if isinstance(self.secondary_item_list, list):
for i in range(0, len(self.secondary_item_list)):
element = self.secondary_item_list[i]
if hasattr(element, 'to_alipay_dict'):
self.secondary_item_list[i] = element.to_alipay_dict()
if hasattr(self.secondary_item_list, 'to_alipay_dict'):
params['secondary_item_list'] = self.secondary_item_list.to_alipay_dict()
else:
params['secondary_item_list'] = self.secondary_item_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MemberCardTemplateConfig()
if 'auxiliary_item_list' in d:
o.auxiliary_item_list = d['auxiliary_item_list']
if 'header' in d:
o.header = d['header']
if 'operation_item_list' in d:
o.operation_item_list = d['operation_item_list']
if 'secondary_item_list' in d:
o.secondary_item_list = d['secondary_item_list']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MemberCardTemplateConfig.py | MemberCardTemplateConfig.py | py | 5,222 | python | en | code | 241 | github-code | 13 |
12702637524 | '''
Check whether a element entered by a user is present in the array or not.
'''
def search_in_array(arr):
element = int(input('enter the number to search: '))
for i in range(len(arr)):
if element == arr[i]:
return True
else:
return False
if __name__ == '__main__':
n = int(input('enter the limit: '))
arr = [None] * n
arr = list(map(int, input().split(" ")))
print(search_in_array(arr))
| Mayur-Debu/Datastructures | Array/Basic/Exercise_2.py | Exercise_2.py | py | 449 | python | en | code | 0 | github-code | 13 |
26802304612 | from __future__ import print_function
import scripts_helper
import json
from keras.models import model_from_json
from deepmoji.model_def import deepmoji_architecture, load_specific_weights
from deepmoji.global_variables import RAW_DATA_PATH, MODEL_DATA_PATH, \
STOCKTWITS_DATA_PATH, ORIGINAL_DATA_PATH, PROCESSED_DATA_PATH, TWITTER_DATA_PATH
from deepmoji.sentence_tokenizer import SentenceTokenizer
import pandas as pd
import re
import numpy as np
import pytz
import datetime
def bull(x):
ratio = float(1 + np.sum(x > 0)) / float(1 + np.sum(x < 0))
return np.log(ratio)
"""
Aggregation method: define how the tweets are aggregated
"""
# Define whether the aggregation takes place close-to-close or open-to-open:
aggregation_tweets = 'c2c'
if aggregation_tweets == 'c2c':
hours_shift = 8
minutes_shift = 0
elif aggregation_tweets == 'o2o':
hours_shift = -9
minutes_shift = -30
else:
hours_shift = 0
minutes_shift = 0
"""
Filtering: define how the tweets should be filtered, i.e. which tweets should be removed
"""
# Keep only messages which contain the company's cashtag:
has_cashtag = True
# Keep only messages which only contain the compnay's cashtag:
unique_cashtag = True
# Unique cashtags makes only sense if we keep only tweets which mention the company's cashtag:
unique_cashtag = unique_cashtag and has_cashtag
"""
Other settings: define time zones, location of data, ...
"""
# Define time zones:
tz_utc_in_stocktwits = pytz.timezone('UTC')
tz_zurich_in_twitter = pytz.timezone('Europe/Zurich')
tz_est_out = pytz.timezone('America/New_York')
# Define location of the relevant data:
dataset_path = '{}/DataDeepMojiStockTwits_finetuning.pickle'.format(RAW_DATA_PATH)
model_path = '{}/DeepMojiStockTwits_model.json'.format(MODEL_DATA_PATH)
specs_path = '{}/DeepMojiStockTwits_specs.json'.format(MODEL_DATA_PATH)
weights_path = '{}/DeepMojiStockTwits_weights.h5'.format(MODEL_DATA_PATH)
vocab_path = '{}/DeepMojiStockTwits_vocab.json'.format(MODEL_DATA_PATH)
# File specification:
file_specifications = ''
file_specifications = file_specifications + aggregation_tweets
if has_cashtag:
file_specifications = file_specifications + '_cashtag_only'
if unique_cashtag:
file_specifications = file_specifications + '_unique'
"""
Model: define and load all model-related things
"""
# Specify number of classes used for the classification:
nb_classes = 2
# Load the vocabulary:
with open(vocab_path, 'r') as f:
vocab = json.load(f)
# Load model specifications:
with open(specs_path, 'r') as f:
model_specs = json.load(f)
# Define the sentence tokenizer:
st = SentenceTokenizer(vocab, model_specs['maxlen'])
# Define architecture of the model:
model = deepmoji_architecture(nb_classes=nb_classes,
nb_tokens=len(vocab),
maxlen=model_specs['maxlen'],
embed_dropout_rate=0.25,
final_dropout_rate=0.5,
embed_l2=1E-6)
# Load weights of the model:
load_specific_weights(model=model, weight_path=weights_path)
# Load information about stocks for which we have data issues:
data_issue = pd.read_csv(
RAW_DATA_PATH + '/data_issue_info.tsv',
delimiter='\t')
# Load mapping file for the companies:
company_mapping = pd.read_csv(
RAW_DATA_PATH + "/SP500_Company_Mapping.tsv",
delimiter="\t")
"""
Information: load information about the stocks and the stock market
"""
# Exclude stocks for which we have data issues:
to_be_excluded = data_issue.loc[data_issue['exclude'] == 1, 'rpid'].values
to_remove = company_mapping['rpid'].map(lambda x: x in to_be_excluded)
company_mapping = company_mapping.loc[~to_remove, ]
# Load data with information about closing times/days of the NYSE:
holidays = pd.read_csv(ORIGINAL_DATA_PATH + '/Miscellaneous/NYSE_closing_days.tsv', delimiter='\t')
# Change column names:
holidays.columns = ['Date', 'Time', 'Holiday']
# Remove the time column:
holidays = holidays.drop('Time', axis=1)
# Define whether a date is a holiday:
holidays['Holiday'] = holidays['Holiday'].map(lambda x: x == 1)
# Transform the date-strings into datetime objects:
holidays['Date'] = holidays['Date'].map(lambda x: pd.Timestamp(x))
# Define a data-frame with all dates in our sample
closing_info = pd.DataFrame({'Date': pd.date_range(start=datetime.datetime(2010, 1, 1), end=datetime.datetime(2019, 1, 1))})
# Define a column that indicates all weekends:
closing_info['Weekend'] = closing_info['Date'].map(lambda x: x.weekday() in [5, 6])
# Merge the new data-frame with the information about holidays
closing_info = closing_info.merge(holidays, how='left', on='Date')
closing_info['Holiday'] = closing_info['Holiday'].fillna(False)
# Mark all closed days (either holiday or weekend)
closing_info['Closed'] = closing_info.apply(lambda x: x['Weekend'] or x['Holiday'], axis=1)
# Ensure that dates are a datetime object
closing_info.Date = closing_info.Date.dt.date
# Remove columns that are no longer needed:
closing_info = closing_info.drop(['Weekend', 'Holiday'], axis=1)
"""
Predict and aggregate sentiment of StockTwits
"""
sentiment_stocktwits = pd.DataFrame()
for rpid_i in company_mapping['rpid'].unique():
# Load data:
data_i = pd.read_csv(
STOCKTWITS_DATA_PATH + '/' + rpid_i + '_tweets.tsv',
encoding="mbcs",
quotechar='"', delimiter="\t", engine='python')
# Keep only relevant columns:
data_i = data_i[['text', 'tweet_datetime']]
# Remove empty messages:
data_i = data_i.loc[data_i['text'].map(lambda x: x is not None), :]
# Define regular expression for the company's cashtag:
cashtag_regex_i = '|'.join(r'([$]{1}\b' + company_mapping.loc[company_mapping['rpid'] == rpid_i, 'taq_ticker'] + r'\b)')
# Count number of company cashtags:
data_i['num_companycashtag'] = data_i['text'].map(lambda x: len(re.findall(cashtag_regex_i, x)))
# Count the number of all cashtags:
data_i['num_cashtag'] = data_i['text'].map(lambda x: len(re.findall(r'[$]\b[a-zA-z]+\b', x)))
# If wanted, remove tweets that do not mention the company's cashtag:
if has_cashtag:
data_i = data_i.loc[data_i['num_companycashtag'] > 0]
# If wanted, remove tweets that mention other cashtags:
if unique_cashtag:
data_i = data_i.loc[data_i['num_cashtag'] == data_i['num_companycashtag']]
# Transform strings to timestamps:
data_i['tweet_datetime'] = data_i['tweet_datetime'].map(lambda x: pd.Timestamp(x, tz=tz_utc_in_stocktwits))
# Change timezone to Eastern Time:
data_i['tweet_datetime_ET'] = data_i['tweet_datetime'].map(lambda x: x.astimezone(tz_est_out))
# Shift time depending on the aggregation scheme chosen previously:
data_i['tweet_datetime_ET_shifted'] = \
data_i['tweet_datetime_ET'].map(lambda x: x + datetime.timedelta(hours=hours_shift, minutes=minutes_shift))
# Define date based on the shifted ET timestamp:
data_i['Date'] = data_i['tweet_datetime_ET_shifted'].dt.date
try:
texts = [unicode(x) for x in data_i['text']]
except UnicodeDecodeError:
texts = [x.decode('utf-8') for x in data_i['text']]
X = st.get_test_sentences(texts)
pred_sentiment = model.predict(X, model_specs['batch_size'])
data_i['DeepMoji'] = [(int(x > 0.5) - 0.5)*2 for x in pred_sentiment.flatten()]
# Aggregate sentiments on a daily basis:
sentiment_i = data_i.groupby('Date').aggregate({'DeepMoji': [bull, np.mean]})
# Delete the raw data:
del data_i
# Transform multi-index column names to single level:
sentiment_i.columns = ['_'.join(col).strip() for col in sentiment_i.columns.values]
# Date (which acts as an index) to a column:
sentiment_i.reset_index(level=0, inplace=True)
# Add information about RavenPack ID:
sentiment_i['rpid'] = rpid_i
# Append data:
sentiment_stocktwits = sentiment_stocktwits.append(sentiment_i, ignore_index=True)
# Remove the sentiment data:
del sentiment_i
# Save aggregated StockTwits sentiment:
sentiment_stocktwits.to_csv(PROCESSED_DATA_PATH + '/StockTwits_DeepMoji_daily_' + file_specifications + '.csv')
"""
Predict and aggregate sentiment of Twitter
"""
sentiment_twitter = pd.DataFrame()
for rpid_i in company_mapping['rpid'].unique():
# Load data:
data_i = pd.read_csv(
TWITTER_DATA_PATH + '/' + rpid_i + '_tweets.tsv',
encoding="mbcs",
quotechar='"', delimiter="\t", engine='python')
# Keep only relevant columns:
data_i = data_i[['text', 'datetime']]
# Define regular expression for the company's cashtag:
cashtag_regex_i = '|'.join(r'([$]{1}\b' + company_mapping.loc[company_mapping['rpid'] == rpid_i, 'taq_ticker'] + r'\b)')
# Count number of company cashtags:
data_i['num_companycashtag'] = data_i['text'].map(lambda x: len(re.findall(cashtag_regex_i, x)))
# Count the number of all cashtags:
data_i['num_cashtag'] = data_i['text'].map(lambda x: len(re.findall(r'[$]\b[a-zA-z]+\b', x)))
# If wanted, remove tweets that do not mention the company's cashtag:
if has_cashtag:
data_i = data_i.loc[data_i['num_companycashtag'] > 0]
# If wanted, remove tweets that mention other cashtags:
if unique_cashtag:
data_i = data_i.loc[data_i['num_cashtag'] == data_i['num_companycashtag']]
# Transform strings to timestamps:
data_i['datetime'] = \
data_i['datetime'].map(lambda x: pd.Timestamp(x).tz_localize(tz=tz_zurich_in_twitter, ambiguous=True))
# Change timezone to Eastern Time:
data_i['datetime_ET'] = data_i['datetime'].map(lambda x: x.astimezone(tz_est_out))
# Shift time depending on the aggregation scheme chosen previously:
data_i['datetime_ET_shifted'] = \
data_i['datetime_ET'].map(lambda x: x + datetime.timedelta(hours=hours_shift, minutes=minutes_shift))
# Define date based on the shifted ET timestamp:
data_i['Date'] = data_i['datetime_ET_shifted'].dt.date
# Encode text data:
try:
texts = [unicode(x) for x in data_i['text']]
except UnicodeDecodeError:
texts = [x.decode('utf-8') for x in data_i['text']]
X = st.get_test_sentences(texts)
pred_sentiment = model.predict(X, model_specs['batch_size'])
data_i['DeepMoji'] = [(int(x > 0.5) - 0.5)*2 for x in pred_sentiment.flatten()]
# Aggregate sentiments on a daily basis:
sentiment_i = data_i.groupby('Date').aggregate({'DeepMoji': [bull, np.mean]})
# Delete the raw data:
del data_i
# Transform multi-index column names to single level:
sentiment_i.columns = ['_'.join(col).strip() for col in sentiment_i.columns.values]
# Date (which acts as an index) to a column:
sentiment_i.reset_index(level=0, inplace=True)
# Add information about RavenPack ID:
sentiment_i['rpid'] = rpid_i
# Append data:
sentiment_twitter = sentiment_twitter.append(sentiment_i, ignore_index=True)
# Remove the sentiment data:
del sentiment_i
# Save aggregated Twitter sentiment:
sentiment_twitter.to_csv(PROCESSED_DATA_PATH + '/Twitter_DeepMoji_daily_' + file_specifications + '.csv') | dballinari/DeepMoji-StockTwits-Classifier | scripts/predict_sentiment.py | predict_sentiment.py | py | 11,175 | python | en | code | 1 | github-code | 13 |
41567663691 | def solution(p):
if not p:
return ""
u, v = split_p(p)
if correct(u):
return u + solution(v)
else:
answer = '(' + solution(v) + ')'
u = u[1:-1]
u = reverse(u)
answer += u
return answer
def split_p(p):
left_count = 0
right_count = 0
for i in range(len(p)):
if p[i] == '(':
left_count += 1
else:
right_count += 1
if left_count == right_count:
return p[:i + 1], p[i + 1:]
def correct(u):
stack = []
for char in u:
if char == '(':
stack.append(char)
else:
if not stack:
return False
stack.pop()
return True
def reverse(u):
result =""
for char in u:
if char == '(':
result += ')'
else:
result += '('
return result | bnbbbb/Algotithm | 프로그래머스/lv2/60058. 괄호 변환/괄호 변환.py | 괄호 변환.py | py | 901 | python | en | code | 0 | github-code | 13 |
19749816046 |
# coding: utf-8
# In[8]:
import pandas as pd
import preprocess_picanet
import os
# In[1]:
def airways(row):
if row['InvVentET'] == True or row['InvVentTT'] == True or row['Niv'] == True or row['AvsJet'] == True or row['AvsOsc'] == True or row['AsthmaIVBeph'] == True or row['Naso'] == True or row['Trach'] == True or row['OxTherapy'] == True or row['Apnoea'] == True or row['ObsAir'] == True:
return 'Yes'
return 'No'
# In[2]:
def cv(row):
if row['ArtLine'] == True or row['ExtPace'] == True or row['CvpMon'] == True or row['InfInotrope'] == True or row['Bolus'] == True or row['Cpr'] == True or row['Ecmo'] == True or row['Vad'] == True or row['AbPump'] == True or row['ArrhythmiaAATherapy'] == True:
return 'Yes'
return 'No'
# In[3]:
def renal(row):
if row['PeriDia'] == True or row['HaemoDia'] == True or row['HaemoFilt'] == True or row['PlasmaFilt'] == True or row['PlasmaExch'] == True:
return 'Yes'
return 'No'
# In[4]:
def neuro(row):
if row['IcpMon'] == True or row['IntCathEvd'] == True or row['StatusEpilepticusAEDrugs'] == True or row['LowGCS'] == True:
return 'Yes'
return 'No'
# In[5]:
def analgesic(row):
if row['EpiduralCatheter'] == True or row['ContIVSedative'] == True:
return 'Yes'
return 'No'
# In[7]:
def metabolic(row):
if row['Dka'] == True:
return 'Yes'
return 'No'
def exclude_activity(df, activity):
total = 0
for event in activity:
total += (df.shape[0]-df.loc[df.EventID != event].shape[0])
df = df.loc[df.EventID != event]
print(f'Number of records with unwanted events:\n{total}')
return df
# In[9]:
def transform(df, activity, id_picanet,id_redcap):
df['airways'] = df.apply(lambda row: airways(row), axis=1)
df['cv'] = df.apply(lambda row: cv(row), axis=1)
df['renal'] = df.apply(lambda row: renal(row), axis=1)
df['neuro'] = df.apply(lambda row: neuro(row), axis=1)
df['analgesic'] = df.apply(lambda row: analgesic(row), axis=1)
df['metabolic'] = df.apply(lambda row: metabolic(row), axis=1)
df = preprocess_picanet.clean_ids(df,id_picanet,id_redcap)
df = exclude_activity(df, activity)
# Find and save records without participant_ids
print(f'Number of records with no ids:\n{df[df.index.isnull()].shape[0]}')
df[df.index.isnull()].to_csv(os.path.join(preprocess_picanet.generating_data_files('Deletes'), 'no_ids_activity.csv'))
df = df[~df.index.isnull()]
df.to_csv(os.path.join(preprocess_picanet.generating_data_files('Data'), 'picanet_activity.csv'))
return df
def activity_summary(df, activity):
for act in activity:
# Count of presence of activity
df_yes = (df.groupby(['EventID'])
.apply(lambda x: (x[act]== 'Yes').sum())
.reset_index(name='countYes'))
df_yes = df_yes.set_index('EventID')
# Count of absence of activity
df_no = df.groupby(['EventID']).apply(lambda x: (x[act]== 'No').sum()).reset_index(name='countNo')
df_no = df_no.set_index('EventID')
# Merge dataframes
df_merge = df_yes.merge(df_no, left_index=True, right_index=True)
df_merge['Percentage'] = df_merge.apply(lambda x: x['countYes']/(x['countYes']+x['countNo']), axis=1)
print('Summary stats of', act,'activity')
print(df_merge.describe())
print('Count of participants that had', act,'activity')
print(len(df_merge.loc[df_merge['countYes'] == 0]))
| conorhaynesm/CodeLesson1 | activity_picanet.py | activity_picanet.py | py | 3,823 | python | en | code | 0 | github-code | 13 |
12941229568 | def execute(numofnodes, node_connections):
graph = create_dictionary(numofnodes, node_connections)
traverse(graph)
def create_dictionary(numofnodes, node_connections):
graph = {}
for node in range(numofnodes):
graph[node] = []
for conn in node_connections:
graph[conn[0]].append(conn[1])
graph[conn[1]].append(conn[0])
return graph
def traverse(graph):
visited = set()
def dfs(node):
visited.add(node)
for child in graph[node]:
if child not in visited:
dfs(child)
keys = list(graph)
print(keys)
dfs(keys[4])
print(visited)
print(execute(7, [[0, 1], [0, 2], [1, 3], [2, 3], [2, 5], [5, 6], [3, 4]]))
| kaushik84/python_code_examples | Graph_traverse.py | Graph_traverse.py | py | 758 | python | en | code | 0 | github-code | 13 |
3726115590 | from garage.envs import PointEnv
from garage.tf.algos.rl2 import RL2Env
class TestRL2Env:
# pylint: disable=unsubscriptable-object
def test_observation_dimension(self):
env = PointEnv()
wrapped_env = RL2Env(PointEnv())
assert wrapped_env.spec.observation_space.shape[0] == (
env.observation_space.shape[0] + env.action_space.shape[0] + 2)
obs = env.reset()
obs2 = wrapped_env.reset()
assert obs.shape[0] + env.action_space.shape[0] + 2 == obs2.shape[0]
obs, _, _, _ = env.step(env.action_space.sample())
obs2, _, _, _ = wrapped_env.step(env.action_space.sample())
assert obs.shape[0] + env.action_space.shape[0] + 2 == obs2.shape[0]
| jaekyeom/IBOL | garaged/tests/garage/envs/test_rl2_env.py | test_rl2_env.py | py | 728 | python | en | code | 28 | github-code | 13 |
7525302120 | import cv2
import numpy as np
import torch
# Load the small YOLOv5 model ('yolov5s') from the ultralytics hub.
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
# Set the device to 'cuda' if available, otherwise use 'cpu'.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# Kalman filter with 8 state variables and 4 measured variables.
kalman = cv2.KalmanFilter(8, 4)
# Initial state vector (x, y, w, h, dx, dy, dw, dh).
kalman.statePre = np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float32)
# Measurement matrix: only (x, y, w, h) are observed directly.
kalman.measurementMatrix = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                                     [0, 1, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 1, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 1, 0, 0, 0, 0]], dtype=np.float32)
# No external control input is modelled.
kalman.controlMatrix = np.zeros((8, 4), np.float32)
# Process noise covariance (small: trust the motion model).
kalman.processNoiseCov = 1e-4 * np.eye(8, dtype=np.float32)
# Measurement noise covariance (larger: detections are noisy).
kalman.measurementNoiseCov = 1e-1 * np.eye(4, dtype=np.float32)
# True until the first confident detection seeds the filter state.
is_first_detection = True
tracked_position = None
# Open the default webcam.
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Perform person detection with YOLOv5.
    detections = model(frame)
    for detection in detections.xyxy[0]:
        # Keep detections with confidence > 0.5 and class id 0
        # (person, for the COCO-trained yolov5s model).
        if detection[4] > 0.5 and detection[5] == 0:
            bbox = detection[:4]
            measurement = np.array(bbox, dtype=np.float32)
            # On the first detection seed the filter state directly;
            # afterwards feed detections in as measurement corrections.
            if is_first_detection:
                kalman.statePre[:4] = measurement
                is_first_detection = False
            else:
                kalman.correct(measurement)
    # Predict once per frame, even when no detection arrived.
    prediction = kalman.predict()
    predicted_box = prediction[:4]
    # Draw the predicted bounding box on the frame.
    # NOTE(review): the box is treated as (x, y, w, h), but YOLOv5's xyxy
    # rows are corner coordinates (x1, y1, x2, y2) — confirm the intended
    # convention.
    x, y, w, h = predicted_box.astype(int)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Object Tracking", frame)
    # Quit when 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| MidlajN/OpenCV | object_tracking/kalman_filter.py | kalman_filter.py | py | 2,282 | python | en | code | 0 | github-code | 13 |
21928522258 | # ------ #
# Utils for PlantUML format
# ------ #
# In this file there are functions, classes or variables that are used widely
# in the GitHub actions of this repository and imported accordingly in other
# scripts.
# - #
# System imports
# - #
import json
import os
from typing import Union
from json_manipulation import create_parent_json_paths, \
find_json_paths
from string_manipulation import remove_char_from_list, \
add_char_to_list, \
create_highlights_liner, \
validate_json_file_path
# -#
# Hardcoded values
# -#
plantUML_format_dictionary = {
    # Characters stripped from JSON-path segments (array index brackets).
    "strings_to_strip": ["[", "]"],
    # Quote placed around each property name in the highlight line.
    "property_quote": '"',
    # Separator between path segments in a PlantUML highlight line.
    "highlight_separator": " / "
}
# -#
# Helper functions
# -#
# -#
# Class definition
# -#
class PlantUMLFormatter:
    """
    PlantUMLFormatter - a class for converting JSON objects to PlantUML syntax.
    Arguments:
        - json_filepath (str): a string representing the path to the JSON file to be loaded.
            The file must be of type .json. The file is only used if "json_data" was not given.
        - json_data (dict): a dictionary representing the JSON data to be used.
    """
    def __init__(self, json_filepath: str = "", json_data: Union[dict, None] = None):
        self.load_json_data(json_filepath=json_filepath, json_data=json_data)
        self.empty_jsonpath_list()
        self.empty_highlight_header_list()
        # Rendered PlantUML highlight header (one "#highlight ..." line per path).
        self.highlights_header = ""
    def load_json_data(self, json_filepath: str = "", json_data: Union[dict, None] = None) -> None:
        """
        Loads JSON data from either a file or a dictionary into self.original_json.
        It's used not only at the definition of the class, but also can be used later on
        to modify the original JSON once the highlight header has been created. This allows to
        highlight a reduced version of a JSON object.
        For example: if we highlight by 'termId' as property name, but want to highlight a
        smaller JSON object, we could create the header with the full JSON object, and then
        replace the saved JSON as a smaller version of it.
        """
        # We either take the given JSON data or load the given JSON file
        if isinstance(json_data, dict):
            self.original_json = json_data
        elif json_filepath:
            validate_json_file_path(json_filepath)
            self.json_filepath = json_filepath
            with open(json_filepath, "r", encoding="utf-8") as json_file:
                self.original_json = json.load(json_file)
        else:
            raise TypeError(
                f"Expected either a JSON file ({json_filepath}) or JSON data and neither of them were provided."
            )
    def empty_jsonpath_list(self) -> None:
        """
        Empties the JSON Paths list (self.jsonpaths_list)
        """
        self.jsonpaths_list = []
    def empty_highlight_header_list(self) -> None:
        """
        Empties the list with JSON Paths in highlight format (self.highlight_header_list)
        """
        self.highlight_header_list = []
    def add_jsonpaths(self, condition_dict: dict) -> list:
        """
        Adds to the list of JSON Paths (self.jsonpaths_list) new paths based on given conditions
        from a condition dictionary. Returns the updated list.
        """
        new_json_paths = find_json_paths(
            json_data=self.original_json, condition_dict=condition_dict
        )
        # Avoid duplication of JSON Paths
        for new_json_path in new_json_paths:
            if new_json_path not in self.jsonpaths_list:
                self.jsonpaths_list.append(new_json_path)
        return self.jsonpaths_list
    def format_json_paths(
        self, plantUML_format_dictionary: dict, add_parent_properties: bool = True
    ) -> list:
        """
        Formats the list of JSON Paths (self.jsonpaths_list) into PlantUML syntax, populating the list
        of formatted JSON paths for highlights (self.highlight_header_list).

        NOTE: the plantUML_format_dictionary parameter shadows the module-level
        constant of the same name; callers typically pass that constant in.
        """
        # We get the parameters of the plantUML format dictionary
        try:
            strings_to_strip = plantUML_format_dictionary["strings_to_strip"] # e.g. ["[", "]"]
            property_quote = plantUML_format_dictionary["property_quote"] # e.g. '"'
            separator = plantUML_format_dictionary["highlight_separator"] # e.g. " / "
        except KeyError as e:
            raise KeyError( f"KeyError: {e}" )
        if len(self.jsonpaths_list) == 0:
            self.highlight_header_list = []
            return self.highlight_header_list
        # We separate each item of the JSON path
        paths = list(path.split(".") for path in self.jsonpaths_list)
        # And now format them in the correct PlantUML highlighting syntax
        if strings_to_strip is not None and isinstance(strings_to_strip, list):
            for string in strings_to_strip:
                paths = list(remove_char_from_list(path, string) for path in paths)
        if add_parent_properties:
            # Now we are going to add the parental paths so that they are also highlighted
            # instead of just having the leaves highlighted.
            # NOTE(review): `paths` is appended to while being iterated; newly
            # appended parents are iterated too. This converges (parents of
            # parents are already produced by create_parent_json_paths) but is
            # fragile — confirm it is intentional.
            for path in paths:
                full_path_list = create_parent_json_paths(path)
                for n_path in full_path_list:
                    if n_path not in paths:
                        paths.append(n_path)
        paths = list(add_char_to_list(path, property_quote) for path in paths)
        paths = list(separator.join(path) for path in paths)
        self.highlight_header_list = paths
        return self.highlight_header_list
    def create_highlights_header(self) -> str:
        """
        Creates the header based on the list of one-liners for the PlantUML highlights
        """
        self.highlights_header = "".join(
            create_highlights_liner(json_path_str)
            for json_path_str in self.highlight_header_list
        )
        return self.highlights_header
    def save_all(self, output_filepath: str, overwrite: bool = False) -> None:
        """
        Saves the JSON object with the correct start and end of file and its header
        Example: PlantUMLFormatter.save_all()
        Raises ValueError if the file exists and overwrite is False.
        """
        if os.path.isfile(output_filepath) and not overwrite:
            raise ValueError(
                f"File '{output_filepath}' already exists and 'overwrite' was set to False.\n "
                f"\tChoose a different filename or set overwrite to True."
            )
        with open(output_filepath, "w", encoding="utf-8") as f:
            # Move the file pointer to the beginning of the file and we add the PlantUML syntax one-liner
            f.seek(0, 0)
            f.write("@startjson\n")
            # We then add the header
            f.write(self.highlights_header)
            f.write("\n")
            # Then the JSON file per se
            f.write(json.dumps(self.original_json, indent=4))
            # And finally "@endjson" to the end of the file
            f.write("\n@endjson")
| EbiEga/ega-metadata-schema | .github/scripts/utils/plantuml_format.py | plantuml_format.py | py | 7,010 | python | en | code | 4 | github-code | 13 |
73503639376 |
import requests
from LedgerBoardApp.models import Node
import time
#distributes a new post or block to all known nodes
def distributeEntity(dataArray, type, originHost, selfHost):
    """Broadcast a new block or post to every known Node.

    Args:
        dataArray: ordered field values for the entity (layout depends on type,
            see the payload construction below).
        type: either "block" or "post"; selects the endpoint and payload shape.
        originHost: host the entity came from; skipped during fan-out.
        selfHost: our own host, advertised to peers as the entity's origin.

    Returns:
        dict mapping each contacted node's host to the peer's raw response
        body, or the string "Node took too long." when the request failed.
    """
    urlAddition = ""
    payload = {}
    if type == "block":
        urlAddition = "/newBlock/"
        payload = {
            'index': str(dataArray[0]),
            'ts': str(dataArray[1]),
            'prevBlockHash': str(dataArray[2]),
            'target': str(dataArray[3]),
            'nonce': str(dataArray[4]),
            'postArray': str(dataArray[5]),
            'originHost': str(selfHost)
        }
    elif type == "post":
        urlAddition = "/newPost/"
        payload = {
            'pubk': str(dataArray[0]),
            'ts': str(dataArray[1]),
            'content': str(dataArray[2]),
            'sig': str(dataArray[3]),
            'originHost': str(selfHost)
        }
    nodes = Node.objects.all()
    feedbackDictionary = {}
    if len(nodes) == 0:
        print('no nodes')
    for node in nodes:
        if node.host == originHost:
            continue
        currentTime = int(time.time())
        url = "http://" + str(node.host) + urlAddition
        try:
            print("distributing to:" + str(url))
            r = requests.post(url, data=payload, timeout=15)
            feedbackDictionary[str(node.host)] = r.content
            # Remember the last successful contact so stale peers can be pruned.
            node.secondsSinceLastInteraction = currentTime
            node.save()
        # Only network-level failures are treated as "peer unreachable";
        # anything else (e.g. a database error) now propagates instead of
        # being silently swallowed by a bare `except:`.
        except requests.exceptions.RequestException:
            # Evict peers we have not heard from for 48 hours (172800 s).
            if int(node.secondsSinceLastInteraction) < int(currentTime - 172800):
                node.delete()
            feedbackDictionary[str(node.host)] = "Node took too long."
    return feedbackDictionary
| basilthebeagles/LedgerBoard | LedgerBoardApp/helperFunctions/distributeEntity.py | distributeEntity.py | py | 1,781 | python | en | code | 0 | github-code | 13 |
43263383112 | from collections import Counter
n = int(input())
a = list(map(int, input().split()))
mod = 10 ** 9 + 7
cnt = Counter(a)
# Any reported value sharing n's parity is impossible -> answer 0.
if any(n % 2 == i % 2 for i in cnt):
    print(0); exit()
# Every nonzero value must occur exactly twice (0 is exempt from this check).
if any(cnt[i] != 2 for i in cnt if i != 0):
    print(0); exit()
# Each of the n // 2 value pairs can be assigned two ways.
print(pow(2, n // 2, mod))
| Shirohi-git/AtCoder | arc058-/arc066_a.py | arc066_a.py | py | 272 | python | en | code | 2 | github-code | 13 |
70502037778 | from chat.models import Chat
def getNumUnreadChatsForAccount(accountId):
    """Return how many chats have unread messages for the given account.

    Counts chats where the account is the buyer (unread on the buyer side)
    plus chats where the account sells the item (unread on the seller side).
    """
    # Use .count() so the database does the counting; the original fetched
    # every matching row just to call len() on it.
    buyerUnread = Chat.objects.filter(
        buyer__pk=accountId
    ).filter(hasUnreadBuyer=True).count()
    sellerUnread = Chat.objects.filter(
        item__seller__pk=accountId
    ).filter(hasUnreadSeller=True).count()
    return buyerUnread + sellerUnread
| vanshg/Bakkle | www/bakkle/common/methods.py | methods.py | py | 332 | python | en | code | 0 | github-code | 13 |
73098271056 | """
This file contains the definition of the SMPL model
forward: using pose and beta calculate vertex location
function get joints: calculate joints from vertex location
"""
from __future__ import division
from numpy.core.defchararray import array
import cv2
import torch
import torch.nn as nn
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from smpl.geometry import rodrigues
import smpl.config as cfg
class SMPL(nn.Module):
    """PyTorch implementation of the SMPL body model.

    Loads the gender-specific model pickle, registers its arrays as
    non-trainable buffers, and exposes forward(pose, beta) -> vertex
    positions plus joint-extraction helpers.
    """
    def __init__(self,
                 gender='neutral'):
        """
        Args:
            center_idx: index of center joint in our computations,
            model_file: path to pkl files for the model
            gender: 'neutral' (default) or 'female' or 'male'
        """
        super(SMPL, self).__init__()
        # Pick the gender-specific model file from the config.
        if gender == 'female':
            model_file = cfg.SMPL_FEMALE
        elif gender == 'male':
            model_file = cfg.SMPL_MALE
        else:
            model_file = cfg.SMPL_NEUTRAL
        with open(model_file, 'rb') as f:
            smpl_model = pickle.load(f, encoding='iso-8859-1')
        # The joint regressor is stored sparse; densify it to a (24, 6890)
        # matrix mapping vertices -> joints.
        J_regressor = smpl_model['J_regressor'].tocoo()
        row = J_regressor.row
        col = J_regressor.col
        data = J_regressor.data
        i = torch.LongTensor([row, col])
        v = torch.FloatTensor(data)
        J_regressor_shape = [24, 6890]
        self.register_buffer('J_regressor', torch.sparse.FloatTensor(i, v,
                                                                     J_regressor_shape).to_dense())
        # Skinning weights, pose/shape blend shapes, template mesh, faces.
        self.register_buffer(
            'weights', torch.FloatTensor(smpl_model['weights']))
        self.register_buffer(
            'posedirs', torch.FloatTensor(smpl_model['posedirs']))
        self.register_buffer(
            'v_template', torch.FloatTensor(smpl_model['v_template']))
        self.register_buffer('shapedirs',
                             torch.FloatTensor(np.array(smpl_model['shapedirs'])))
        self.register_buffer('faces',
                             torch.from_numpy(smpl_model['f'].astype(np.int64)))
        self.register_buffer('kintree_table', torch.from_numpy(
            smpl_model['kintree_table'].astype(np.int64)))
        # Parent index of each non-root joint in the kinematic tree.
        id_to_col = {self.kintree_table[1, i].item(): i for i in
                     range(self.kintree_table.shape[1])}
        self.register_buffer('parent', torch.LongTensor(
            [id_to_col[self.kintree_table[0, it].item()] for it in
             range(1, self.kintree_table.shape[1])]))
        # Default (zero) pose/shape/translation placeholders.
        self.pose_shape = [24, 3]
        self.beta_shape = [10]
        self.translation_shape = [3]
        self.pose = torch.zeros(self.pose_shape)
        self.beta = torch.zeros(self.beta_shape)
        self.translation = torch.zeros(self.translation_shape)
        self.verts = None
        self.J = None
        self.R = None
        self.joints_idx = cfg.JOINTS_IDX
        # The model is a fixed function of its inputs; no trainable params.
        self.requires_grad_(False)
    def forward(self, pose, beta): # return vertices location
        """Compute posed mesh vertices.

        Args:
            pose: (bs, 72) axis-angle or (bs, 24, 3, 3) rotation matrices.
            beta: (bs, 10) shape coefficients.
        Returns:
            (bs, 6890, 3) vertex positions.
        """
        device = pose.device
        batch_size = pose.shape[0]
        # Shape blend: v_shaped = v_template + shapedirs @ beta, (bs, 6890, 3).
        v_template = self.v_template[None, :]
        shapedirs = self.shapedirs.view(-1,
                                        10)[None, :].expand(batch_size, -1, -1)
        beta = beta[:, :, None]
        v_shaped = torch.matmul(shapedirs, beta).view(-1, 6890, 3) + v_template
        # batched sparse matmul not supported in pytorch
        J = []
        for i in range(batch_size):
            J.append(torch.matmul(self.J_regressor, v_shaped[i]))
        J = torch.stack(J, dim=0)
        # input is rotmat: (bs, 24, 3, 3)
        if pose.ndimension() == 4:
            R = pose
        # input is axis-angle: (bs, 72)
        elif pose.ndimension() == 2:
            pose_cube = pose.view(-1, 3) # (batch_size * 24, 1, 3)
            R = rodrigues(pose_cube).view(batch_size, 24, 3, 3)
            R = R.view(batch_size, 24, 3, 3)
        # Pose blend shapes: offsets driven by (R - I) of the 23 non-root joints.
        I_cube = torch.eye(3)[None, None, :].to(device)
        # I_cube = torch.eye(3)[None, None, :].expand(theta.shape[0], R.shape[1]-1, -1, -1)
        lrotmin = (R[:, 1:, :] - I_cube).view(batch_size, -1)
        posedirs = self.posedirs.view(-1,
                                      207)[None, :].expand(batch_size, -1, -1)
        v_posed = v_shaped + torch.matmul(posedirs, lrotmin[:, :, None]).view(-1, 6890,
                                                                              3)
        # Build per-joint 4x4 transforms and compose them down the kinematic chain.
        J_ = J.clone()
        J_[:, 1:, :] = J[:, 1:, :] - J[:, self.parent, :]
        G_ = torch.cat([R, J_[:, :, :, None]], dim=-1)
        pad_row = torch.FloatTensor([0, 0, 0, 1]).to(device).view(1, 1, 1, 4).expand(
            batch_size, 24, -1, -1)
        G_ = torch.cat([G_, pad_row], dim=2)
        G = [G_[:, 0].clone()]
        for i in range(1, 24):
            G.append(torch.matmul(G[self.parent[i - 1]], G_[:, i, :, :]))
        G = torch.stack(G, dim=1)
        # Remove the rest-pose joint locations from the transforms.
        rest = torch.cat([J, torch.zeros(batch_size, 24, 1).to(device)], dim=2).view(
            batch_size, 24, 4, 1)
        zeros = torch.zeros(batch_size, 24, 4, 3).to(device)
        rest = torch.cat([zeros, rest], dim=-1)
        rest = torch.matmul(G, rest)
        G = G - rest
        # Linear blend skinning: blend joint transforms per vertex via weights.
        T = torch.matmul(self.weights,
                         G.permute(1, 0, 2, 3).contiguous().view(24, -1)).view(6890,
                                                                               batch_size,
                                                                               4,
                                                                               4).transpose(
            0, 1)
        rest_shape_h = torch.cat(
            [v_posed, torch.ones_like(v_posed)[:, :, [0]]], dim=-1)
        v = torch.matmul(T, rest_shape_h[:, :, :, None])[:, :, :3, 0]
        return v
    def get_full_joints(self, vertices):
        """
        This method is used to get the joint locations from the SMPL mesh
        Input:
            vertices: size = (B, 6890, 3)
        Output:
            3D joints: size = (B, 24, 3)
        """
        joints = torch.einsum('bik,ji->bjk', [vertices, self.J_regressor])
        return joints
    def get_leaf_joints(self, joints):
        # Select the leaf joints of the kinematic tree from a (B, 24, 3)
        # joint tensor; returns (B, 6, 3).
        leaf_indexes = [0, 7, 8, 12, 20, 21]
        return joints[:, leaf_indexes, :]
| climbingdaily/SLOPER4D | smpl/smpl.py | smpl.py | py | 6,214 | python | en | code | 58 | github-code | 13 |
2920602976 | import csv
from statistics import mean
# Path to the input CSV of (month, profit/loss) rows with a header line.
csvpath = '/Users/Zhisen/Downloads/budget_data.csv'
with open(csvpath, 'r') as csvfile:
    budget = csv.reader(csvfile, delimiter=',')
    # Skip the header row.
    header = next(budget)
    month_count = 0
    total_net = 0
    profit_list = []
    month_list = []
    # Accumulate the net total and keep raw columns for the change analysis.
    for row in budget:
        month_count += 1
        total_net = total_net + int(row[1])
        profit = row[1]
        month = row[0]
        profit_list.append(profit)
        month_list.append(month)
# Month-over-month changes: change_list[k] = profit[k+1] - profit[k].
i = 1
change_list = []
while i < month_count:
    change = int(profit_list[i]) - int(profit_list[i-1])
    change_list.append(change)
    i = i + 1
average = mean(change_list)
maximum = max(change_list)
# +1 maps a change index back to the later of the two months it spans.
max_index = change_list.index(maximum) + 1
max_month = month_list[max_index]
minimum = min(change_list)
min_index = change_list.index(minimum) + 1
min_month = month_list[min_index]
# Report to stdout.
print('Financial Analysis')
print('-------------------------')
print(f"Total Months: {month_count}")
print(f"Total: ${total_net}")
print(f"Average Change : {average: .2f}")
print(f"Greatest Increase in Profits: {max_month} (${maximum})")
print(f"Greatest Decrease in Profits: {min_month} (${minimum})")
# Duplicate the same report into a text file.
title = 'Financial Analysis'
line = '-------------------------'
text1 = f"Total Months: {month_count}"
text2 = f"Total: ${total_net}"
text3 = f"Average Change : {average: .2f}"
text4 = f"Greatest Increase in Profits: {max_month} (${maximum})"
text5 = f"Greatest Decrease in Profits: {min_month} (${minimum})"
path = '/Users/Zhisen/python-challenge/PyBank/PyBank_output.txt'
with open(path, 'w') as outfile:
    output = outfile
    output.writelines(f'{title}\n{line}\n{text1}\n{text2}\n{text3}\n{text4}\n{text5}')
| Zhisen/python-challenge | PyBank/main.py | main.py | py | 1,804 | python | en | code | 0 | github-code | 13 |
26785879472 | import numpy as np
import cv2
from mss import mss
from PIL import Image
from Projects.DinoGame.ScreenRecorder import ScreenRecorder
from Projects.DinoGame.DinoWorld import DinoWorld
import Projects.DinoGame.KeyboardSim as KeyboardSim
import timeit
from Classification.NonLinear.NeuralNetwork.FeedForwardNN import FeedForwardNN
from Function.Cost.SquareError import SquareError
from Function.Output.Softmax import Softmax
from Function.Activation.TanH import TanH
from Function.Activation.RELU import RELU
class ChromeDinoGameBot2:
    """Online-learning bot for the Chrome dino game.

    Captures the screen region, extracts features via DinoWorld, predicts
    jump/stay with a small feed-forward net, and retrains the net on each
    death using frames around the last jump.
    """
    def __init__(self, screen_bbox, neighbors_per_set):
        self.screen_capper = ScreenRecorder(screen_bbox)
        self.world = DinoWorld(screen_bbox)
        # How many neighbouring frames around an event are used as one
        # training set.
        self.neighbors_per_set = neighbors_per_set
        # Minimum history length required before training after a death.
        self.min_ml_features_to_train = 1+self.neighbors_per_set*2
        # Rolling windows of recent feature vectors and dino states.
        self.ml_features = []
        self.dino_actions = []
        # Net with 4 inputs and 2 outputs (jump / stay), hidden sizes (3, 2).
        self.model = FeedForwardNN(np.zeros((1,4)), np.zeros((1,2)), TanH(), SquareError, Softmax, (3,2))
        print('model: ', self.model)
    def start(self):
        '''last_game_state is used so that, on death, the model does not
        train off of data multiple times (which includes nonsensical,
        "game over" data). Checks to see if the last game state is not game
        over, then sets the last game state to game over so that it does not
        train more than once'''
        last_game_state = "alive"
        # Main capture -> predict -> act loop; runs until interrupted.
        while(True):
            thresh_image = self.grab_and_preprocess_snap()
            game_state = self.world.update(thresh_image)
            '''check here if game state is "game_over", train
            ML model if it is'''
            if game_state == "alive":
                last_game_state = "alive"
                self.update_ml_features()
                if self.ml_features[len(self.ml_features)-1] is not None:
                    net_prediction = self.model.predict(self.ml_features[len(self.ml_features)-1])
                    KeyboardSim.press_key(net_prediction)
                    #print("net prediction: ", net_prediction)
            else:
                # Train exactly once per death, then restart the game
                # (press_key(1)) and clear the rolling history.
                if last_game_state != "game_over" and len(self.ml_features) > self.min_ml_features_to_train:
                    self.train_net()
                    print("----------------------------------")
                    KeyboardSim.press_key(1)
                    self.ml_features = []
                    self.dino_actions = []
                last_game_state = "game_over"
    def update_ml_features(self):
        # Keep the rolling windows at most 20 entries long.
        NUM_ML_FEATURES_TO_TRACK = 20
        if len(self.ml_features) >= NUM_ML_FEATURES_TO_TRACK:
            del self.ml_features[0]
            del self.dino_actions[0]
        self.ml_features.append(self.world.get_ml_feature())
        self.dino_actions.append(self.world.dino.state)
    def train_net(self):
        # Select training data based on what the dino was doing when it died.
        X,y = None, None
        if self.dino_actions[len(self.dino_actions)-1] == "still":
            '''train net assuming that the prior frame should have been a jump.
            Additionally, assume NUM_NEIGHBORING_FRAMES prior frames to this
            one should be classified as staying still'''
            X,y = self.generate_still_death_data()
            print("cause of death: still")
        elif self.dino_actions[len(self.dino_actions)-1] == "falling":
            '''train net assuming last "jump" command should have been later, +
            FRAME_DELTA frames from the actual jump. Additionally, assume data
            within +- NUM_NEIGHBORING_FRAMES within the new jump frame should
            be classified as staying'''
            X,y = self.generate_falling_death_data()
            print("cause of death: falling")
        elif self.dino_actions[len(self.dino_actions)-1] == "jumping":
            '''train net assuming last "jump" command should have been earlier, -
            FRAME_DELTA frames from actual jump. Additionally, assume data
            within +- NUM_NEIGHBORING_FRAMES within the new jump frame should
            be classified as staying'''
            X,y = self.generate_jumping_death_data()
            print("cause of death: jumping")
        print("last few dino actions: ", self.dino_actions[len(self.dino_actions)-5:])
        print("X: ", X)
        print("y: ", y)
        self.step_net(X,y)
    def generate_still_death_data(self):
        # Label the last neighbors_per_set frames as "stay" ([0, 1]).
        X = self.ml_features[len(self.ml_features)- self.neighbors_per_set :]
        y = [np.array([0,1]) for i in range(0, len(X))]
        #y[len(y)-1] = np.array([0,1])
        return X,y
    def generate_jumping_death_data(self):
        '''assume the frame prior to jump should have been a jump,
        and frame before should be still'''
        jump_index = self.get_last_jump_index()
        #prior_jump_index = jump_index - 1
        X = self.ml_features[jump_index - self.neighbors_per_set : jump_index]#[self.ml_features[prior_jump_index], self.ml_features[prior_jump_index]]
        # First frame of the window relabelled "jump" ([1, 0]), rest "stay".
        y = [np.array([0,1]) for i in range(0, len(X))]#[np.array([1,0]), np.array([0,1])]
        y[0] = np.array([1,0])
        return X,y
    def generate_falling_death_data(self):
        '''assume the frames within neighbors_per_set after jump should have
        been a jump, and jump frame should be still'''
        jump_index = self.get_last_jump_index()
        #after_jump_index = jump_index + 1
        X = self.ml_features[jump_index : jump_index + self.neighbors_per_set]#[self.ml_features[jump_index], self.ml_features[after_jump_index]]
        # First frame of the window relabelled "stay", rest "jump".
        y = [np.array([1,0]) for i in range(0, len(X))]
        y[0] = np.array([0,1])
        return X,y
    def get_last_jump_index(self):
        # Index of the most recent "jumping" frame whose predecessor was not
        # jumping; None when no such transition exists in the history.
        for i in range(len(self.dino_actions)-1, 1, -1):
            if self.dino_actions[i] == "jumping" and self.dino_actions[i-1] != "jumping":
                return i
        return None
    def step_net(self, X, y):
        # One SGD step per (feature, label) pair.
        for i in range(0, len(X)):
            self.model.train_step(X[i], y[i], learn_rate = 0.1, bias_learn_rate = 0.01)
    def grab_and_preprocess_snap(self):
        # Screenshot -> grayscale -> binary threshold image.
        thresh_image = self.binaritize_bw_image(cv2.cvtColor(self.screen_capper.snap(), cv2.COLOR_RGB2GRAY))
        return thresh_image
    def binaritize_bw_image(self, bw_image):
        # Inverted binary threshold (dark pixels -> 1), then median-blur to
        # remove speckle noise.
        MEDIAN_BLUR_RUN_TIMES = 1
        MEDIAN_BLUR_KERNEL_SIZE = 7
        thresh_image = cv2.threshold(bw_image, 200, 1, cv2.THRESH_BINARY_INV)[1]
        for i in range(0, MEDIAN_BLUR_RUN_TIMES):
            thresh_image = cv2.medianBlur(thresh_image, MEDIAN_BLUR_KERNEL_SIZE)
        return thresh_image
| peterhusisian/MLExperiments | Projects/DinoGame/ChromeDinoGameBot2.py | ChromeDinoGameBot2.py | py | 6,462 | python | en | code | 0 | github-code | 13 |
9063296083 | import sys
# Fast line reader for large inputs.
input = sys.stdin.readline
# Read n (number of Pokemon) and m (number of queries).
n, m = map(int, input().split())
# Lookup table keyed by name -> number (stored as a string).
book1 = {}
# Lookup table keyed by number (as a string) -> name.
book2 = {}
# Read the n Pokemon names and fill both lookup tables.
for i in range(1, n + 1):
    i = str(i)
    s = input().rstrip()
    book1[s] = i
    book2[i] = s
# Answers collected here and joined into one output string at the end.
str_list = []
# Numeric queries are resolved by number, all others by name.
for _ in range(m):
    s = input().rstrip()
    if s.isdigit():
        str_list.append(book2[s])
    else:
        str_list.append(book1[s])
# Build the whole output at once to avoid per-line print overhead.
answer = '\n'.join(str_list)
# Print the result.
print(answer)
| yudh1232/Baekjoon-Online-Judge-Algorithm | 1620 나는야 포켓몬 마스터 이다솜.py | 1620 나는야 포켓몬 마스터 이다솜.py | py | 676 | python | ko | code | 0 | github-code | 13 |
32904747255 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class bmrtable(models.Model):
    """Input record for a BMR (basal metabolic rate) calculation."""
    gender_choices = (
        ('m' , "Male"),
        ('f', 'Female')
    )
    age = models.IntegerField()
    # Two decimal places, up to 99.99 (unit not specified here — confirm).
    height = models.DecimalField(max_digits=4, decimal_places=2)
    # Two decimal places, up to 999.99.
    weight = models.DecimalField(max_digits=5, decimal_places=2)
    # Stored as 'm' or 'f'.
    gender = models.CharField(max_length=1 , choices=gender_choices,)
class mettable(models.Model):
    """Activity record for a MET calculation; the stored choice values look
    like MET numbers encoded as strings — confirm against the calculator
    logic."""
    # NOTE(review): 'jogging' and 'running' share the stored value '6', so
    # they are indistinguishable once saved.
    activity_choices = (
        ('7.5' , "cycling"),
        ('14', 'cycling up hill'),
        ('6', 'jogging'),
        ('6', 'running'),
        ('8', 'housework'),
        ('4', 'skipping'),
        ('11', 'walkingstairs'),
        ('5', 'pilates'),
        ('3', 'weight'),
    )
    level_choices = (
        ('0.01' , "Light"),
        ('0.02', 'Moderate'),
        ('0.03', "Vigorous"),
    )
    activity = models.CharField(max_length=50 , choices=activity_choices,)
    level = models.CharField(max_length=10 , choices=level_choices,)
    hours = models.IntegerField()
    minutes = models.IntegerField()
| SnehaDL/MET-Calculator | simple/models.py | models.py | py | 1,173 | python | en | code | 0 | github-code | 13 |
318845839 | # Renders a 2D model into a PPM image
import sys
import numpy as np
from commands.draw_line import draw_line
from commands.polyline import polyline
from commands.polygon import polygon
from commands.change_color import change_color
from commands.paint_screen import paint_screen
from commands.matrix import set_matrix, multiply_matrix
# ---------- Configuration types and constants ----------
IMAGE_DTYPE = np.uint8
COORD_DTYPE = np.int64
MODEL_DTYPE = np.float64
# Maximum allowed image width/height in pixels.
MAX_SIZE = 1024
# Maximum channel value written to the PPM header.
MAX_VAL = 255
MAX_LINE_LEN = 10240 - 1 # 10240 characters minus the \0 terminator
DEFAULT_BACKGROUND = 255
CHANNELS_N = 3
DEFAULT_COLOR = (0, 0, 0,)
# Here we register the functions that execute each command:
# each key of this dictionary is a command letter (c, L, C, ...)
# and each value is the function that executes that command.
COMMANDS = {
    'c': paint_screen,
    'L': draw_line,
    'C': change_color,
    'P': polyline,
    'R': polygon,
    'M': set_matrix,
    'm': multiply_matrix
}
# ---------- Output routines ----------
def validate_input(filename: str):
    """Read a drawing-instruction file and validate its header and commands.

    Returns (width, height, commands) where commands is a list of
    {'command': <letter>, 'parameters': [<str>, ...]} dicts; blank lines and
    '#' comment lines are skipped. Prints to stderr and exits with status 1
    on any validation error.
    """
    # BUG FIX: the original opened the module-level global `input_file_name`
    # and ignored the `filename` parameter entirely.
    with open(filename, 'rt', encoding='utf-8') as input_file:
        input_lines = input_file.readlines()
    # validate file format
    if input_lines[0] != 'EA979V3\n':
        print('input file format not recognized!', file=sys.stderr)
        sys.exit(1)
    # get image dimensions
    dimensions = input_lines[1].split()
    width = int(dimensions[0])
    height = int(dimensions[1])
    # validate image dimensions
    if width <= 0 or width > MAX_SIZE or height <= 0 or height > MAX_SIZE:
        print('input file has invalid image dimensions: must be > 0 and '
              f'<= {MAX_SIZE}!', file=sys.stderr)
        sys.exit(1)
    # initialize commands
    commands = []
    # validate each command (line numbers reported 1-based, header included)
    for line_n, line in enumerate(input_lines[2:], start=3):
        if len(line) > MAX_LINE_LEN:
            print(f'line {line_n}: line too long!', file=sys.stderr)
            sys.exit(1)
        if not line.strip():
            # Blank line - skips
            continue
        command = line[0]
        parameters = line[1:].strip().split()
        if command == '#':
            continue
        if command not in COMMANDS:
            print(f'command {command} not implemented', file=sys.stderr)
            sys.exit(1)
        commands.append({
            'command': command,
            'parameters': parameters,
        })
    # return validated input
    return width, height, commands
def get_image(width: int, height: int, background: int = DEFAULT_BACKGROUND):
    """Allocate a height x width, 3-channel image filled with `background`."""
    canvas_shape = (height, width, 3)
    return np.full(canvas_shape, fill_value=background, dtype=IMAGE_DTYPE)
def put_string(output, output_file):
    """Write `output` to a binary stream; str input is ASCII-encoded first.

    Exits the program with status 1 when the stream reports a short write.
    """
    if isinstance(output, str):
        data = output.encode('ascii')
    else:
        data = output
    if output_file.write(data) != len(data):
        print('error writing to output stream', file=sys.stderr)
        sys.exit(1)
def save_ppm(image, output_file):
    """Serialize `image` to `output_file` as a binary (P6) NETPBM PPM."""
    height, width = image.shape[0], image.shape[1]
    # Header: magic number, dimensions, maximum channel value, newline.
    put_string('P', output_file)
    put_string('6', output_file)
    put_string('\n', output_file)
    put_string('%d %d\n' % (width, height), output_file)
    put_string('%d' % MAX_VAL, output_file)
    put_string('\n', output_file)
    # Pixel payload: raw interleaved RGB bytes.
    put_string(image.tobytes(), output_file)
# ---------- Drawing/model routines ----------
if __name__ == '__main__':
    # Parses and checks command-line arguments
    if len(sys.argv) != 3:
        print("usage: python draw_2d_model.py <input.dat> <output.ppm>\n"
              "interprets the drawing instructions in the input file and "
              "renders the output in the NETPBM PPM format into output.ppm")
        sys.exit(1)
    input_file_name = sys.argv[1]
    output_file_name = sys.argv[2]
    # validate input
    width, height, commands = validate_input(input_file_name)
    # get image
    image = get_image(width, height)
    # Initial pen color (black) and identity transform matrix.
    pen_color = np.array([0, 0, 0])
    matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
    # Execute all commands; each handler returns the (possibly updated)
    # image and transform matrix.
    for command in commands:
        # execute command
        image, matrix = COMMANDS[command['command']](image,
                                                     pen_color,
                                                     matrix,
                                                     *command['parameters'])
    # outputs rendered image file
    with open(output_file_name, 'wb') as output_file:
        save_ppm(image, output_file)
| gnsoares/UNICAMP_EA979_ComputerGraphics | lab01_2DDrawing/draw_2d_model.py | draw_2d_model.py | py | 4,803 | python | en | code | 0 | github-code | 13 |
3041308761 | from maze import *
import random
class Player:
    """A maze player with a fixed start location and a queue of moves."""

    def __init__(self):
        # Starting cell in the maze grid.
        self.location = (29, 21)
        # Pending moves; each entry is a direction code 0-3.
        self.moves = []

    def randomMoves(self, n):
        """Append `n` uniformly random direction codes (0-3) to the moves."""
        for _ in range(n):
            self.moves.append(random.randint(0, 3))
if __name__ == '__main__':
    # Quick demo: generate ten random moves and show them.
    p = Player()
    p.randomMoves(10)
    print(p.moves)
| cjaltic/Personal | maze evolution/player.py | player.py | py | 351 | python | en | code | 0 | github-code | 13 |
71087170257 | '''
segments.py segments an image using image segmentation algorithms
(currently felzenszwalb or slic) and returns those segments as transparent pngs.
'''
import numpy as np
import os
import os.path
from collagen import utils
from PIL import Image, ImageOps
from random import choice
from skimage.segmentation import felzenszwalb
from skimage.segmentation import slic
from skimage.util import img_as_float
def label_size(label, imarray):
    """Number of pixels in `imarray` carrying segment id `label`."""
    return np.equal(imarray, label).sum()


def sorted_label_sizes(labels, imarray):
    """Return [label, pixel_count] pairs, smallest segment first."""
    pairs = [[label, label_size(label, imarray)] for label in labels]
    return sorted(pairs, key=lambda pair: pair[1])
def mask_felz(image, config):
    """Run Felzenszwalb segmentation on `image`.

    Parameters are read from config[':felzenszwalb'] (':scale', ':sigma',
    ':min_size'); returns the integer segment-label array.
    """
    scale = config[':felzenszwalb'][':scale']
    sigma = config[':felzenszwalb'][':sigma']
    min_size = config[':felzenszwalb'][':min_size']
    segments = felzenszwalb(image, scale, sigma, min_size)
    return segments
def mask_slic(image, config):
    """Run SLIC superpixel segmentation on `image`.

    Parameters are read from config[':slic'] (':n_segments', ':compactness',
    ':sigma'); returns the integer segment-label array.
    """
    n_segments = config[':slic'][':n_segments']
    compactness = config[':slic'][':compactness']
    sigma = config[':slic'][':sigma']
    segments = slic(image, n_segments, compactness, sigma)
    return segments
def n_masks(im,n, config):
    """Return white-on-black PIL masks for the largest segments of `im`.

    The image is 2x-downsampled before Felzenszwalb segmentation for speed.
    """
    im = np.array(im, dtype=np.uint8)
    im1 = img_as_float(im[::2, ::2])
    segments = mask_felz(im1, config)
    #segments = mask_slic(im1, config)
    labels = np.unique(segments)
    labelSizes = sorted_label_sizes(labels,segments)
    #print('# segments ' + str(len(labels)))
    # Take the largest segments while skipping the two very largest
    # (presumably background). NOTE(review): this slice yields n + 2 labels,
    # not n — confirm the intended count.
    largest_n_labels = labelSizes[-n-4:-2]
    masks=[]
    for label in largest_n_labels:
        im2 = np.zeros(im.shape,dtype=np.uint8)
        # NOTE(review): `segments` comes from the 2x-downsampled image while
        # `im2` is full resolution; this boolean index assumes matching
        # shapes — verify upstream sizing.
        im2[segments == label[0]] = [255,255,255]
        masks.append(Image.fromarray(im2))
    return masks
def segments(source_image_location, n, output_folder, config):
    """Save the largest transparent segments of an image as PNGs.

    Each mask produced by n_masks() is applied to the source image as an
    alpha channel; the result is cropped to the mask's bounding box and
    written to `output_folder` as <source_basename>_s<i>.png.
    """
    img = Image.open(source_image_location)
    im = img.convert('RGB')
    masks = n_masks(im, n, config)
    for i, mask in enumerate(masks):
        mask = mask.convert('L')
        bbox = mask.getbbox()
        # Fit the source image to the mask size, use the mask as alpha, then
        # crop away the fully transparent border.
        output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
        output.putalpha(mask)
        output = output.crop(bbox)
        # BUG FIX: Image.convert() returns a new image; the original call
        # discarded its result, leaving the line a no-op.
        output = output.convert('RGBA')
        segment_name = os.path.splitext(os.path.basename(source_image_location))[0] + '_s' + str(i) + '.png'
        output.save(os.path.join(output_folder, segment_name))
| r3vl1s/collagen | collagen/segments.py | segments.py | py | 2,538 | python | en | code | 0 | github-code | 13 |
67336302 | import re
def parse(inpath):
    """Tally how many data lines of `inpath` fall in each year.

    Each data line is tab-separated with a YYYY/MM/DD-style date in column
    14; the first line is treated as a header and skipped silently when it
    fails to parse. Other unparsable lines are reported to stdout.

    Returns a dict mapping year (int) -> number of lines for that year.
    """
    with open(inpath, 'r') as input_file:
        results = {}
        for i, line in enumerate(input_file):
            try:
                date = re.split('\t', line)[14]
                year = int(re.split('/', date)[0])
                # BUG FIX: the first occurrence of a year used to be recorded
                # as 0 (`results[year] = 0`), undercounting every year by one.
                results[year] = results.get(year, 0) + 1
            # Narrowed from a bare except: only parsing failures are expected.
            except (IndexError, ValueError):
                if i == 0:
                    pass  # header row
                else:
                    print('{0} gave an error!'.format(line))
        return results
def output(results, outpath):
    """Write the year -> count mapping to `outpath` as a two-column TSV,
    sorted by year, with a 'year\\tcount' header line."""
    with open(outpath, 'w') as out_file:
        rows = ['year\tcount\n']
        rows.extend('{0}\t{1}\n'.format(year, results[year])
                    for year in sorted(results))
        out_file.writelines(rows)
if __name__ == "__main__":
directory = '/d/data'
filename = 'eukaryotes.txt'
inpath = '/'.join([directory, filename])
outname = 'euseqnums.tsv'
outpath = '/'.join([directory, outname])
results = parse(inpath)
output(results, outpath)
print('seqnumcount complete.') | xescape/scripts | misc/SequenceNumberCounter.py | SequenceNumberCounter.py | py | 1,096 | python | en | code | 0 | github-code | 13 |
14357051892 | import os,json
from time import gmtime, strftime, sleep
#Configuer au prealable la commande cli aws avec votre API SECRET KEY et aussi la region souhaité
#Voir https://docs.aws.amazon.com/cli/latest/index.html
# Scratch file receiving the JSON output of `aws ec2 allocate-address`.
FILE_CREATION_IP = "ip.json"
# Log of successfully captured addresses.
GOOD_FILE = 'good.log'
# Log of errors.
BAD_FILE = 'bad.log'
# Telegram bot token used for notifications (must be filled in).
API_TELEGRAM_API_KEY = ""
# REQUIRED: file listing the IP addresses to watch for.
IP_LIST = 'list.ip'
# Integer returned by os.system() when a command succeeds.
REP_GOOD = 0
def logger(e, l=BAD_FILE):
    """Append a timestamped message to log file *l* (defaults to BAD_FILE).

    *e* may be any object, exception instances included: it is coerced
    with str() so the concatenation below cannot raise TypeError.
    """
    with open(l, 'a') as f:
        t = strftime("%a, %d %b %Y %H:%M:%S ", gmtime())
        f.write(t + "->> " + str(e) + "\n")
def notification(notif):
    """Send *notif* to the configured Telegram chat via curl.

    SECURITY NOTE: *notif* is interpolated straight into a shell command;
    a message containing quotes or shell metacharacters can inject
    commands. Prefer subprocess.run([...], shell=False) here.
    """
    try:
        os.system('curl -s -X POST https://api.telegram.org/'+API_TELEGRAM_API_KEY+'/sendMessage -d chat_id=1746121134 -d text="'+notif+'"')
    except Exception as e:
        # BUG FIX: `"..." + e` raised TypeError when an exception occurred.
        logger("Notification BUG : " + str(e))
def createEIP():
    """Allocate a new Elastic IP through the AWS CLI.

    Returns the public IP string on success, False when the CLI call
    failed (after notifying), or None if an exception was logged.
    """
    try:
        cmd = "aws ec2 allocate-address --output json > " + FILE_CREATION_IP
        c = os.system(cmd)
        if c == REP_GOOD:
            with open(FILE_CREATION_IP, "r") as f:
                elasticIp = json.load(f)
            print(str(elasticIp["AllocationId"]))
            return elasticIp["PublicIp"]
        else:
            # BUG FIX: c is an int; concatenating it to a str raised TypeError.
            message = "Impossible de creer l'addresse IP code erreur : " + str(c)
            notification(message)
            return False
    except Exception as e:
        logger("createEIP BUG : " + str(e))
def deleteEIP(AllocationId):
    """Release the Elastic IP identified by *AllocationId*.

    Returns the shell exit code (0) on success, None otherwise.
    """
    try:
        cmd = "aws ec2 release-address --allocation-id " + AllocationId
        c = os.system(cmd)
        if c == REP_GOOD:
            print("delete ip for allocation ID : " + AllocationId)
            return c
    except Exception as e:
        # BUG FIX: `"..." + e` raised TypeError when an exception occurred.
        logger("DeleteEIP BUG : " + str(e))
def verifcation(eIp, IP_LIST):
    """Check whether *eIp* appears in the list file *IP_LIST* (via grep).

    On a hit, sends a Telegram notification and records the address in
    GOOD_FILE; returns False when the address is not in the list.
    """
    try:
        cmd = "grep " + eIp + " " + IP_LIST
        c = os.system(cmd)
        if c == REP_GOOD:
            message = "Bravo ! patron je viens de prendre en otage l'adresse : " + eIp + " en otage."
            notification(message)
            logger(eIp, GOOD_FILE)
        else:
            return False
    except Exception as e:
        # BUG FIX: `"..." + e` raised TypeError when an exception occurred.
        logger("Verification BUG : " + str(e))
def main():
    # NOTE(review): body looks unfinished — it only initialises a counter
    # and never uses createEIP/verifcation/deleteEIP. Confirm intent.
    counter = 0
main()
| franckkragbe/bounty | verif.py | verif.py | py | 2,227 | python | en | code | 0 | github-code | 13 |
def hello():
    """Print a friendly greeting."""
    greeting = "Greetings, python user!"
    print(greeting)
def pack(param1, param2, param3):
    """Bundle the three arguments into a list, print it, and return it."""
    bundle = [param1, param2, param3]
    print(bundle)
    return bundle
def eat_lunch(list_input):
    """Print an eating order for the items in *list_input*.

    An empty lunchbox prints a complaint; otherwise the first item is
    announced with "First I eat" and every remaining one with "Next I eat".
    """
    if not list_input:
        print("My lunchbox is empty")
        return
    first, *rest = list_input
    print("First I eat", first)
    for item in rest:
        print("Next I eat", item)
# Demo calls exercising each helper above.
hello()
pack("sandwiches","hot dogs","pretzels")
eat_lunch([])
eat_lunch(["sandwiches"])
eat_lunch(["sandwiches","hot dogs","pretzels"])
35661055650 | from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from AIProjektZaliczeniowy.settings import MEDIA_URL
from Backend.forms import PicForm
from Backend.detect_face_image import detect_image
from Backend.models import Pic
def index(request):
    """List uploaded pictures (5 per page) and handle new uploads.

    On a valid POST the form is saved, face detection is run on the new
    image, and the client is redirected back here (POST/redirect/GET).
    On GET — or an invalid POST, whose bound form is re-rendered so its
    errors show — the gallery page is rendered.

    FIX: removed leftover debug `print(type(new_pic.pic))`.
    """
    form = PicForm()
    pics = Pic.objects.all().order_by('-id')
    posts_paginator = Paginator(pics, 5)
    page_number = request.GET.get('page')
    page = posts_paginator.get_page(page_number)
    if request.method == 'POST':
        form = PicForm(request.POST, request.FILES)
        if form.is_valid():
            new_pic = form.save()
            # run face detection on the uploaded picture
            detect_image(new_pic.pic)
            return redirect('index')
    context = {
        'page': page,
        'form': form,
        'MEDIA_URL': MEDIA_URL,
    }
    return render(request, 'index.html', context)
70609614738 | '''
Spark Custom Environment with the following modules:
vector-spark-module-py
vector-spark-module-r
foundry_ml
geopy
keras
python
r-base
seaborn
spacy
spacy-model-en_core_web_md
tensorflow
'''
def acled_cleaned(ds_1900_01_01_2022_03_21_Middle_East_Iraq_Syria):
    """Clean the ACLED Middle-East dataset and keep only Islamic State rows.

    Converts the epoch `timestamp` column to a Timestamp, drops columns
    that are duplicative or only known after the fact, converts the Spark
    frame to pandas, adds a combined `latlong` string column, and filters
    to rows where actor1 is Islamic State (Iraq) or (Syria).

    NOTE(review): `F` is presumably pyspark.sql.functions and the input a
    Spark DataFrame (Foundry transform) — confirm; neither is visible here.
    """
    # discard columns that are only known after the fact or are duplicative
    cols_to_discard = [
        "data_id",
        "iso",
        "event_id_cnty",
        "event_id_no_cnty",
        "event_date",
        "year",
        "time_precision",
        "inter1",
        "inter2",
        "interaction",
        "region",
        "country",
        "admin2",
        "admin3",
        "location",
        "geo_precision",
        "source",
        "source_scale",
        "iso3"
    ]
    df = (
        ds_1900_01_01_2022_03_21_Middle_East_Iraq_Syria
        .withColumn('timestamp',
            F.from_unixtime('timestamp')
            .cast('Timestamp')
        )
        .drop(*cols_to_discard)
    ).toPandas()
    # human-readable "lat, long" string for mapping tools
    df['latlong'] =df.latitude.astype(str) +", "+ df.longitude.astype(str)
    df = df.loc[(df['actor1'] == 'Islamic State (Iraq)') | (df['actor1'] == 'Islamic State (Syria)')]
    return df
def spacy_model():
    """Wrap a spaCy model (with word vectors) as a foundry_ml Model stage.

    NOTE(review): `SpacyModel` is not imported in this file's visible
    section — presumably provided by the Foundry environment; confirm.
    """
    import spacy
    from foundry_ml import Model, Stage
    # pass in a spacy model with vectors
    model = SpacyModel('en_core_web_md')
    return Model(Stage(model))
def model_inference(spacy_model, acled_cleaned):
    """Run the spaCy model over the `notes` column of the cleaned data.

    Filters (again) to Islamic State rows, drops NA rows, and transforms
    each `notes` value through the foundry_ml model wrapper, collecting
    the results into one DataFrame.

    NOTE(review): `pd` is not imported in the visible section (Foundry
    transform context assumed). `DataFrame.append` is removed in modern
    pandas — consider pd.concat. The inner print(type(x)) looks like
    leftover debug output.
    """
    cleaned_subset = acled_cleaned
    df = cleaned_subset
    # same actor filter as acled_cleaned — redundant but harmless
    df = df.loc[(df['actor1'] == 'Islamic State (Iraq)') | (df['actor1'] == 'Islamic State (Syria)')]
    parsed = pd.DataFrame()
    df=df.reset_index(drop=True)
    df = df.dropna()
    df['nlp_text'] =""
    def nlp(x):
        # x is a single `notes` value passed by Series.transform
        print(type(x))
        result = spacy_model.transform(pd.DataFrame({"text": x}))
        return result
    parsed = parsed.append(df['notes'].transform(lambda x: nlp(x)))
    return parsed
17048235684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ReferenceId import ReferenceId
from alipay.aop.api.domain.ReferenceId import ReferenceId
class AnttechBlockchainDefinSaasPaymentCheckModel(object):
    """Request model for the defin SaaS payment-check API.

    Plain string fields: fund_mode, order_type, platform_member_id.
    ReferenceId fields: payee_out_member_id and payer_out_member_id —
    assigning a dict to them converts it via ReferenceId.from_alipay_dict.
    """

    # All serialisable attribute names, in canonical order.
    _FIELDS = ('fund_mode', 'order_type', 'payee_out_member_id',
               'payer_out_member_id', 'platform_member_id')

    def __init__(self):
        self._fund_mode = None
        self._order_type = None
        self._payee_out_member_id = None
        self._payer_out_member_id = None
        self._platform_member_id = None

    @property
    def fund_mode(self):
        return self._fund_mode

    @fund_mode.setter
    def fund_mode(self, value):
        self._fund_mode = value

    @property
    def order_type(self):
        return self._order_type

    @order_type.setter
    def order_type(self, value):
        self._order_type = value

    @property
    def payee_out_member_id(self):
        return self._payee_out_member_id

    @payee_out_member_id.setter
    def payee_out_member_id(self, value):
        self._payee_out_member_id = (value if isinstance(value, ReferenceId)
                                     else ReferenceId.from_alipay_dict(value))

    @property
    def payer_out_member_id(self):
        return self._payer_out_member_id

    @payer_out_member_id.setter
    def payer_out_member_id(self, value):
        self._payer_out_member_id = (value if isinstance(value, ReferenceId)
                                     else ReferenceId.from_alipay_dict(value))

    @property
    def platform_member_id(self):
        return self._platform_member_id

    @platform_member_id.setter
    def platform_member_id(self, value):
        self._platform_member_id = value

    def to_alipay_dict(self):
        """Serialise every non-empty attribute into a plain dict."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AnttechBlockchainDefinSaasPaymentCheckModel()
        for name in AnttechBlockchainDefinSaasPaymentCheckModel._FIELDS:
            if name in d:
                # property setters handle ReferenceId conversion
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AnttechBlockchainDefinSaasPaymentCheckModel.py | AnttechBlockchainDefinSaasPaymentCheckModel.py | py | 3,704 | python | en | code | 241 | github-code | 13 |
31942281480 | from collections import defaultdict
from typing import List
"""
方法一:并查集
思路及解法
既然可以任意地交换通过“索引对”直接相连的字符,那么我们也任意地交换
通过“索引对”间接相连的字符。我们利用这个性质将该字符串抽象:将每
一个字符抽象为“点”,那么这些“索引对”即为“边”,我们只需要维护这个
“图”的连通性即可。对于同属一个连通块(极大连通子图)内的字符,我们
可以任意地交换它们。
这样我们的思路就很清晰了:利用并查集维护任意两点的连通性,将同属一个
连通块内的点提取出来,直接排序后放置回其在字符串中的原位置即可。
"""
# @lc code=start
class UnionFind:
    """Disjoint-set forest with path compression (no union by rank)."""

    def __init__(self, n: int):
        self.par = list(range(n))

    def union(self, x: int, y: int) -> None:
        """Merge the sets containing x and y."""
        self.par[self.find(x)] = self.find(y)

    def find(self, x: int) -> int:
        """Return the representative of x's set, compressing the path."""
        root = x
        while self.par[root] != root:
            root = self.par[root]
        # second pass: point every node on the path directly at the root
        while self.par[x] != root:
            self.par[x], x = root, self.par[x]
        return root

    def connected(self, x: int, y: int) -> bool:
        """True when x and y are in the same set."""
        return self.find(x) == self.find(y)
class Solution:
    def smallestStringWithSwaps(self, s: str, pairs: List[List[int]]) -> str:
        """Return the lexicographically smallest string reachable by freely
        swapping characters inside the connected components of *pairs*."""
        if not pairs:
            return s
        uf = UnionFind(len(s))
        for a, b in pairs:
            uf.union(a, b)
        # Group the characters of each connected component together.
        buckets = defaultdict(list)
        for idx, ch in enumerate(s):
            buckets[uf.find(idx)].append(ch)
        # Largest-first sort lets pop() hand characters back in ascending order.
        for bucket in buckets.values():
            bucket.sort(reverse=True)
        return ''.join(buckets[uf.find(idx)].pop() for idx in range(len(s)))
# @lc code=end
if __name__ == "__main__":
    # Ad-hoc smoke tests using the LeetCode 1202 examples.
    solu = Solution()
    print(solu.smallestStringWithSwaps(s="dcab", pairs=[[0, 3], [1, 2]]))
    s = "dcab"
    pairs = [[0, 3], [1, 2], [0, 2]]
    print(solu.smallestStringWithSwaps(s, pairs))
    print(solu.smallestStringWithSwaps(s="cba", pairs=[[0, 1], [1, 2]]))
| wylu/leetcodecn | src/python/p1200to1299/1202.交换字符串中的元素.py | 1202.交换字符串中的元素.py | py | 2,043 | python | zh | code | 3 | github-code | 13 |
14658412694 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 14:38:22 2018
@author: luiss
"""
from numpy import empty, zeros, full, sqrt, cos, sin, pi
from random import random, randint, seed
from pylab import figure, plot, show
# subplot(111)
# subplot(111).clear()
# pause
# pasos(m) distancia(dr)
# 100 5-20
# 200 5-60
# 300 10-30
# 500 20-40
# 1000 30-60
N = 1000 # number of random walkers to aggregate
m = 400 # number of steps per walker
dr = 10  # radial increment used to grow the launch radius
r = dr   # current launch radius around the seed
margin = 1  # walkers beyond r + margin*dr are discarded
C=m+1 # centre position (index) of the lattice
L=m*2+1 # side length of the occupancy lattice
posiciones = full((L,L), False)  # occupancy grid: True = aggregated site
# 8-connected neighbour offsets
nx = [-1, -1, -1, 0, 0, 1, 1, 1]
ny = [ 1, 0, -1, 1, -1, 1, 0, -1]
# randint(7)
def rand_move(raux):
    """Pick a uniformly random direction and return the integer (x, y)
    offsets of magnitude ~raux (each component truncated toward zero)."""
    angle = random() * 2 * pi
    return int(raux * cos(angle)), int(raux * sin(angle))
seed(10)
posiciones[C,C] = True # initial seed of the aggregate
i = 0  # number of walkers stuck so far
j = 0  # step counter of the current walker
while i < N:
    print("paso ",i)
    # launch the walker at a random point on a circle of radius r
    rx, ry = rand_move(r)
    x = C + rx
    y = C + ry
    j = 0
    while j < m:
        # one random step to an 8-connected neighbour
        ran = randint(0,7)
        x += nx[ran]
        y += ny[ran]
        # discard the walker once it wanders past r + margin*dr
        if (x-C)*(x-C) + (y-C)*(y-C) > (r + margin*dr)*(r + margin*dr):
            j=m
        # stick if any occupied neighbour is adjacent, then grow r
        # NOTE(review): range(7) tests only 7 of the 8 neighbour offsets
        # (index 7 is never checked) — confirm whether this is intended.
        for k in range(7):
            if posiciones[x + nx[k], y + ny[k]] == True:
                posiciones[x,y] = True
                i += 1
                j = m
                r = max(sqrt((C-x)*(C-x) + (C-y)*(C-y)) + dr, r)
                break
        j += 1
# print
# Collect the coordinates of aggregated sites (centred on the seed)
# and draw them as squares.
xplot=[]
yplot=[]
for i in range(L):
    for j in range(L):
        if posiciones[i,j] == True:
            xplot.append(i-C)
            yplot.append(j-C)
plot(xplot,yplot,'s')
"""
plot(range(N),x[:,1],'.-')
plot(range(N),x[:,2],'.-')
plot(range(N),x[:,3],'.-')
figure()
plot(x[:,1],y[:,1],'s-')
"""
| luisizq/Advanced_Physics_Computing | Assignment 6- Diffusive-limitted-aggregation/practica6_DAL_version1.py | practica6_DAL_version1.py | py | 1,737 | python | en | code | 0 | github-code | 13 |
26348805770 | from lib.income_statement import IncomeStatement
from lib.balance_sheet import BalanceSheet
from lib.cashflow import Cashflow
from lib.key_ratio import KeyRatio
from lib.quote import Quote
from lib.util import log
from lib.dcf import DCF
from lib import plot_tool
import pandas as pd
class Business():
    """Aggregates the financial statements of one listed company (by code)
    and derives reports (debt/equity, DCF valuation) and charts from them.
    """

    def __init__(self, code):
        self.code = code
        self.income = IncomeStatement(self.code)
        self.balance = BalanceSheet(self.code)
        self.cashflow = Cashflow(self.code)
        self.keyRatio = KeyRatio(self.code)
        self.quote = Quote(self.code)
        self.dcf = DCF()

    def fetchData(self, force=False):
        """Download (or re-download when *force*) every underlying dataset."""
        self.income.fetchData(force)
        self.balance.fetchData(force)
        self.cashflow.fetchData(force)
        self.keyRatio.fetchData(force)
        self.quote.fetchData(force)
        log('Fetch all data finish!')

    def debtEquityReport(self):
        """Return balance-sheet columns plus debt/equity and current ratios."""
        dfDebtEquity = self.balance.df[[
            'Short-term debt',
            'Other long-term liabilities',
            "Total stockholders' equity",
            'Total current assets',
            'Total current liabilities',
        ]]
        dfDebtEquity = dfDebtEquity.apply(pd.to_numeric)
        dfDebtEquity['负债权益比'] = round((dfDebtEquity['Short-term debt'] + dfDebtEquity['Other long-term liabilities'])/dfDebtEquity["Total stockholders' equity"],2)
        dfDebtEquity['流动比率'] = round(dfDebtEquity['Total current assets']/dfDebtEquity['Total current liabilities'], 2)
        return dfDebtEquity

    def chartBookValue(self):
        """Bar chart of book value per share."""
        plot_tool.bar(
            self.keyRatio.df.index,
            self.keyRatio.df['Book Value Per Share * CNY'],
            title='每股净资产'
        )

    def chartEPS(self):
        """Bar chart of earnings per share."""
        plot_tool.bar(
            self.keyRatio.df.index,
            self.keyRatio.df['Earnings Per Share CNY'],
            title='EPS'
        )

    def chartFCF(self):
        """Bar chart of historical free cash flow."""
        plot_tool.bar(
            self.cashflow.df.index,
            self.cashflow.df['Free cash flow'],
            title='自由现金流',
        )

    def chartPredictFCF(self, fcfReport):
        """Chart historical FCF with a linear-regression forecast whose
        horizon matches the rows of *fcfReport*."""
        fcf = self.cashflow.df['Free cash flow']
        yearNum = fcfReport['Free cash flow'].count()
        dfPredict = self.dcf.predictWithLinearRegression(fcf, yearNum, withPassYear=True)
        plot_tool.fcfAndPredictFcf(
            fcf.index,
            self.cashflow.df['Free cash flow'],
            dfPredict.index,
            dfPredict['Free cash flow'],
            title1='自由现金流线性回归',
            title2='自由现金流预测',
        )

    def showDCFReport(self):
        """Run a 5-year DCF and return (factorReport, fcfReport, valuationReport)."""
        ## dcf calculation
        beta = float(self.quote.df['Beta'].iloc[0])
        taxRate = self.taxRate()
        marketCap = self.marketCap()
        marketDebt = 0  # assumes no debt in the discounting — TODO confirm
        fcf = self.cashflow.df['Free cash flow']
        factorReport, fcfReport = self.dcf.calculationReport(beta, taxRate, marketCap, marketDebt, fcf, predictYear=5)
        ## valuation
        fcfPresentSum = fcfReport['fcf present'].sum() + fcfReport['terminal value present'].sum()
        value = fcfPresentSum - marketDebt
        shares = self.sharesOutstanding()
        valuePerShare = round(value / shares, 2)
        valueData = {
            'Present Value Sum(M)': [fcfPresentSum],
            'Intrinsic Value(M)': [value],
            'Market Debt(M)': [marketDebt],
            'Shares Outstanding(M)': [shares],
            'Value Per Share': [valuePerShare],
        }
        valuationReport = pd.DataFrame(valueData)
        return factorReport, fcfReport, valuationReport

    def taxRate(self):
        """Mean effective tax rate across the income-statement history."""
        dfTaxRate = self.income.df['Provision for income taxes'] / self.income.df['Income before taxes']
        return round(dfTaxRate.mean(),4)

    @staticmethod
    def _parse_millions(value):
        """Convert a quote string like '2.5B' or '750M' to millions.

        'B' means billions (x1000), 'M' millions. FIX: previously the last
        character was stripped for *any* non-'B' suffix, so a plain numeric
        string such as '500' was silently mis-parsed as 50.0; plain numbers
        are now returned as-is.
        """
        unit = value[-1]
        if unit == 'B':
            return float(value[:-1]) * 1000
        if unit == 'M':
            return float(value[:-1])
        return float(value)

    def sharesOutstanding(self):
        """Shares outstanding, in millions."""
        return self._parse_millions(self.quote.df['Shares Outstanding'].iloc[0])

    def marketCap(self):
        """Market capitalisation, in millions."""
        return self._parse_millions(self.quote.df['Market Cap.'].iloc[0])
| jpsiyu/stock-analysis | lib/business.py | business.py | py | 4,279 | python | en | code | 0 | github-code | 13 |
43822427075 | import jieba
import jieba.analyse
class Segment(object):
    """Thin wrapper around jieba for word segmentation and keyword extraction."""

    def __init__(self):
        self.seg_list = ''  # last segmentation result (generator once cut() runs)

    def cut(self, string_):
        """Segment *string_* in jieba's default accurate mode; returns a generator."""
        self.seg_list = jieba.cut(string_)
        return self.seg_list

    def extract_keywords(self, string_):
        """Return up to two top-weighted keywords of *string_* as a list."""
        return jieba.analyse.extract_tags(
            string_, topK=2, withFlag=False, allowPOS=())
if __name__ == '__main__':
    # Quick manual check of both entry points.
    s = Segment()
    content = '中国人民日报'
    print('/'.join(s.cut(content)))
    print(s.extract_keywords(content))
| tanx-code/levelup | howtorap/utils/scanner.py | scanner.py | py | 697 | python | en | code | 0 | github-code | 13 |
73257321296 | from collections import OrderedDict
values = ["I", "IV", "V", "IX", "X", "XL", "L", "XC", "C", "CD", "D", "CM", "M"]
keys = [1, 4, 5, 9, 10, 40, 50, 90, 100, 400, 500, 900, 1000]
# Denomination -> glyph, ordered largest-first for the greedy conversion.
digits = OrderedDict(zip(keys[::-1], values[::-1]))


def dec2rom(i):
    """Convert a positive decimal integer to its Roman-numeral string.

    Non-positive input yields the empty string.
    """
    numerals = []
    while i > 0:
        for value, glyph in digits.items():
            if value <= i:
                i -= value
                numerals.append(glyph)
                break
    return ''.join(numerals)
print(dec2rom(1996)) | chisler/basic_algorithms | roman_numerals/dec_2_roman.py | dec_2_roman.py | py | 486 | python | en | code | 0 | github-code | 13 |
8972180408 | from luigi import Parameter, BoolParameter
from luigi.contrib.s3 import S3Target
from ob_pipelines import LoggingTaskWrapper
from ob_pipelines.apps.kallisto import merge_column
from ob_pipelines.config import settings
from ob_pipelines.entities.persistence import get_samples_by_experiment_id
from ob_pipelines.pipelines.rnaseq.tasks.kallisto import Kallisto
from ob_pipelines.s3 import csv_to_s3
class MergeKallisto(LoggingTaskWrapper):
    """Luigi task: merge per-sample kallisto abundance tables for one
    experiment into experiment-wide est_counts / tpm CSV matrices on S3.
    """

    expt_id = Parameter()
    # When True, also write a transcript-annotation CSV alongside the matrices.
    annot = BoolParameter(False)

    def requires(self):
        """One upstream Kallisto task per sample in the experiment."""
        return {
            sample_id: Kallisto(sample_id=sample_id)
            for sample_id in get_samples_by_experiment_id(self.expt_id)
        }

    def output(self):
        """S3 targets under <bucket>/<expt_id>/ for each merged matrix."""
        prefix = '{}/{}/'.format(settings.get_target_bucket(), self.expt_id)
        out_dict = {
            'est_counts': S3Target(prefix + 'est_counts.csv'),
            'tpm': S3Target(prefix + 'tpm.csv')
        }
        if self.annot:
            out_dict['annotations'] = S3Target(prefix + 'annotations.csv')
        return out_dict

    def run(self):
        # Gather input filepaths and labels
        tgt_dict = self.input()
        sample_ids = list(tgt_dict.keys())
        fpaths = [tgt_dict[sample_id]['abundance'].path for sample_id in sample_ids]

        # Merge columns (annotations are identical across calls; the second
        # merge_column overwrites the first's annotations harmlessly)
        annotations, est_counts = merge_column(fpaths, sample_ids, data_col='est_counts', annot=self.annot)
        annotations, tpm = merge_column(fpaths, sample_ids, data_col='tpm', annot=self.annot)

        if self.annot:
            csv_to_s3(annotations, self.output()['annotations'].path)
        csv_to_s3(est_counts, self.output()['est_counts'].path)
        csv_to_s3(tpm, self.output()['tpm'].path)
| outlierbio/ob-pipelines | ob_pipelines/tasks/merge_kallisto.py | merge_kallisto.py | py | 1,700 | python | en | code | 11 | github-code | 13 |
25102867716 | '''
count_change(amount, kinds_of_coins) which returns the number of ways
to return change for a given amount n.
The change can be returned using coins worth 100 cents, 50 cents,
20 cents, 10 cents, 5 cents or 1 cent.
For example, count_change(5,2) refers to the number of ways
to get 5 cents using only 5-cents and 1-cent coins.
'''
'''
count_change(5,2)
ah shit
maxdmm 5
amt-mx 0
count_change(0,2)
ah shit
amt
return 1
count_change(5,1)
ah shit
maxdmm 1
amt-mx 4
count(4,1)
ah shit
....
count(0,1)
ah shit
amt
return 1
'''
# DETERMINE THE HIGHEST COIN DENOMINATION YOU CAN RETURN FOR A CERTAIN AMOUNT
def maxDenom(amount, kinds_of_coins):
    """Return the largest usable coin denomination not exceeding *amount*.

    *kinds_of_coins* selects the available denominations: 1 -> {1},
    2 -> {1, 5}, 3 -> {1, 5, 10}, ... 6 -> {1, 5, 10, 20, 50, 100}.
    Returns None when no coin fits (amount < 1 or kinds_of_coins < 1).

    FIX: the 100-cent tier used `kinds_of_coins == 6` while every other
    tier used `>=`; the table below uses `>=` consistently.
    """
    tiers = ((6, 100), (5, 50), (4, 20), (3, 10), (2, 5), (1, 1))
    for min_kinds, coin in tiers:
        if kinds_of_coins >= min_kinds and amount >= coin:
            return coin
    return None
# Coin values, smallest first; tier k makes the first k of them available.
_DENOMINATIONS = (1, 5, 10, 20, 50, 100)


def count_change(amount, kinds_of_coins):
    """Return the number of ways to make *amount* cents.

    *kinds_of_coins* selects the available coins: 1 -> {1}, 2 -> {1, 5},
    ... 6 -> {1, 5, 10, 20, 50, 100}. count_change(0, k) is 1 for k > 0
    (the empty way); count_change(a, 0) is 0, matching the original.

    BUG FIX: the previous version recursed on amount - maxDenom(amount, k),
    which substitutes a *smaller* coin whenever the tier's largest coin
    exceeds the amount; that double-counts (e.g. it returned 2 for
    count_change(1, 2)). This is the classic two-way recursion: either
    skip the largest available coin, or use it once and keep it available.
    """
    if kinds_of_coins == 0 or amount < 0:
        return 0
    if amount == 0:
        return 1
    return (count_change(amount, kinds_of_coins - 1)
            + count_change(amount - _DENOMINATIONS[kinds_of_coins - 1], kinds_of_coins))
# Smoke test: ways to make 15 cents from the 3-coin tier (10/5/1 cents).
print(count_change(15,3))
| bleow/CZ1103-IntroToCS_Python | aengus.py | aengus.py | py | 3,349 | python | en | code | 0 | github-code | 13 |
72686140818 | """
--- Part Two ---
It turns out that this circuit is very timing-sensitive; you
actually need to minimize the signal delay.
To do this, calculate the number of steps each wire takes to reach each
intersection; choose the intersection where the sum of both wires' steps is
lowest. If a wire visits a position on the grid multiple times, use the steps
value from the first time it visits that position when calculating the total
value of a specific intersection.
The number of steps a wire takes is the total number of grid squares the wire
has entered to get to that location, including the intersection being
considered. Again consider the example from above:
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
In the above example, the intersection closest to the central port is reached
after 8+5+5+2 = 20 steps by the first wire and 7+6+4+3 = 20 steps by the second
wire for a total of 20+20 = 40 steps.
However, the top-right intersection is better: the first wire takes only 8+5+2 =
15 and the second wire takes only 7+6+2 = 15, a total of 15+15 = 30 steps.
Here are the best steps for the extra examples from above:
- R75,D30,R83,U83,L12,D49,R71,U7,L72
U62,R66,U55,R34,D71,R55,D58,R83 = 610 steps
- R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7 = 410 steps
What is the fewest combined steps the wires must take to reach an
intersection?
"""
if __name__ == '__main__':
    # matplotlib.use('Qt5Agg')
    with open('03/input', 'r') as f:
        lines = f.readlines()

    # Each wire is a comma-separated list of moves like R75, D30, ...
    wire1 = [i.strip('\n') for i in lines[0].split(',')]
    wire2 = [i.strip('\n') for i in lines[1].split(',')]

    # Preallocated position arrays; index == number of steps taken.
    # NOTE(review): the two tracing loops below are identical copies —
    # a shared trace_wire(instructions) helper would halve this block.
    length = 200000
    wire1_pos = [(0,0)] * length
    i = 0
    for instruction in wire1:
        direction = instruction[0]
        distance = int(instruction[1:])
        cur_pos = wire1_pos[i]
        if direction == 'R':
            for j in range(distance):
                next_pos = (cur_pos[0] + 1, cur_pos[1])
                wire1_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'L':
            for j in range(distance):
                next_pos = (cur_pos[0] - 1, cur_pos[1])
                wire1_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'U':
            for j in range(distance):
                next_pos = (cur_pos[0] , cur_pos[1] + 1)
                wire1_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'D':
            for j in range(distance):
                next_pos = (cur_pos[0] , cur_pos[1] - 1)
                wire1_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1

    wire2_pos = [(0,0)] * length
    i = 0
    for instruction in wire2:
        direction = instruction[0]
        distance = int(instruction[1:])
        cur_pos = wire2_pos[i]
        if direction == 'R':
            for j in range(distance):
                next_pos = (cur_pos[0] + 1, cur_pos[1])
                wire2_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'L':
            for j in range(distance):
                next_pos = (cur_pos[0] - 1, cur_pos[1])
                wire2_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'U':
            for j in range(distance):
                next_pos = (cur_pos[0] , cur_pos[1] + 1)
                wire2_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1
        elif direction == 'D':
            for j in range(distance):
                next_pos = (cur_pos[0] , cur_pos[1] - 1)
                wire2_pos[i+1] = next_pos
                cur_pos = next_pos
                i += 1

    # Trim the unused (0,0) tail of each preallocated array.
    # NOTE(review): the last assignment wins as i decreases, so the index
    # recorded is from the smallest i holding (0,0) — confirm this still
    # behaves when a wire revisits the origin mid-path.
    for i in range(length-1, 0, -1):
        if wire1_pos[i] == (0,0):
            last_wire1_index = i - 1
        if wire2_pos[i] == (0,0):
            last_wire2_index = i - 1
    print(last_wire1_index)
    print(last_wire2_index)
    wire1_pos = wire1_pos[:last_wire1_index]
    wire2_pos = wire2_pos[:last_wire2_index]

    # Crossings of the two paths; .index() gives the step count of the
    # FIRST visit, as the puzzle requires.
    intersections = set(wire1_pos) & set(wire2_pos)
    times = [0] * len(intersections)
    for i, (x, y) in enumerate(intersections):
        wire1_steps = wire1_pos.index((x, y))
        wire2_steps = wire2_pos.index((x, y))
        times[i] = wire1_steps + wire2_steps
    # Drop the origin intersection (combined step count 0).
    while 0 in times:
        times.remove(0)
    print(times)
    # print(wire1_pos[:5])
    # print(wire2_pos[:5])
    print(min(times))
# Answer is | jat255/advent_of_code | 03/puzz2.py | puzz2.py | py | 4,688 | python | en | code | 0 | github-code | 13 |
8642235852 | # ISO3166 python dict
# oficial list in http://www.iso.org/iso/iso_3166_code_lists
countries = {
'AF': 'AFGHANISTAN',
'AX': 'ÅLAND ISLANDS',
'AL': 'ALBANIA',
'DZ': 'ALGERIA',
'AS': 'AMERICAN SAMOA',
'AD': 'ANDORRA',
'AO': 'ANGOLA',
'AI': 'ANGUILLA',
'AQ': 'ANTARCTICA',
'AG': 'ANTIGUA AND BARBUDA',
'AR': 'ARGENTINA',
'AM': 'ARMENIA',
'AW': 'ARUBA',
'AU': 'AUSTRALIA',
'AT': 'AUSTRIA',
'AZ': 'AZERBAIJAN',
'BS': 'BAHAMAS',
'BH': 'BAHRAIN',
'BD': 'BANGLADESH',
'BB': 'BARBADOS',
'BY': 'BELARUS',
'BE': 'BELGIUM',
'BZ': 'BELIZE',
'BJ': 'BENIN',
'BM': 'BERMUDA',
'BT': 'BHUTAN',
'BO': 'BOLIVIA, PLURINATIONAL STATE OF',
'BQ': 'BONAIRE, SINT EUSTATIUS AND SABA',
'BA': 'BOSNIA AND HERZEGOVINA',
'BW': 'BOTSWANA',
'BV': 'BOUVET ISLAND',
'BR': 'BRAZIL',
'IO': 'BRITISH INDIAN OCEAN TERRITORY',
'BN': 'BRUNEI DARUSSALAM',
'BG': 'BULGARIA',
'BF': 'BURKINA FASO',
'BI': 'BURUNDI',
'KH': 'CAMBODIA',
'CM': 'CAMEROON',
'CA': 'CANADA',
'CV': 'CAPE VERDE',
'KY': 'CAYMAN ISLANDS',
'CF': 'CENTRAL AFRICAN REPUBLIC',
'TD': 'CHAD',
'CL': 'CHILE',
'CN': 'CHINA',
'CX': 'CHRISTMAS ISLAND',
'CC': 'COCOS (KEELING) ISLANDS',
'CO': 'COLOMBIA',
'KM': 'COMOROS',
'CG': 'CONGO',
'CD': 'CONGO, THE DEMOCRATIC REPUBLIC OF THE',
'CK': 'COOK ISLANDS',
'CR': 'COSTA RICA',
'CI': 'CÔTE D\'IVOIRE',
'HR': 'CROATIA',
'CU': 'CUBA',
'CW': 'CURAÇAO',
'CY': 'CYPRUS',
'CZ': 'CZECH REPUBLIC',
'DK': 'DENMARK',
'DJ': 'DJIBOUTI',
'DM': 'DOMINICA',
'DO': 'DOMINICAN REPUBLIC',
'EC': 'ECUADOR',
'EG': 'EGYPT',
'SV': 'EL SALVADOR',
'GQ': 'EQUATORIAL GUINEA',
'ER': 'ERITREA',
'EE': 'ESTONIA',
'ET': 'ETHIOPIA',
'FK': 'FALKLAND ISLANDS (MALVINAS)',
'FO': 'FAROE ISLANDS',
'FJ': 'FIJI',
'FI': 'FINLAND',
'FR': 'FRANCE',
'GF': 'FRENCH GUIANA',
'PF': 'FRENCH POLYNESIA',
'TF': 'FRENCH SOUTHERN TERRITORIES',
'GA': 'GABON',
'GM': 'GAMBIA',
'GE': 'GEORGIA',
'DE': 'GERMANY',
'GH': 'GHANA',
'GI': 'GIBRALTAR',
'GR': 'GREECE',
'GL': 'GREENLAND',
'GD': 'GRENADA',
'GP': 'GUADELOUPE',
'GU': 'GUAM',
'GT': 'GUATEMALA',
'GG': 'GUERNSEY',
'GN': 'GUINEA',
'GW': 'GUINEA-BISSAU',
'GY': 'GUYANA',
'HT': 'HAITI',
'HM': 'HEARD ISLAND AND MCDONALD ISLANDS',
'VA': 'HOLY SEE (VATICAN CITY STATE)',
'HN': 'HONDURAS',
'HK': 'HONG KONG',
'HU': 'HUNGARY',
'IS': 'ICELAND',
'IN': 'INDIA',
'ID': 'INDONESIA',
'IR': 'IRAN, ISLAMIC REPUBLIC OF',
'IQ': 'IRAQ',
'IE': 'IRELAND',
'IM': 'ISLE OF MAN',
'IL': 'ISRAEL',
'IT': 'ITALY',
'JM': 'JAMAICA',
'JP': 'JAPAN',
'JE': 'JERSEY',
'JO': 'JORDAN',
'KZ': 'KAZAKHSTAN',
'KE': 'KENYA',
'KI': 'KIRIBATI',
'KP': 'KOREA, DEMOCRATIC PEOPLE\'S REPUBLIC OF',
'KR': 'KOREA, REPUBLIC OF',
'KW': 'KUWAIT',
'KG': 'KYRGYZSTAN',
'LA': 'LAO PEOPLE\'S DEMOCRATIC REPUBLIC',
'LV': 'LATVIA',
'LB': 'LEBANON',
'LS': 'LESOTHO',
'LR': 'LIBERIA',
'LY': 'LIBYAN ARAB JAMAHIRIYA',
'LI': 'LIECHTENSTEIN',
'LT': 'LITHUANIA',
'LU': 'LUXEMBOURG',
'MO': 'MACAO',
'MK': 'MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF',
'MG': 'MADAGASCAR',
'MW': 'MALAWI',
'MY': 'MALAYSIA',
'MV': 'MALDIVES',
'ML': 'MALI',
'MT': 'MALTA',
'MH': 'MARSHALL ISLANDS',
'MQ': 'MARTINIQUE',
'MR': 'MAURITANIA',
'MU': 'MAURITIUS',
'YT': 'MAYOTTE',
'MX': 'MEXICO',
'FM': 'MICRONESIA, FEDERATED STATES OF',
'MD': 'MOLDOVA, REPUBLIC OF',
'MC': 'MONACO',
'MN': 'MONGOLIA',
'ME': 'MONTENEGRO',
'MS': 'MONTSERRAT',
'MA': 'MOROCCO',
'MZ': 'MOZAMBIQUE',
'MM': 'MYANMAR',
'NA': 'NAMIBIA',
'NR': 'NAURU',
'NP': 'NEPAL',
'NL': 'NETHERLANDS',
'NC': 'NEW CALEDONIA',
'NZ': 'NEW ZEALAND',
'NI': 'NICARAGUA',
'NE': 'NIGER',
'NG': 'NIGERIA',
'NU': 'NIUE',
'NF': 'NORFOLK ISLAND',
'MP': 'NORTHERN MARIANA ISLANDS',
'NO': 'NORWAY',
'OM': 'OMAN',
'PK': 'PAKISTAN',
'PW': 'PALAU',
'PS': 'PALESTINIAN TERRITORY, OCCUPIED',
'PA': 'PANAMA',
'PG': 'PAPUA NEW GUINEA',
'PY': 'PARAGUAY',
'PE': 'PERU',
'PH': 'PHILIPPINES',
'PN': 'PITCAIRN',
'PL': 'POLAND',
'PT': 'PORTUGAL',
'PR': 'PUERTO RICO',
'QA': 'QATAR',
'RE': 'RÉUNION',
'RO': 'ROMANIA',
'RU': 'RUSSIAN FEDERATION',
'RW': 'RWANDA',
'BL': 'SAINT BARTHÉLEMY',
'SH': 'SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA',
'KN': 'SAINT KITTS AND NEVIS',
'LC': 'SAINT LUCIA',
'MF': 'SAINT MARTIN (FRENCH PART)',
'PM': 'SAINT PIERRE AND MIQUELON',
'VC': 'SAINT VINCENT AND THE GRENADINES',
'WS': 'SAMOA',
'SM': 'SAN MARINO',
'ST': 'SAO TOME AND PRINCIPE',
'SA': 'SAUDI ARABIA',
'SN': 'SENEGAL',
'RS': 'SERBIA',
'SC': 'SEYCHELLES',
'SL': 'SIERRA LEONE',
'SG': 'SINGAPORE',
'SX': 'SINT MAARTEN (DUTCH PART)',
'SK': 'SLOVAKIA',
'SI': 'SLOVENIA',
'SB': 'SOLOMON ISLANDS',
'SO': 'SOMALIA',
'ZA': 'SOUTH AFRICA',
'GS': 'SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS',
'SS': 'SOUTH SUDAN',
'ES': 'SPAIN',
'LK': 'SRI LANKA',
'SD': 'SUDAN',
'SR': 'SURINAME',
'SJ': 'SVALBARD AND JAN MAYEN',
'SZ': 'SWAZILAND',
'SE': 'SWEDEN',
'CH': 'SWITZERLAND',
'SY': 'SYRIAN ARAB REPUBLIC',
'TW': 'TAIWAN, PROVINCE OF CHINA',
'TJ': 'TAJIKISTAN',
'TZ': 'TANZANIA, UNITED REPUBLIC OF',
'TH': 'THAILAND',
'TL': 'TIMOR-LESTE',
'TG': 'TOGO',
'TK': 'TOKELAU',
'TO': 'TONGA',
'TT': 'TRINIDAD AND TOBAGO',
'TN': 'TUNISIA',
'TR': 'TURKEY',
'TM': 'TURKMENISTAN',
'TC': 'TURKS AND CAICOS ISLANDS',
'TV': 'TUVALU',
'UG': 'UGANDA',
'UA': 'UKRAINE',
'AE': 'UNITED ARAB EMIRATES',
'GB': 'UNITED KINGDOM',
'US': 'UNITED STATES',
'UM': 'UNITED STATES MINOR OUTLYING ISLANDS',
'UY': 'URUGUAY',
'UZ': 'UZBEKISTAN',
'VU': 'VANUATU',
'VE': 'VENEZUELA, BOLIVARIAN REPUBLIC OF',
'VN': 'VIET NAM',
'VG': 'VIRGIN ISLANDS, BRITISH',
'VI': 'VIRGIN ISLANDS, U.S.',
'WF': 'WALLIS AND FUTUNA',
'EH': 'WESTERN SAHARA',
'YE': 'YEMEN',
'ZM': 'ZAMBIA',
'ZW': 'ZIMBABWE',
'EU': 'Europe',
'WD': 'World'
}
# ISO 3166-1 alpha-2 codes of European countries/territories (includes 'XK' for Kosovo).
europe = ['AL', 'AD', 'AT', 'BY', 'BE', 'BA', 'BG', 'HR', 'CY', 'CZ', 'DK', 'EE', 'FO', 'FI', 'FR', 'DE', 'GI', 'GR', 'HU', 'IS', 'IE', 'IM', 'IT', 'XK', 'LV', 'LI', 'LT', 'LU', 'MK', 'MT', 'MD', 'MC', 'ME', 'NL', 'NO', 'PL', 'PT', 'RO', 'RU', 'SM', 'RS', 'SK', 'SI', 'ES', 'SE', 'CH', 'UA', 'GB', 'VA']
wc_countries = {
'WD': 'World',
'US': 'USA',
'CN': 'China',
'IT': 'Italy',
'ES': 'Spain',
'FR': 'France',
'DE': 'Germany',
'GB': 'UK',
'IR': 'Iran',
'YT': 'Mayotte',
'CH': 'Switzerland',
'UA': 'Ukraine',
'EU': 'Europe',
'KR': 'S. Korea',
'NL': 'Netherlands',
'SX': 'Sint Maarten',
'AT': 'Austria',
'BE': 'Belgium',
'CA': 'Canada',
'TR': 'Turkey',
'PT': 'Portugal',
'NO': 'Norway',
'BR': 'Brazil',
'SE': 'Sweden',
'AU': 'Australia',
'IL': 'Israel',
'MY': 'Malaysia',
'CZ': 'Czechia',
'DK': 'Denmark',
'IE': 'Ireland',
'LU': 'Luxembourg',
'JP': 'Japan',
'EC': 'Ecuador',
'CL': 'Chile',
'PL': 'Poland',
'PK': 'Pakistan',
'TH': 'Thailand',
'RO': 'Romania',
'SA': 'Saudi Arabia',
'FI': 'Finland',
'ZA': 'South Africa',
'ID': 'Indonesia',
'GR': 'Greece',
'RU': 'Russia',
'IS': 'Iceland',
'IN': 'India',
'PH': 'Philippines',
'SG': 'Singapore',
'PE': 'Peru',
'SI': 'Slovenia',
'PA': 'Panama',
'QA': 'Qatar',
'EE': 'Estonia',
'AR': 'Argentina',
'EG': 'Egypt',
'HR': 'Croatia',
'CO': 'Colombia',
'DO': 'Dominican Republic',
'MX': 'Mexico',
'BH': 'Bahrain',
'RS': 'Serbia',
'HK': 'Hong Kong',
'IQ': 'Iraq',
'LB': 'Lebanon',
'DZ': 'Algeria',
'UAE': 'UAE',
'LT': 'Lithuania',
'AM': 'Armenia',
'NZ': 'New Zealand',
'MA': 'Morocco',
'BG': 'Bulgaria',
'HU': 'Hungary',
'TW': 'Taiwan',
'LV': 'Latvia',
'CR': 'Costa Rica',
'SK': 'Slovakia',
'AD': 'Andorra',
'UY': 'Uruguay',
'JO': 'Jordan',
'SM': 'San Marino',
'KW': 'Kuwait',
'MK': 'North Macedonia',
'TN': 'Tunisia',
'BA': 'Bosnia and Herzegovina',
'MD': 'Moldova',
'AL': 'Albania',
'VN': 'Vietnam',
'BF': 'Burkina Faso',
'CY': 'Cyprus',
'FO': 'Faeroe Islands',
'RE': 'Réunion',
'MT': 'Malta',
'GH': 'Ghana',
'AZ': 'Azerbaijan',
'BN': 'Brunei',
'KZ': 'Kazakhstan',
'OM': 'Oman',
'LK': 'Sri Lanka',
'VE': 'Venezuela',
'SN': 'Senegal',
'KH': 'Cambodia',
'CI': 'Ivory Coast',
'AF': 'Afghanistan',
'BY': 'Belarus',
'PS': 'Palestine',
'MU': 'Mauritius',
'GE': 'Georgia',
'CM': 'Cameroon',
'UZ': 'Uzbekistan',
'ME': 'Montenegro',
'CU': 'Cuba',
'MQ': 'Martinique',
'NE': 'Niger',
'TT': 'Trinidad and Tobago',
'LI': 'Liechtenstein',
'HN': 'Honduras',
'CD': 'DRC',
'BD': 'Bangladesh',
'KG': 'Kyrgyzstan',
'BO': 'Bolivia',
'PY': 'Paraguay',
'RW': 'Rwanda',
'MC': 'Monaco',
'KE': 'Kenya',
'MO': 'Macao',
'PF': 'French Polynesia',
'GF': 'French Guiana',
'JM': 'Jamaica',
'GI': 'Gibraltar',
'IM': 'Isle of Man',
'GT': 'Guatemala',
'MG': 'Madagascar',
'TG': 'Togo',
'AW': 'Aruba',
'BB': 'Barbados',
'ZM': 'Zambia',
'NC': 'New Caledonia',
'UG': 'Uganda',
'SV': 'El Salvador',
'MV': 'Maldives',
'TZ': 'Tanzania',
'GQ': 'Equatorial Guinea',
'ET': 'Ethiopia',
'DJ': 'Djibouti',
'DM': 'Dominica',
'MN': 'Mongolia',
'MF': 'Saint Martin',
'KY': 'Cayman Islands',
'HT': 'Haiti',
'NA': 'Namibia',
'SR': 'Suriname',
'GA': 'Gabon',
'BM': 'Bermuda',
'MZ': 'Mozambique',
'SC': 'Seychelles',
'BJ': 'Benin',
'GL': 'Greenland',
'LA': 'Laos',
'GY': 'Guyana',
'BS': 'Bahamas',
'FJ': 'Fiji',
'SY': 'Syria',
'CV': 'Cabo Verde',
'AO': 'Angola',
'CG': 'Congo',
'ER': 'Eritrea',
'GN': 'Guinea',
'VA': 'Vatican City',
'ML': 'Mali',
'SZ': 'Eswatini',
'GM': 'Gambia',
'SD': 'Sudan',
'ZW': 'Zimbabwe',
'NP': 'Nepal',
'AG': 'Antigua and Barbuda',
'NI': 'Nicaragua',
'TD': 'Chad',
'LR': 'Liberia',
'MR': 'Mauritania',
'MM': 'Myanmar',
'BL': 'St. Barth',
'LC': 'Saint Lucia',
'BZ': 'Belize',
'BT': 'Bhutan',
'NG': 'Nigeria',
'VG': 'British Virgin Islands',
'GW': 'Guinea-Bissau',
'MS': 'Montserrat',
'KN': 'Saint Kitts and Nevis',
'SO': 'Somalia',
'TC': 'Turks and Caicos',
'GD': 'Grenada',
'LY': 'Libya',
'PG': 'Papua New Guinea',
'VC': 'St. Vincent Grenadines',
'TL': 'Timor-Leste'
}
# Column order for the per-country statistics rows built elsewhere.
wc_headers = ["country", "confirmed", "new_confirmed", "deaths", "new_deaths", "recovered", "active", "critical", "confirmed_per_1M", "deaths_per_1M", "tested"]
| hmiguel/covid | data.py | data.py | py | 9,976 | python | es | code | 0 | github-code | 13 |
73617093777 | #!/usr/bin/env python3
from flask import Flask, render_template, send_from_directory, make_response, request
from flask_mail import Mail, Message
import json
import os
from flask_sslify import SSLify

# Change this to False to switch to production mode and force https
ALLOW_HTTP = True

# Static assets (css/js/images) live in ./assets and are served under /assets.
app = Flask(__name__, static_url_path='/assets', static_folder='assets')

# NOTE(review): SECRET_KEY and the SMTP credentials below are hard-coded
# placeholders committed to source. Before deployment they should be loaded
# from environment variables (or a config file excluded from version control).
app.config.update(
    TESTING=False,
    SECRET_KEY="SomeRandomKey123",
    MAIL_SERVER="Your.SMTP.server.here",
    MAIL_PORT=26,
    MAIL_USE_TLS=False,
    MAIL_USE_SSL=False,
    MAIL_USERNAME="Your@Email.here",
    MAIL_PASSWORD="YourPasswordHere",
    MAIL_DEFAULT_SENDER="Your@Email.here"
)

# Recipient of uploaded audio samples and of error reports.
ADMIN_EMAIL = "ujagaga@gmail.com"
ADMIN_NAME = "Rada Berar"

mail = Mail(app)

# UI translation state: LANGUAGE is (re)populated from language/<LOCALE>.json
# by load_language(), which the request handlers call on every request.
LOCALE = 'english'
LANGUAGE = {}

# When testing locally comment this out as it will force https which will fail on localhost
sslify = SSLify(app)
def load_language():
    """Load the UI strings for LOCALE into the global LANGUAGE dict.

    On first run this creates the ``language`` directory and seeds
    ``language/<LOCALE>.json`` from the current LANGUAGE contents so a
    translator has a template to edit. On a read/parse error the previous
    LANGUAGE value is left untouched and the error is printed.
    """
    global LANGUAGE
    lang_dir = 'language'
    lang_file = os.path.join(lang_dir, LOCALE + ".json")
    # makedirs(exist_ok=True) replaces the isdir()+mkdir() pair: same result,
    # but immune to the check-then-create race.
    os.makedirs(lang_dir, exist_ok=True)
    if not os.path.isfile(lang_file):
        with open(lang_file, 'w', encoding='utf-8') as f:
            data = json.dumps(LANGUAGE)
            # Poor-man's pretty print so the file is editable by hand.
            f.write(data.replace('{', '{\n').replace('}', '\n}').replace(',', ',\n'))
    try:
        # 'with' guarantees the handle is closed even when read()/parsing
        # raises (the original leaked the handle on a read error).
        with open(lang_file, 'r', encoding='utf-8') as f:
            LANGUAGE = json.loads(f.read())
    except (OSError, ValueError) as e:
        # ValueError covers json.JSONDecodeError; OSError covers I/O failures.
        # Narrower than the original bare 'Exception' so real bugs surface.
        print("ERROR parsing language", e)
@app.route('/', methods=['GET'])
def home():
    """Render the landing page with the active translation strings."""
    load_language()
    context = {
        "lang": LANGUAGE,
        "admin_email": ADMIN_EMAIL,
        "admin_name": ADMIN_NAME,
    }
    return render_template('index.html', **context)
@app.route('/collect', methods=['POST'])
def send_sample():
    """Receive an uploaded audio sample and forward it to the admin by e-mail.

    Expects a multipart form field named ``file`` whose filename field holds
    the contributor's name. Always answers HTTP 200 with a localized message
    (the front end only displays the text); failures are additionally
    reported to ADMIN_EMAIL by a second notification mail.
    """
    load_language()
    file = request.files['file']
    error = None
    if file:
        name = file.filename
        # Build a mail-attachment-safe file name from the contributor's name.
        file_name = "{}.ogg".format(name.strip().replace(' ', '_'))
        try:
            msg = Message("{}: {}".format(LANGUAGE["audio_received"], name), recipients=[ADMIN_EMAIL])
            msg.attach(filename=file_name, content_type="video/mpeg", data=file.read())
            mail.send(msg)
            return make_response(LANGUAGE["thank_you_msg"], 200)
        except Exception as e:
            print("ERROR sending email:", e)
            error = e
    else:
        error = "No file received"
    # Reaching this point means something failed: either no file arrived or
    # sending the sample raised. Try to notify the admin about the failure.
    try:
        # BUG FIX: the original formatted the name 'e' here, which is
        # undefined on the "No file received" path and raised a NameError.
        # 'error' holds the right value on both paths.
        msg = Message("ERROR in sending sample: {}".format(error), recipients=[ADMIN_EMAIL])
        mail.send(msg)
        return make_response(LANGUAGE["error_1"], 200)
    except Exception as e2:
        print("ERROR sending email bug report:", e2)
        return make_response(LANGUAGE["error_2"], 200)
@app.route('/favicon.ico')
def favicon():
    """Serve the site icon from the assets directory next to this script."""
    here = os.path.dirname(os.path.realpath(__file__))
    return send_from_directory(here, 'assets/favicon.ico', mimetype='image/vnd.microsoft.icon')
if __name__ == '__main__':
    # Hosting platforms (e.g. Heroku) inject the listen port via $PORT;
    # fall back to 5000 for local development.
    port = int(os.environ.get("PORT", 5000))
    # Debug mode is tied to ALLOW_HTTP: both are development conveniences
    # and must be off in production.
    app.run(host='0.0.0.0', port=port, debug=ALLOW_HTTP)
| ujagaga/audioSampler | sampler.py | sampler.py | py | 3,160 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.