text stringlengths 38 1.54M |
|---|
"""
3 # @File : scenario_reduction.py
4 # @Author: Chen Zhen
5 # @Date : 2019/9/17
6 # @Desc : reduce the number of scenarios based on the paper: A two stage stochastic programming model for
lot-sizing and scheduling under uncertainty (2016) in CIE.
"""
import numpy as np
import math
# Builds the Cartesian product of a list with itself (itertools.product equivalent), returned as lists
def product(args, repeat):
    """Cartesian product of `args` with itself `repeat` times.

    Equivalent to itertools.product(args, repeat=repeat), except each
    combination is returned as a list rather than a tuple.
    """
    pool = tuple(args)
    combos = [[]]
    for _ in range(repeat):
        combos = [prefix + [item] for prefix in combos for item in pool]
    return combos
# tree 1
# demand_scenarios[b][k]: demand vector for outcome k under regime b
# (0 = normal demand, 1 = booming demand).
demand_scenarios = [[[134, 17, 40], [246, 62, 57], [84, 58, 28]], [[345, 269, 481], [341, 302, 611], [156, 123, 184]]]
demand_possibility = [[0.103, 0.383, 0.514], [0.185, 0.556, 0.259]]
scenario_num_need = 10  # scenario number after reducing
scenario_selected = []  # index of selected scenario
T = 6  # number of periods
booming_demand = [0, 0, 0, 0, 1, 1]  # demand regime per period
K = len(demand_possibility)  # scenario number in a period
S = K ** T  # total scenario number
# set values for scenario links: whether scenario i links with scenario j in period t
scenarioLink = [[[0 for s in range(S)] for s in range(S)] for t in range(T)]
for t in range(T):
    slices = round(S * (1 / K)**(t+1))  # number of scenario in a slice
    slice_num = round(K**(t+1))  # total number of slices
    for i in range(slice_num):
        for j in range(slices * i, slices * (i + 1)):
            for k in range(slices * i, slices * (i + 1)):
                scenarioLink[t][j][k] = 1
# set values for scenario probabilities (product of per-period outcome
# probabilities along each scenario path)
scenario_permulations = product(range(K), T)
scenario_probs = [0 for s in range(S)]
for s in range(S):
    index = scenario_permulations[s][0]
    scenario_probs[s] = demand_possibility[booming_demand[0]][index]
    for i in range(1, len(scenario_permulations[s])):
        index = scenario_permulations[s][i]
        index2 = booming_demand[i]
        scenario_probs[s] = scenario_probs[s] * demand_possibility[index2][index]
# K is reused from here on as the count of scenarios selected so far,
# no longer the per-period outcome count.
K = 1
J = np.arange(S)  # index for unselected scenarios
# Pairwise Euclidean distance between scenarios in outcome-index space.
d = [[0 for s in range(S)] for s in range(S)]
for i in range(S):
    for j in range(i, S):
        for k in range(len(scenario_permulations[0])):
            d[i][j] += (scenario_permulations[i][k] - scenario_permulations[j][k])**2
        d[i][j] = math.sqrt(d[i][j])
        d[j][i] = d[i][j]
# Fast-forward selection: repeatedly pick the scenario that minimizes the
# probability-weighted distance to the remaining (unselected) scenarios.
while K <= scenario_num_need:
    if K == 1:
        wd = [0 for s in range(S)]
        for i in range(S):
            for j in range(S):
                wd[i] += scenario_probs[j] * d[j][i]
        l = np.argmin(wd)
        scenario_selected.append(l)
        J = np.delete(J, l, axis = 0)
    else:
        m = len(J)
        # Update each distance to the best over the already-selected set
        # (l is the scenario chosen in the previous iteration).
        for i in J:
            for j in J:
                d[j][i] = min(d[j][i], d[j][l])
        wd = [0 for i in range(m)]
        index = 0
        for i in J:
            for j in J:
                wd[index] += scenario_probs[j] * d[j][i]
            index = index + 1
        l = np.argmin(wd)
        scenario_selected.append(J[l])
        J = np.delete(J, l, axis = 0)
    K = K + 1
print('scenario index:')
print(scenario_selected)
print('')
print('%d selected demand scenario index in each period:' % scenario_num_need)
for i in scenario_selected:
    indexes = scenario_permulations[i]
    scenario = []
    for j in indexes:
        # NOTE(review): indexing booming_demand/demand_scenarios by the
        # outcome index j rather than by the period number looks
        # suspicious — confirm against the cited paper.
        scenario.append(demand_scenarios[booming_demand[j]][j][0])
    print(scenario)
|
import time
# webdriver is the command set used to drive the browser
from selenium import webdriver
from selenium.webdriver.support.ui import Select

# Start a Chrome session (opens a new browser window).
driver = webdriver.Chrome("C:/chromedriver/chromedriver.exe")
# Open the exercise page.
driver.get("http://suninjuly.github.io/selects1.html")
time.sleep(1)


def str_sum(x1, x2):
    """Return the sum of two numeric strings, as a string.

    Renamed from `sum` so the builtin is no longer shadowed.
    """
    return str(int(x1) + int(x2))


# Read num1 and num2 from the page and compute their sum.
num1 = driver.find_elements_by_id("num1")[0].text
num2 = driver.find_elements_by_id("num2")[0].text
y = str_sum(num1, num2)
# Pick the computed sum from the drop-down list.
select = Select(driver.find_element_by_tag_name("select"))
select.select_by_value(y)
time.sleep(1)
# Find and click the submit button.
btnSubmit = driver.find_element_by_css_selector(".btn")
btnSubmit.click()
time.sleep(5)
# Always close the browser window when done.
driver.quit()
import webbrowser
class Movie():
    """Simple record holding one movie's display and media attributes."""

    def __init__(self, title, year, description, rating, poster, trailer):
        # Map each constructor argument onto the attribute name the rest
        # of the app expects.
        fields = {
            "title": title,
            "year": year,
            "description": description,
            "rating": rating,
            "poster_image_url": poster,
            "trailer_youtube_url": trailer,
        }
        for attr, value in fields.items():
            setattr(self, attr, value)
|
#http://coderbyte.com/CodingArea/GuestEditor.php?ct=Letter%20Capitalize&lan=Python
'''
Using the Python language, have the function LetterCapitalize(str) take the str parameter being passed and capitalize the first letter of each word. Words will be separated by only one space.
Input = "hello world"Output = "Hello World"
Input = "i ran there"Output = "I Ran There"
'''
import string; print raw_input('Write the mantra:').title() |
from django.contrib import admin

# Fixed: Choice was imported twice; a single combined import suffices.
from .models import Choice, Question

# Register models with the admin site so they can be managed there.
admin.site.register(Question)
admin.site.register(Choice)
# Scaffold for a customized admin, kept for future use:
#class QuestionAdmin(admin.ModelAdmin):
#    fieldsets = ['pub_date' ,'Question_text']
def leftrotation(the_array, times):
    """Rotate `the_array` left in place by `times` positions and print it.

    Uses one O(n) slice rotation instead of popping the head element once
    per step, reduces `times` modulo the length (so large counts are
    cheap), and guards against an empty list — the original indexed
    `the_array[0]` and raised IndexError for an empty input.
    """
    if the_array:
        shift = times % len(the_array)  # also handles times >= len
        the_array[:] = the_array[shift:] + the_array[:shift]
    print(the_array)


array_1 = [1, 2, 3, 4, 5]
leftrotation(array_1, 3)
import torch
import torch.nn as nn
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import svds
import matplotlib.pyplot as plt
from math import isnan
import csv
# Module-level shorthands used throughout this script.
zeros = np.zeros
pinv = np.linalg.pinv
DATA_PATH = "./data/ml-100k/u.data"  # MovieLens 100k ratings file (tab-separated)
def MoiveAvgRating(sparseDataMatrix: sparse.coo_matrix):
    """Per-row (movie) average rating of a sparse ratings matrix.

    Zeros are treated as "not rated". Rows nobody rated get the global
    average rating instead of NaN. Returns a column vector (n_rows, 1).
    Fixed: `np.float` was removed in NumPy 1.24 — use builtin `float`.
    """
    DenseMatrix = sparseDataMatrix.toarray().astype(float)
    # number of rated movies per row (count of nonzero entries)
    RatedMovies = np.sum(np.array(DenseMatrix != 0, dtype=float),
                         axis=1, keepdims=True)
    TotalRatings = np.sum(DenseMatrix, axis=1, keepdims=True)
    AverageRatings = TotalRatings / RatedMovies  # 0/0 -> NaN for unrated rows
    GlobalAvg = np.sum(TotalRatings.reshape(-1)) / np.sum(RatedMovies.reshape(-1))
    # take care of movies nobody viewed: replace NaN with the global average
    return np.nan_to_num(AverageRatings, nan=GlobalAvg)
if "DATA" not in dir():
DATA = []
with open(DATA_PATH) as csvfile:
spamreader = csv.reader(csvfile, delimiter='\t')
for row in spamreader:
DATA.append([int(row[0]) - 1, int(row[1]) - 1, int(row[2])])
DATA = np.array(DATA)
NUM_OBSERVATIONS = len(DATA) # num_observations = 100,000
NUM_USERS = max(DATA[:, 0]) + 1 # num_users = 943, indexed 0,...,942
NUM_ITEMS = max(DATA[:, 1]) + 1 # num_items = 1682 indexed 0,...,1681
np.random.seed(1)
NUM_TRAIN = int(0.8*NUM_OBSERVATIONS)
perm = np.random.permutation(DATA.shape[0])
TRAIN = DATA[perm[0:NUM_TRAIN], :]
TEST = DATA[perm[NUM_TRAIN::], :]
del perm
TRAIN_SPR = sparse.coo_matrix(
(TRAIN[:, 2], (TRAIN[:,1], TRAIN[:, 0])), (NUM_ITEMS, NUM_USERS)
)
TEST_SPR = sparse.coo_matrix(
(TEST[:, 2], (TEST[:,1], TEST[:, 0])), (NUM_ITEMS, NUM_USERS)
)
TRAIN_AVG = MoiveAvgRating(TRAIN_SPR)
print("DATA HAS BEEN LOADED. ")
# ========================== List of Helper Functions ==========================
def Ts():
    """Current wall-clock time as an 'HH-MM-SS' string (used in filenames)."""
    from datetime import datetime
    return datetime.now().strftime("%H-%M-%S")
def mkdir(dir):
    """Ensure directory `dir` exists, creating parent directories as needed."""
    from pathlib import Path
    target = Path(dir)
    target.mkdir(parents=True, exist_ok=True)
def log(fname: str, content: str, dir):
    """Write `content` to '<dir><timestamp>-<fname>.txt', creating `dir` first.

    NOTE(review): `dir` is concatenated directly into the path, so it must
    end with a path separator for the file to land inside it — confirm.
    """
    mkdir(dir)
    stamp = Ts()
    target = f"{dir}{stamp}-{fname}.txt"
    with open(target, "w+") as handle:
        handle.write(content)
# ==============================================================================
def Epsilon(approx, train=True):
    """Mean squared error of `approx` over the observed ratings.

    Iterates only the nonzero (observed) entries of the chosen sparse
    split. Fixed: the original divided by the last enumerate index
    (count - 1) — a biased mean that raised ZeroDivisionError for a
    single rating and NameError for an empty matrix.
    """
    Sparse = TRAIN_SPR if train else TEST_SPR
    DiffSum = 0
    for II, JJ, Rating in zip(Sparse.row, Sparse.col, Sparse.data):
        DiffSum += (Rating - approx[II, JJ]) ** 2
    return DiffSum / len(Sparse.data)
class AlternatingMinimization:
    """Regularized matrix factorization R ≈ U^T V fitted by alternating
    least squares; zero entries of R are treated as unobserved."""

    def __init__(this,
                 dataMatrix: np.ndarray,
                 d: int,
                 sigma,
                 regularizer,
                 tol=1e-2):
        assert dataMatrix.ndim == 2
        m, n = dataMatrix.shape
        this.m, this.n = m, n
        this.R = dataMatrix
        this.Sigma = sigma
        this.Lambda = regularizer
        this.Tol = tol
        this.Rank = d
        # Random Gaussian initialization of both factor matrices.
        this.V = sigma*np.random.randn(d, n)
        this.U = sigma*np.random.randn(d, m)
        this.I = np.eye(d)
        # Mask of observed (nonzero) entries. Fixed: `np.float` was
        # removed in NumPy 1.24 — use the builtin float dtype.
        this.M = np.array(dataMatrix != 0, dtype=float)

    def UOpt(this):
        """Ridge-regression update of each column of U with V held fixed."""
        L = this.Lambda
        I = this.I
        R = this.R
        V = this.V
        U = this.U
        M = this.M
        # Use np.linalg.pinv directly instead of the module-level alias.
        for K in range(this.m):
            U[:, K:K+1] = np.linalg.pinv(V@(M[K:K + 1,:].T*V.T) + L*I)@(V@R[K:K+1, :].T)

    def VOpt(this):
        """Ridge-regression update of each column of V with U held fixed."""
        L = this.Lambda
        I = this.I
        R = this.R
        V = this.V
        U = this.U
        M = this.M
        for K in range(this.n):
            V[:, K:K+1] = np.linalg.pinv(U@(M[:, K:K+1]*U.T) + L*I)@(U@R[:, K:K+1])

    def TrainLoss(this):
        return this.Loss()

    def TestLoss(this):
        return this.Loss(False)

    def Loss(this, train=True):
        """MSE of the current factorization on the train or test split."""
        Approx = this.U.T@this.V
        return Epsilon(Approx, train=train)
def PartA():
    """Test-set MSE when predicting each rating by its movie's train average.

    Fixed: divide by the number of test ratings, not the last enumerate
    index — the original was biased and crashed on 0 or 1 ratings.
    """
    DiffSum = 0
    for II, _, Rating in zip(TEST_SPR.row, TEST_SPR.col, TEST_SPR.data):
        DiffSum += (Rating - TRAIN_AVG[II, 0])**2
    return DiffSum / len(TEST_SPR.data)
def PartB(ranks=[1, 2, 5, 10, 20, 50]):
    """Train/test MSE of truncated-SVD reconstructions at several ranks.

    Missing entries are treated as zeros. The reconstruction is built up
    incrementally: each step adds the singular directions between two
    consecutive ranks, so the SVD is computed only once.
    NOTE(review): mutable default argument — harmless here because the
    list is only read (`ranks + [0]` builds a new list).
    """
    Ranks = sorted(ranks + [0])
    RTilde = TRAIN_SPR.asfptype()  # Filled with zeros.
    U, Sigma, VTransposed = svds(RTilde, k=942)
    # svds returns singular values in ascending order; flip to descending.
    U, Sigma, VTransposed = U[:, ::-1], Sigma[::-1], VTransposed[::-1]
    Approximation = np.zeros(RTilde.shape)
    MSETrain, MSETest = [], []
    for RankStart, RankEnd in zip(Ranks[: -1], Ranks[1:]):
        # Add the contribution of singular directions [RankStart, RankEnd).
        Approximation += U[:, RankStart: RankEnd]\
            @\
            np.diag(Sigma[RankStart: RankEnd])\
            @\
            VTransposed[RankStart: RankEnd]
        MSETrain.append(Epsilon(Approximation, True))
        MSETest.append(Epsilon(Approximation, False))
    return ranks, MSETrain, MSETest
def MatrixComplete(d, sigma, regularizer):
    """Fit an AlternatingMinimization model of rank `d` on the train set.

    Runs up to 100 alternating sweeps, stopping early once the train-loss
    improvement drops below 1e-2. Returns (train loss, test loss).
    Fixed: `TestLoss` was assigned only inside the early-stop branch, so a
    run that never converged raised UnboundLocalError on return.
    """
    Instance = AlternatingMinimization(
        TRAIN_SPR.asfptype().toarray(),
        d=d,
        sigma=sigma,
        regularizer=regularizer
    )
    TestLoss = None
    for II in range(100):
        Loss = Instance.Loss()
        Instance.UOpt()
        Instance.VOpt()
        print(Loss)
        if Loss - Instance.Loss() < 1e-2:
            TestLoss = Instance.TestLoss()
            break
    if TestLoss is None:  # loop exhausted without converging
        TestLoss = Instance.TestLoss()
    return Loss, TestLoss
def main():
    """Run the homework parts; each sub-part writes its artifacts to ./B1."""
    FolderPath = "./B1"
    mkdir(FolderPath)

    def ParA():
        # Baseline error: predict every rating by the per-movie train average.
        PartAError = PartA()
        with open(f"{FolderPath}/part-a.txt", "w+") as f:
            f.write(f"For part (a), the error on test set is: {PartAError}")
        print(f"ParA Done")
    # ParA()

    def ParB():
        # Truncated-SVD reconstruction error as a function of rank.
        Ranks, TrainErr, TestErr = PartB()
        print(f"Train Errors {TrainErr, TestErr}")
        plt.plot(Ranks, TrainErr, "-o")
        plt.plot(Ranks, TestErr, "-o")
        plt.legend(["Train MSE", "Test MSE"])
        plt.title("Ranks and Reconstruction (Filled with Zeroes)")
        plt.savefig(f"{FolderPath}/{Ts()}-b1-b.png")
        plt.show()
        plt.clf()
    # ParB()

    def ParC():
        # Alternating-minimization matrix completion at several ranks.
        Ranks = [1, 2, 5, 10, 20, 50]
        TrainLosses, TestLosses = [], []
        for Rank in Ranks:
            TrainLoss, TestLoss = MatrixComplete(Rank, 1, Rank/10)
            TrainLosses.append(TrainLoss)
            TestLosses.append(TestLoss)
        plt.plot(TrainLosses, "-o")
        plt.plot(TestLosses, "-o")
        plt.legend(["Train MSE", "Test MSE"])
        plt.title("Matrix Complete, Ignore zeros, Alternating Opt")
        plt.savefig(f"{FolderPath}/{Ts()}-b1-c.png")
        plt.show()
        plt.clf()
    ParC()


if __name__ == "__main__":
    import os
    # Print the working directory so relative paths (./data, ./B1) can be checked.
    print(os.getcwd())
    print(os.curdir)
    main()
|
import lang
import flect
class ComponentDefinition(object):
    """Named definition of a language component: how it is created,
    parsed from tokens, and interpreted."""

    __slots__ = [
        "name",
        "language",
        "interpreter"
    ]

    def __init__(self, name, language):
        self.name = name
        self.language = language

    def create_component(self):
        """Subclasses return a fresh component instance; base returns None."""
        return None

    def initilize_interpreter(self, interpreter):
        """Attach `interpreter`, falling back to the propagating default."""
        self.interpreter = interpreter if interpreter else lang.Interpreter_Propagate()

    def interpret(self, environment, component):
        return self.interpreter.interpret(environment, component)

    def parse_tokens(self, tokens):
        """Parse `tokens` into a component and interpret it, but only when
        the component consumed every token; otherwise return None."""
        component = self.create_component()
        if component.initilize(tokens) == len(tokens):
            return component.interpret(None)

    def parse_text(self, text):
        return self.parse_tokens(self.language.tokenize(text))

    def __str__(self, *args, **kwargs):
        return self.name

    def __repr__(self, *args, **kwargs):
        return flect.repr_self(self)
# Primes discovered so far, in increasing order; is_prime depends on it.
primes = []


def is_prime(x):
    """Trial-divide x by the cached primes; record and report new primes.

    Assumes it is called with strictly increasing x, so `primes` already
    holds every prime below x. Appends x to the cache when prime.
    """
    limit = int(x ** 0.5)
    result = True
    for p in primes:
        if x % p == 0:
            result = False
            break
        if p > limit:
            break
    if result:
        primes.append(x)
    return result
# Seed the global `primes` cache with every prime below 9999.
for candidate in range(2, 9999):
    is_prime(candidate)

# Keep only the four-digit primes for the search below.
primes2 = [p for p in primes if p > 1000]
def getDigits(x):
    """Return the base-10 digits of x, least significant first."""
    digits = []
    while x > 0:
        x, remainder = divmod(x, 10)
        digits.append(remainder)
    return digits
def permutate(x):
    """Return all 4-digit numbers formed by permuting the digits of x.

    Fixed: the original excluded later digits by *value* (`d2 != d1`), so
    any number with a repeated digit (e.g. 1122) produced no permutations
    at all. itertools.permutations selects by position, which handles
    repeated digits; duplicates in the result are removed. For inputs
    with distinct digits the output is identical to the original,
    including its ordering.
    """
    from itertools import permutations
    # Same digit order getDigits() produces: least significant first.
    digits = [int(c) for c in str(x)][::-1]
    perms = []
    for d1, d2, d3, d4 in permutations(digits, 4):
        n = d1 * 1000 + d2 * 100 + d3 * 10 + d4
        if n not in perms:
            perms.append(n)
    return perms
def areEqualspaced(n1, n2, n3):
    """True when any two of the three pairwise gaps are equal, i.e. the
    numbers can be arranged into an arithmetic progression."""
    gaps = (n1 - n2, n2 - n3, n3 - n1)
    return gaps[0] == gaps[1] or gaps[1] == gaps[2] or gaps[2] == gaps[0]
# Search 4-digit primes whose digit permutations contain at least three
# primes forming an arithmetic progression (Project Euler 49 style).
for prime in primes2:
    permPrimes = []
    for perm in permutate(prime):
        if perm in primes2:
            permPrimes.append(perm)
    if len(permPrimes) >= 3:
        for perm in permPrimes:
            for perm2 in permPrimes:
                if perm2 != perm:
                    for perm3 in permPrimes:
                        # NOTE(review): perm3 is not required to differ
                        # from perm/perm2, so degenerate triples print too.
                        if areEqualspaced(perm, perm2, perm3):
                            print(perm, perm2, perm3)
print("Digits of 1234 are : ", getDigits(1234))
print("Permutations of 1234 are : ", permutate(1234))
|
from datetime import datetime
from googleads import adwords, oauth2
import random, time, uuid, ast
import urllib.request as urllib2
# Max number of status polls before giving up (name keeps original "ATTEMPS" typo; used below).
MAX_POLL_ATTEMPS = 5
# Batch-job states that mean "still running".
PENDING_STATUSES = ("ACTIVE", "AWAITING_FILE", "CANCELING")
# AdWords API version used for all services.
API_VERSION = "v201702"
class BatchJob(object):
    """Collects AdWords mutate operations and uploads them as one batch job."""

    def __init__(self, client):
        self.batchJobHelper = client.GetBatchJobHelper(version=API_VERSION)
        self.service = client.GetService("BatchJobService", version=API_VERSION)
        self.batchJob = self.addBatchJob()
        self.uploadUrl = self.batchJob["uploadUrl"]["url"]
        self.id = self.batchJob["id"]
        self.downloadUrl = None
        # Operation buffers, one per entity type, uploaded together later.
        self.budgetOps = []
        self.campaignOps = []
        self.agOps = []
        self.kwOps = []
        self.negativeOps = []
        self.adOps = []

    # adds batch job on __init__
    def addBatchJob(self):
        """ Input: Adwords client object
            Output: BatchJob object
        """
        batch_job_operations = [{
            "operand": {},
            "operator": "ADD"
        }]
        return self.service.mutate(batch_job_operations)["value"][0]

    # to be called by user, once all desired operations were added
    def runBatchJob(self):
        """Upload all buffered operations and poll for the job's result URL."""
        possible_arguments = [self.budgetOps, self.campaignOps, self.agOps, self.kwOps, self.negativeOps, self.adOps]
        # only add operations to args list, if there's operations in there
        arguments_list = [entity for entity in possible_arguments if entity]
        # BUGFIX: the original inserted into an undefined name `arguments`
        # (NameError at runtime). UploadOperations expects the upload URL
        # first, followed by each operation list, so unpack the list.
        arguments_list.insert(0, self.uploadUrl)
        self.batchJobHelper.UploadOperations(*arguments_list)
        return self.getDownloadUrlWhenReady()

    # checks if batch job is ready, and if it is, gets download url
    def getDownloadUrlWhenReady(self):
        """ Input: Adwords Client object, Int BatchJobId
            Output: String, url of batch job result
        """
        selector = {
            "fields": ["Id", "Status", "DownloadUrl"],
            "predicates": [{
                "field": "Id",
                "operator": "EQUALS",
                "values": [self.id]
            }]
        }
        batch_job = self.service.get(selector)["entries"][0]
        poll_attempt = 0
        # Exponential backoff with jitter between polls.
        while (poll_attempt in range(MAX_POLL_ATTEMPS) and batch_job["status"] in PENDING_STATUSES):
            sleep_interval = (30 * (2 ** poll_attempt) + (random.randint(0, 10000) / 1000))
            print("Batch Job not ready, sleeping for %s seconds" % sleep_interval)
            time.sleep(sleep_interval)
            batch_job = self.service.get(selector)["entries"][0]
            poll_attempt += 1
        if "downloadUrl" in batch_job:
            url = batch_job["downloadUrl"]["url"]
            print("Batch Job with Id '%s', Status '%s' and DownloadUrl '%s' is ready." % (batch_job["id"], batch_job["status"], url))
            return url
        raise Exception("Batch Job not finished downloading. Try checking later.")

    def getHelperId(self):
        return self.batchJobHelper.GetId()

    """ ////////////////////////////////////////////////////////////////////////////////////////////////////////////
        OPERATION BUILDERS TO PASS TO BATCH JOB
        ////////////////////////////////////////////////////////////////////////////////////////////////////////////
    """

    def addBudgetOps(self, str_budget_id, str_budget_name, str_budget_amount):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
            Will build a list of JSON budget operations, to pass to the batch job
        """
        budget_operation = {
            "xsi_type": "BudgetOperation",
            "operand": {
                "name": str_budget_name,
                "budgetId": str_budget_id,
                "amount": int(str_budget_amount),
                "deliveryMethod": "STANDARD"
            },
            "operator": "ADD"
        }
        if budget_operation not in self.budgetOps:
            self.budgetOps.append(budget_operation)

    def addCampaignOps(self, str_camp_id, str_camp_name, str_budget_id):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
        """
        campaign_operation = {
            "xsi_type": "CampaignOperation",
            "operand": {
                "name": str_camp_name,
                "status": "PAUSED",
                "id": str_camp_id,
                "advertisingChannelType": "SEARCH",
                "networkSetting": {
                    "targetGoogleSearch": "TRUE",
                    "targetSearchNetwork": "TRUE",
                    "targetContentNetwork": "FALSE"
                },
                "budget": {
                    "budgetId": str_budget_id
                },
                "biddingStrategyConfiguration": {
                    "biddingStrategyType": "MANUAL_CPC"
                }
            },
            "operator": "ADD"
        }
        if campaign_operation not in self.campaignOps:
            self.campaignOps.append(campaign_operation)

    def addAdGroupOps(self, str_campaignId, str_agId, str_agName, str_agBid):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
        """
        adgroup_operation = {
            "xsi_type": "AdGroupOperation",
            "operand": {
                "campaignId": str_campaignId,
                "id": str_agId,
                "name": str_agName,
                "status": "ENABLED",
                "urlCustomParameters": {"parameters": [{"key": "group", "value": str_agName}]},
                "biddingStrategyConfiguration": {
                    "bids": [
                        {
                            "xsi_type": "CpcBid",
                            "bid": {"microAmount": str_agBid}
                        }]
                }
            },
            "operator": "ADD",
        }
        if adgroup_operation not in self.agOps:
            self.agOps.append(adgroup_operation)

    def addKeywordOps(self, str_agId, str_query, str_matchtype, str_finalUrl):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
        """
        keyword_operation = {
            "xsi_type": "AdGroupCriterionOperation",
            "operand": {
                "xsi_type": "BiddableAdGroupCriterion",
                "adGroupId": str_agId,
                "criterion": {
                    "xsi_type": "Keyword",
                    "text": str_query,
                    "matchType": str_matchtype
                },
                "finalUrls": {"urls": [str_finalUrl]}
            },
            "operator": "ADD"
        }
        if keyword_operation not in self.kwOps:
            self.kwOps.append(keyword_operation)

    def addNegativeOps(self, str_agId, str_query, str_matchtype):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
        """
        # don't exclude on exact match keywords
        if str_matchtype != "EXACT":
            #get rid of plusses in case of BMM query/keyword
            query_string = str_query.replace("+", "")
            keyword_operation = {
                "xsi_type": "AdGroupCriterionOperation",
                "operand": {
                    "xsi_type": "NegativeAdGroupCriterion",
                    "adGroupId": str_agId,
                    "criterion": {
                        "xsi_type": "Keyword",
                        "text": query_string,
                        "matchType": "EXACT"
                    },
                    "criterionUse": "NEGATIVE"
                },
                "operator": "ADD"
            }
            if keyword_operation not in self.negativeOps:
                self.negativeOps.append(keyword_operation)

    def addAdOps(self, str_agId, list_ads):
        """ Input: BatchJobHelper Class, List of JSON objects
            Output: List of JSON objects
        """
        for ad in list_ads:
            ad_operation = {
                "xsi_type": "AdGroupAdOperation",
                "operand": {
                    "adGroupId": str_agId,
                    "ad": {
                        "xsi_type": "ExpandedTextAd",
                        "headlinePart1": ad["HeadlinePart1"],
                        "headlinePart2": ad["HeadlinePart2"],
                        "description": ad["Description"],
                        # "path1": ad["ad"]["path1"],
                        # "path2": ad["ad"]["path2"],
                        "finalUrls": [ad["CreativeFinalUrls"][4:][:-4]]
                    },
                    "status": ad["Status"]
                },
                "operator": "ADD"
            }
            if ad_operation not in self.adOps:
                self.adOps.append(ad_operation)
from tornado.web import Application
from handlers.employees import EmployeesHandler, EmployeeHandler
def make_app():
    """Build the Tornado application exposing the employee API routes."""
    routes = [
        (r'/api/employees', EmployeesHandler),
        (r'/api/employees/([^/]+)', EmployeeHandler),
    ]
    return Application(routes)
|
''' copy the whole directory to dpm
'''
# Standard imports
import os
# default locations
from StopsDilepton.samples.default_locations import default_locations
# Arguments
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser for cmgPostProcessing")
#argParser.add_argument('--logLevel', action='store', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], default='INFO', help="Log level for logging" )
argParser.add_argument('--target_path', action='store', nargs='?', type=str, default = 'Stops2l-postprocessed', help="Name of the directory the post-processed files will be saved" )
argParser.add_argument('--year', action='store', type=int, default = 2016, choices = [2016, 2017, 2018], help="Which year?" )
args = argParser.parse_args()
# Pick source directories and post-processing tags for the chosen year.
if args.year == 2016:
    mc_source_path = os.path.join( default_locations.mc_2016_data_directory, default_locations.mc_2016_postProcessing_directory)
    data_source_path = os.path.join( default_locations.data_2016_data_directory, default_locations.data_2016_postProcessing_directory)
    mc_postProcessing_directory = default_locations.mc_2016_postProcessing_directory
    data_postProcessing_directory = default_locations.data_2016_postProcessing_directory
    signal_data_directory = "/afs/hephy.at/data/cms04/nanoTuples/"
    signal_postProcessing_directory = "stops_2016_nano_v0p23/dilep/"
elif args.year == 2017:
    mc_source_path = os.path.join( default_locations.mc_2017_data_directory, default_locations.mc_2017_postProcessing_directory)
    data_source_path = os.path.join( default_locations.data_2017_data_directory, default_locations.data_2017_postProcessing_directory)
    mc_postProcessing_directory = default_locations.mc_2017_postProcessing_directory
    data_postProcessing_directory = default_locations.data_2017_postProcessing_directory
    signal_data_directory = "/afs/hephy.at/data/cms04/nanoTuples/"
    signal_postProcessing_directory = "stops_2017_nano_v0p23/dilep/"
elif args.year == 2018:
    mc_source_path = os.path.join( default_locations.mc_2018_data_directory, default_locations.mc_2018_postProcessing_directory)
    data_source_path = os.path.join( default_locations.data_2018_data_directory, default_locations.data_2018_postProcessing_directory)
    mc_postProcessing_directory = default_locations.mc_2018_postProcessing_directory
    data_postProcessing_directory = default_locations.data_2018_postProcessing_directory
    signal_data_directory = "/afs/hephy.at/data/cms04/nanoTuples/"
    signal_postProcessing_directory = "stops_2018_nano_v0p23/dilep/"
# DPM target paths under the current user's store area.
data_target_path = '/dpm/oeaw.ac.at/home/cms/store/user/%s/%s/%s'%( os.environ['USER'], args.target_path, data_postProcessing_directory)
mc_target_path = '/dpm/oeaw.ac.at/home/cms/store/user/%s/%s/%s'%( os.environ['USER'], args.target_path, mc_postProcessing_directory)
signal_source_path = os.path.join( signal_data_directory, signal_postProcessing_directory)
signal_target_path = '/dpm/oeaw.ac.at/home/cms/store/user/%s/%s/%s'%( os.environ['USER'], args.target_path, signal_postProcessing_directory)
jobs=[]
# One (source, target) copy job per object in the chosen source directory.
#for source, target in [( mc_source_path, mc_target_path)]: #(signal_source_path, signal_target_path), ( data_source_path, data_target_path)]:
#for source, target in [( data_source_path, data_target_path)]:
for source, target in [ (signal_source_path, signal_target_path)]:
    for obj in os.listdir(source):
        jobs.append( ( os.path.join( source, obj ), os.path.join(target)) )
import subprocess
def wrapper( job ):
    # Copy one object to DPM (Python 2 script: print statement below).
    cmd = ["dpmTools.py", "--fromLocal", "--cp", job[0], job[1]]
    #cmd = ["xrdcp", job[0], job[1]]
    print " ".join(cmd)
    subprocess.call(cmd)
from multiprocessing import Pool
# Two parallel copy workers.
pool = Pool(processes=2)
results = pool.map(wrapper, jobs)
pool.close()
pool.join()
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 03 15:56:31 2018
@author: MATTEK6 grp 2
"""
from path_import import _gaincalculation
from wmn_main import _interferencelimit
from main import main
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def transmissions_success(
        x_cord, y_cord, distance, P_main, P_side, P_prob,
        density, width, p_trans, systemvariables):
    """Simulate one transmission attempt.

    Returns 1 when the aggregate interference stays below the limit for
    the given rate and transmit power, 0 otherwise.
    """
    trans_x, trans_y = 1, 0
    receiv_x, receiv_y = 0, 0
    interfer, propinterfer = main(x_cord, y_cord, trans_x, trans_y, receiv_x,
                                  receiv_y, P_main, P_side, P_prob, density,
                                  width, p_trans)
    limit = _interferencelimit(systemvariables['R'],
                               systemvariables['power']*P_main, distance)
    return 1 if interfer < limit else 0
def ppp(density, size):
    """Sample a Poisson point process on a size x size square centred at 0.

    Returns the x and y coordinate arrays of the sampled points.
    """
    count = np.random.poisson(density*size**2)
    half = size/2.
    xs = np.random.uniform(-half, half, count)
    ys = np.random.uniform(-half, half, count)
    return xs, ys
if __name__ == '__main__':
    # initializing varibles for summation of results to determine averge
    abs0 = 0
    pro0 = 0
    l_abs0 = 0
    l_pro0 = 0
    throughput_sum0 = 0
    latency_sum0 = 0
    ET0_sum = 0
    EL0_sum = 0
    # set random seed (3 is used for documented results)
    # np.random.seed(3)
    # intital system settings
    size = 20
    intensity = 5
    distance = 1
    width = np.radians(30)
    efficiency = 0.8
    P_main, P_side = _gaincalculation(width, efficiency)
    systemvariables = {'width': width, 'efficiency': efficiency,
                       'R': 2, 'power': 1, 'noise': 1}
    Transmissionvariables = {'R': systemvariables['R'], 'packagesize': 1,
                             'Processinglag': 1, 'intensity': intensity}
    p_trans = 0.5
    maxiter = 50000
    HastagPackage = 100
    linLength = 200
    # initializing graph class
    # (intensity is rebound here from a scalar to the sweep axis)
    intensity = np.linspace(0, 100, linLength)
    throughput = np.zeros(linLength)
    latency = np.zeros(linLength)
    # start simulations: sweep device intensity, average over nsim draws
    for d in range(linLength):
        nsim = 400
        throughput_sum1 = 0
        latency_sum1 = 0
        print(d)
        for i in range(nsim):
            # x, y cord for all devices
            x, y = ppp(intensity[d], size)
            # probability of a device interfering
            P_ = ((systemvariables['width']/(2*np.pi)) * p_trans
                  * ((systemvariables['width']/(2*np.pi)) * P_main
                     + (1-(systemvariables['width']/(2*np.pi))) * P_side))
            p_received = 0
            # transmit until HastagPackage packets are received or maxiter slots pass
            for j in range(maxiter):
                p_received += transmissions_success(
                    x, y, distance, P_main, P_side, P_,
                    intensity[d], width, p_trans, systemvariables)
                if p_received == HastagPackage:
                    break
            # Determine resulting Throughput and latency
            # the denominator transform into [packet/s] as the model from
            # [packet/timeslot]
            # NOTE(review): if no packet is ever received, p_received stays
            # 0 and latency0 below divides by zero.
            spath0ptr = (p_received/(j+1))/(1/systemvariables['R']+1)
            # Same transformation
            latency0 = ((j+1)/p_received)*(1/systemvariables['R']+1)
            # summation for determination of average
            throughput_sum0 += spath0ptr
            latency_sum0 += latency0
            throughput_sum1 += spath0ptr
            latency_sum1 += latency0
            # Determine corresponding expected results from model for
            # comparison
            # probability of transmission for each hop
            # Pt_0 = float(sc.special.erfc(
            #     np.sqrt(
            #         (P_*(np.pi**3)*intensity[d]**2/2)
            #         / (2*((P_main*(distance)**(-4))
            #               / (2**(systemvariables['R'])-1))-1))
            #     ))
            # var0 = 1/Pt_0  # sum of invers probabilities
            # Expected throughput with spetial reuse
            # ET0 = (1/((1/systemvariables['R'] + 1) * var0))
            # # Expected Latency
            # EL0 = ((1/systemvariables['R']+1)*var0)
            # comparison with simulation, summation is done to determine
            # the average
            # path0
            # abselute difference Throughput
            # abs0 += np.abs(spath0ptr-ET0)
            # # kvocentage deviation Throughput
            # pro0 += (ET0/spath0ptr)
            #
            # # abselute difference Latency
            # l_abs0 += np.abs(latency0-EL0)
            # # kvocantage deviation Latency
            # l_pro0 += EL0/latency0
            #
            # # summation for determine average
            # ET0_sum += ET0
            # EL0_sum += EL0
        throughput[d] = throughput_sum1/nsim
        latency[d] = latency_sum1/nsim
    # array with average results
    save = np.matrix([[abs0/nsim,
                       pro0/nsim,
                       throughput_sum0/nsim,
                       ET0_sum/nsim,
                       l_abs0/nsim,
                       l_pro0/nsim,
                       latency_sum0/nsim,
                       EL0_sum/nsim]])
    # save results in txt file, remember to change name!!!!
    # np.savetxt('data/1hop_R_I_x.out', save, delimiter=',')
    # plt.figure(2)
    # plt.plot(intensity, latency, label='1 hop connection')
    # plt.text(0, 1.648, r'$Rate=%s, \ \theta = %s$' % (2, 30))
    # plt.legend()
    # plt.xlabel(r'Intensity $\lambda$')
    # plt.ylabel('Latency [s]')
    # plt.title('Calculated Throughput')
    # plt.show()
    #
    # plt.figure(1)
    # plt.plot(intensity, throughput, label='1 hop connection')
    # plt.text(75, 0.663, r'$Rate=%s, \ \theta = %s$' % (2, 30))
    # plt.legend()
    # plt.xlabel(r'Intensity $\lambda$')
    # plt.ylabel('Throughput [bits/s]')
    # plt.title('Calculated Latency')
    # plt.show()
|
# FizzBuzz check for a single number (Python 2: raw_input, print statement-free).
number = int(raw_input("give me a number: "))
if (number%3 == 0) and (number%5 == 0):
    print("Fizz Buzz")
elif (number%3) == 0:
    print("Fizz")
elif (number%5) == 0:
    print("Buzz")
else:
    print("Please enter another number.")
|
#!/usr/bin/python
"""
==============================================================================
Author: Tao Li (taoli@ucsd.edu)
Date: Jul 10, 2015
Question: 150-Evaluate-Reverse-Polish-Notation
Link: https://leetcode.com/problems/evaluate-reverse-polish-notation/
==============================================================================
Evaluate the value of an arithmetic expression in Reverse Polish Notation.
Valid operators are +, -, *, /. Each operand may be an integer or another expression.
Some examples:
["2", "1", "+", "3", "*"] -> ((2 + 1) * 3) -> 9
["4", "13", "5", "/", "+"] -> (4 + (13 / 5)) -> 6
==============================================================================
Method: use a stack
Time Complexity: O(n)
Space Complexity: O(log n)
Note:
1. "6 / -132 -> -1" in python, while the answer is 0 in C++
2. use "int(float(x)/y)" to avoid this trick
==============================================================================
"""
class Solution:
    # @param {string[]} tokens
    # @return {integer}
    def evalRPN(self, tokens):
        """Evaluate an arithmetic expression in Reverse Polish Notation.

        Division truncates toward zero via int(float(x)/y), matching the
        C++-style semantics expected by the problem.
        """
        ops = {
            "+": lambda a, b: a + b,
            "-": lambda a, b: a - b,
            "*": lambda a, b: a * b,
            "/": lambda a, b: int(float(a) / b),
        }
        stack = []
        for token in tokens:
            if token in ops:
                rhs = stack.pop()
                lhs = stack.pop()
                stack.append(ops[token](lhs, rhs))
            else:
                stack.append(int(token))
        return stack[-1]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Author : xueqiang.liu
@contact : xqliu@dragondropinn.com
@Date : 2019/6/24
@Description :fastq文件质控
'''
import os
import time
import sys
import functools
from profile import Profile
# External tool paths (fastp, summary4fastp), expanded into shell commands in QC().
var_path = Profile()
def timefly(func):
    """Decorator that prints how long each call to `func` took."""
    @functools.wraps(func)
    def wrapper(*args, **kw):
        started = time.time()
        result = func(*args, **kw)
        elapsed = time.time() - started
        print('{} runtime: {}'.format(func.__name__, TransTime(elapsed)))
        return result
    return wrapper
def TransTime(seconds):
    """Format a duration in seconds as 'Hh Mmin Ss'."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return '{}h {}min {:.0f}s'.format(hours, minutes, secs)
@timefly
def QC(prefix, threads):
    """Run fastp quality control on the paired FASTQ files of sample `prefix`.

    Finds `<prefix>*1.f*` / `<prefix>*2.f*` in the working directory,
    writes cleaned reads, HTML/JSON reports and a summary table under
    DataQC/, and ensures a Result directory exists.
    """
    print('{0} 开始对样本{1}进行变异分析......'.format(time.ctime(), prefix))
    path = 'DataQC/InterFiles'
    os.makedirs(path,exist_ok=True)
    # Paired-end read files for this sample, matched by name fragment.
    fastq = [ i for i in os.listdir() if prefix in i]
    fastq1 = ''.join([i for i in fastq if '1.f' in i])
    fastq2 = ''.join([i for i in fastq if '2.f' in i])
    # fastp binary path comes from var_path (Profile); log goes next to outputs.
    os.system("{fastp} -w {th} --in1 {f1} --out1 {p}/{T}_R1.clean.fastq.gz "
              "--in2 {f2} --out2 {p}/{T}_R2.clean.fastq.gz "
              "--low_complexity_filter --correction --length_required=70 "
              "--html DataQC/{T}.QCReport.html --json {p}/{T}.json --report_title {p}/{T}.QCReport "
              " >{p}/{T}.fastp.log 2>&1".format(T=prefix, p=path,f1=fastq1, f2=fastq2, th=threads,**var_path))
    os.system("python {summary4fastp} {p}/{T}.json > DataQC/{T}.QCsummary.xls ".format(T=prefix,p=path,**var_path))
    # Ensure the Result output directory exists.
    if not os.path.exists('Result'):
        os.mkdir('Result')
if __name__ == '__main__':
    # CLI: python script.py <prefix> <threads>
    # NOTE(review): the guard only checks for <prefix>; a missing
    # <threads> argument still raises IndexError below — confirm intent.
    if len(sys.argv) < 2:
        print('\nusage: python {} [prefix] [threads]\n'.format(sys.argv[0]))
        sys.exit(1)
    prefix = sys.argv[1]
    threads = sys.argv[2]
    QC(prefix, threads)
|
''' Tests for WMT extract file parser'''
import os
import pytest
from xlrd import XLRDError
import wmt_etl.extract_parser as parser
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_FILE_PATH = os.path.join(THIS_DIR, 'data/WMT_Extract_sample.xlsx')
INVALID_DATA_FILE_PATH = os.path.join(THIS_DIR, 'data/Invalid_WMT_Extract_sample.xlsx')
INVALID_FILE_TYPE_PATH = os.path.join(THIS_DIR, 'data/invalid.txt')
def test_column_unchanged():
'''Already clean column names should remain unchanged'''
name = 'staff_grade'
clean = parser.clean_name(name)
assert clean == name
def test_clean_names():
'''Column names should be cleaned of illegal characters, case and trailing whitespace'''
name = ' My-Staff_Grade '
clean = parser.clean_name(name)
assert clean == 'mystaff_grade'
def test_transform_cols():
'''Set of column headers should be mapped to staging compatible format'''
cols = ['OM_Key', 'Team_Code', 'LicIn1st16Weeks', 'V-CRN_Count']
clean_cols = parser.transform_names(cols)
assert clean_cols[0] == 'om_key'
assert clean_cols[1] == 'team_code'
assert clean_cols[2] == 'licin1st16weeks'
assert clean_cols[3] == 'vcrn_count'
def test_validate_sheet_names():
    '''Test validation of extract workbook format'''
    # The full, expected worksheet layout of a valid extract workbook.
    sheet_names = ('Wmt_Extract Wmt_Extract_Filtered Court_Reports Inst_Reports '
                   'Flag_Warr_4_n Flag_Upw Flag_O_Due Flag_Priority CMS GS ARMS '
                   'T2A WMT_Extract_SA Suspended_Lifers T2a_Detail Omic_Teams').split()
    assert parser.validate_workbook_format(sheet_names)
def test_invalid_sheet_names():
    '''Test validation fails if extract format not as expected'''
    bad_names = ['wmt_extract', 'court_reports', 'dummy_value']
    result = parser.validate_workbook_format(bad_names)
    assert not result
def test_load_workbook():
    '''Test that a workbook in XLSX format can be successfully loaded'''
    workbook = parser.load_workbook(TEST_DATA_FILE_PATH)
    names = workbook.sheet_names
    assert len(names) == 16
    # Spot-check sheets at the start and end of the workbook.
    expected = {0: 'WMT_Extract',
                1: 'WMT_Extract_Filtered',
                2: 'Court_Reports',
                13: 'Suspended_Lifers',
                14: 'T2A_Detail',
                15: 'OMIC_Teams'}
    for index, sheet in expected.items():
        assert names[index] == sheet
def test_parse_workbook():
    '''Test that a workbook can be parsed correctly'''
    workbook = parser.load_workbook(TEST_DATA_FILE_PATH)
    dataframes = parser.parse_workbook(workbook)
    # Removed `print len(dataframes)`: Python 2 print-statement syntax, a
    # SyntaxError under Python 3, and debug output does not belong in a test.
    assert len(dataframes) == 16
    wmt = dataframes['wmt_extract']
    court = dataframes['court_reports']
    assert not wmt.empty
    assert len(wmt.columns) == 41
    assert len(wmt.index) == 2
    assert len(court.columns) == 17
    assert len(court.index) == 2
    assert wmt.columns[3] == 'ldu_desc'
    assert court.columns[7] == 'om_surname'
def test_load_workbook_missing_file():
    '''Loading a missing workbook file should raise an error'''
    missing_path = './data/missing.xlsx'
    with pytest.raises(IOError) as excinfo:
        parser.load_workbook(missing_path)
    assert 'No such file or directory' in str(excinfo.value)
def test_load_workbook_invalid():
    '''Loading a workbook with invalid format should raise an error'''
    with pytest.raises(ValueError) as excinfo:
        parser.load_workbook(INVALID_DATA_FILE_PATH)
    message = str(excinfo.value)
    assert 'Workbook does not contain the expected worksheets' in message
def test_invalid_file_type():
    '''Loading any file other than a valid workbook will raise an error'''
    with pytest.raises(XLRDError) as excinfo:
        parser.load_workbook(INVALID_FILE_TYPE_PATH)
    message = str(excinfo.value)
    assert 'Unsupported format, or corrupt file' in message
|
# TODO: add name recognition, check for word combinations in handler2, thread handler2 with a separate dict
from bs4 import BeautifulSoup
from PIL import Image
import urllib
import unidecode
import pyscreenshot
import sys, os
import socket as cv2 #FIX NO NEED SOCKET
#import pytesseract
import argparse
import webbrowser
from google import google
import json
import threading
# Serializes console output from the concurrent handler threads.
print_lock = threading.Lock()
# Filled by loadlists() from settings.json: words that mark a negatively
# phrased question ("negative_words") and stop words stripped from questions
# before searching ("remove_words").
negative_words = []
remove_words = []
def screen_grab(to_save):
    """Capture the fixed quiz region of the screen and save it to *to_save*.

    The bbox coordinates are hard-coded for a specific window position.
    """
    im = pyscreenshot.grab(bbox=(40, 205, 485, 640))
    im.save(to_save)
def read_screen():
    """Screenshot the quiz area, grayscale it, and OCR it to text.

    NOTE(review): ``cv2`` is actually ``socket`` here (see
    ``import socket as cv2`` at the top of the file) and ``pytesseract``
    is commented out, so this function fails at runtime until the real
    OpenCV/pytesseract imports are restored — confirm intended deps.
    """
    screenshot_file = "Screens/to_ocr.png"
    screen_grab(screenshot_file)
    # load the image
    image = cv2.imread(screenshot_file)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # store grayscale image as a temp file to apply OCR
    filename = "Screens/{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    text = pytesseract.image_to_string(Image.open(filename))
    # os.remove(filename)
    # os.remove(screenshot_file)
    return text
def parse_question():
    """OCR the screen and split the text into (question, options).

    Every line up to and including the first one containing '?' belongs to
    the question; subsequent non-empty lines are the answer options.
    """
    ocr_text = read_screen()
    question_parts = []
    options = []
    seen_question_mark = False
    for line in ocr_text.splitlines():
        if seen_question_mark:
            if line != '':
                options.append(line)
        else:
            question_parts.append(line)
            if '?' in line:
                seen_question_mark = True
    # The original accumulated " " + line per line, giving a leading space.
    return " ".join([""] + question_parts), options
def loadlists():
    """Populate the global word lists from settings.json.

    The original opened and parsed settings.json twice and never closed
    either file handle; read and parse it once inside a context manager.
    """
    global negative_words, remove_words
    with open("settings.json") as settings_file:
        settings = json.load(settings_file)
    remove_words = settings["remove_words"]
    negative_words = settings["negative_words"]
def split_string(source):
    """Tokenize *source*, splitting on the delimiter set used by the search
    helpers; consecutive delimiters produce no empty tokens."""
    delimiters = ",!-.;/?@ #"
    tokens = []
    in_token = False
    for ch in source:
        if ch in delimiters:
            in_token = False
        elif in_token:
            tokens[-1] = tokens[-1] + ch
        else:
            tokens.append(ch)
            in_token = True
    return tokens
def smart_answer(content, qwords, inc):
    """Award *inc* points for each adjacent word pair of *qwords* that
    appears verbatim (space-separated) in *content*."""
    return sum(inc for first, second in zip(qwords, qwords[1:])
               if (first + " " + second) in content)
def handler(words, o, neg):
    """Score option *o* by counting question-word hits on its top two
    Wikipedia search results; store the score in the global ``points``.

    The score is negated when *neg* is truthy (negatively phrased question).
    Runs on a worker thread, one per option.
    """
    global points
    content = ""
    # NOTE(review): maxo/maxp are never used below — dead locals.
    maxo = ""
    maxp = - sys.maxsize
    o = o.lower()
    original = o  # key under which the score is stored
    temp = 0
    o += ' site:wikipedia.org'
    search_wiki = google.search(o, 1)
    link = search_wiki[0].link
    with print_lock:
        print (link)
    content = urllib.request.urlopen(link).read()
    soup = BeautifulSoup(content, "lxml")
    page = soup.get_text().lower()
    # Occurrence count of each question word on the first result page.
    for word in words:
        temp = temp + page.count(word)
    # NOTE(review): if .description is a str, this iterates characters,
    # not words — confirm the google result type before trusting this bonus.
    for word in search_wiki[0].description:
        if word in words:
            temp += 1000
    temp += smart_answer(page, words, 700)
    # Repeat with different weights on the second result.
    link = search_wiki[1].link
    content = urllib.request.urlopen(link).read()
    soup = BeautifulSoup(content, "lxml")
    page = soup.get_text().lower()
    for word in words:
        temp = temp + page.count(word.lower())
    for word in search_wiki[1].description:
        if word in words:
            temp += 530
    temp += smart_answer(page, words, 870)
    if neg:
        temp *= -1
    points[original] = temp
points = {}  # lower-cased option -> score, filled by handler threads
def cleanwords(string):
    """Drop words appearing (case-insensitively) in the global remove_words."""
    kept = []
    for word in string:
        if word.lower() not in remove_words:
            kept.append(word)
    return kept
def handler2(words, olist, neg):
    """Score every option by its occurrence count on the top Wikipedia hit
    for the whole question; store results in the global ``points2``.

    Scores are negated when *neg* is truthy (negatively phrased question).
    A large triple-quoted block of dead Python 2 code (never executed — it
    was an unused string expression) has been removed.
    """
    global points2
    query = ' '.join(words) + " site:wikipedia.org"
    print(query)
    search_wiki = google.search(query, 1)
    link = search_wiki[0].link
    # NOTE(review): macOS-specific and interpolates an external URL into a
    # shell command; webbrowser.open(link) would be safer and portable.
    os.system("open " + link)
    content = urllib.request.urlopen(link).read()
    soup = BeautifulSoup(content, "lxml")
    page = soup.get_text().lower()
    sign = -1 if neg else 1
    for word in olist:
        points2[word.lower()] = sign * page.count(word.lower()) * 2700
# Global score dicts, keyed by lower-cased option string; handler fills
# ``points``, handler2 fills ``points2``, and search() merges them.
points = {}
points2 = {}
def search(sim_ques, options, neg):
    """Fan out one handler thread per option plus a handler2 thread for the
    whole option list, wait for all of them, then fold ``points2`` into the
    global ``points`` dict.

    :param sim_ques: question text (tokenized with split_string)
    :param options: list of candidate answer strings
    :param neg: truthy if the question is negatively phrased
    """
    words = split_string(sim_ques)
    threads = []
    for option in options:  # idiomatic iteration instead of range(len(...))
        t = threading.Thread(target=handler, args=[words, option, neg])
        t.daemon = True  # setDaemon() is deprecated since Python 3.10
        threads.append(t)
    t = threading.Thread(target=handler2, args=[words, options, neg])
    t.daemon = True
    threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print (points)
    print (points2)
    # handler and handler2 both key their dicts by the lower-cased option,
    # so the key sets match.
    for key in points:
        points[key] += points2[key]
def bestchoice(d1):
    """Return the key with the largest value (first such key on ties)."""
    return max(d1, key=d1.get)
def formquestion(question):
    """Lower-case the question, drop stop words, and strip punctuation.

    Returns (clean_question, negative_word_hits) where the second element
    lists the question's words found in the global ``negative_words``.
    """
    sentwords = question.lower().split()
    clean = cleanwords(sentwords)
    temp = ' '.join(clean)
    # Bug fix: the original `if ch != "?" or ch != '"' or ch != "'"` is
    # always True (a character cannot equal all three), so the punctuation
    # was never stripped. `not in` is the intended test.
    clean_question = "".join(ch for ch in temp if ch not in "?\"'")
    return clean_question.lower(), [i for i in sentwords if i in negative_words]
def main():
    """Live mode: OCR the question from the screen, search, speak the answer."""
    loadlists()
    question, options = parse_question()
    try:
        pass#os.system("open https://www.google.com/search?q=" + unidecode.unidecode('+'.join(question.split())))
    except Exception:
        pass
    # NOTE(review): qsimp is computed but the raw question is passed to
    # search() below — confirm whether search(qsimp, ...) was intended.
    qsimp, neg = formquestion(question)
    print("\n" + question + "\n")
    search(question, options, neg)
    answer = bestchoice(points)
    print (answer)
    # macOS text-to-speech; `answer` is not shell-quoted here.
    os.system("say i think " + answer)
    print (points)
#TODO: REPLACE ' and with %27
def othermain():
    """Interactive mode: prompt for a question and three options, then search
    and speak the best-scoring option.

    Fixes Python 2 remnants that broke this file on Python 3 (which the rest
    of the file requires via ``urllib.request``): ``raw_input`` -> ``input``
    and the ``print points`` statement (a SyntaxError) -> ``print(points)``.
    """
    loadlists()
    question = input("Question: ")
    temp = '+'.join(question.split())
    try:
        pass#os.system("open https://www.google.com/search?q=" + unidecode.unidecode(temp))
    except Exception:
        pass
    print ("open https://www.google.com/search?q=" + unidecode.unidecode(temp))
    options = []
    for a in range(3):
        options.append(input("Option " + str(a + 1) + ": "))
    qsimp, neg = formquestion(question)
    #print("\n" + question + "\n")
    search(question, options, neg)
    print(points)
    answer = bestchoice(points)
    print (answer)
    os.system("say i think \'" + answer + "\'")
if __name__ == '__main__':
    # "live" argument -> screen-OCR mode; anything else, or no argument
    # at all (IndexError), -> interactive prompt mode.
    try:
        if sys.argv[1] == ("live"):
            main()
        else:
            othermain()
    except IndexError:
        othermain()
|
"""
This file contains tools to facilitate the use of the system.
"""
def generate_connection_string(type_database: str = "", user: str = "", password: str = "", host: str = "localhost",
port: str = "", database: str = "") -> str:
"""
Returns the configuration string for SQLAlchmey to connect to the database.
:param type_database: Type database to connect. PS.: sqlite, postgre, mariadb, mysql, oracle...
:param user: User to connect into database.
:param password: Password to connect into database, but if not have user, this field will be ignored.
:param host: Host to connect database, by defaul is localhost, but if using sqlite, this field will be ignored.
:param port: Port to database's host, if not have host parameter, this field will be ignored.
:param database: Database's Name, if using SQLite, this need absolute path to SQLite file and filename.
:return: str: String with contains config to use in SQLAlchemy
"""
# Example of a full configuration string for SQLAlchemy.
# '<type_database>://<user>:<password>@<host>:<port>/<database>'
# security to avoid broken system
if isinstance(type_database, str) and isinstance(user, str) and isinstance(password, str) and \
isinstance(host, str) and isinstance(port, str) and isinstance(database, str):
string_connection = type_database + "://"
# if the user string is not empty, it will store the username in the string.
if not user == "":
string_connection = string_connection + user
# If the user has a password and user, it will store the password in the string.
if not password == "" and not user == "":
string_connection = string_connection + ":" + password
# If the connection has an address, it will store the address in the string.
if not host == "" and not type_database.lower() == "sqlite":
if not user == "":
string_connection = string_connection + "@" + host
else:
string_connection = string_connection + host
# If the connection requires a custom port and has a host
# insert the port into the string
if not port == "" and not host == "":
string_connection = string_connection + ":" + port
# Insert the server database into the string
string_connection = string_connection + "/" + database
return string_connection
return ""
|
from setuptools import Extension, find_packages, setup
from codecs import open
from os import path
from distutils.command.install import INSTALL_SCHEMES
import os
here = path.abspath(path.dirname(__file__))  # absolute directory of this setup.py


def file_content(fpath):
    """Return the UTF-8 text of *fpath*, resolved relative to this file."""
    full_path = path.join(here, fpath)
    with open(full_path, encoding='utf-8') as handle:
        return handle.read()
# Build/install metadata for the lidar_gym package.
# NOTE(review): `Extension`, `INSTALL_SCHEMES` and `os` are imported above
# but unused here — possibly leftovers from an earlier build configuration.
s = setup(
    name='lidar_gym',
    version='0.0.1',
    description='OpenAI gym training environment for agents controlling solid-state lidars',
    long_description=file_content('README.md'),
    url='https://gitlab.fel.cvut.cz/rozsyzde/lidar-gym',
    author='Zdenek Rozsypalek, CVUT',
    author_email='rozsyzde@fel.cvut.cz',
    license='MIT',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Multimedia :: Graphics :: 3D Modeling',
        'Topic :: Multimedia :: Graphics :: 3D Rendering',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['gym', 'lidar', 'environment', 'openai'],
    install_requires=['numpy', 'pykitti', 'voxel_map', 'gym'],
    # Optional extras: 3D visualisation backends and RL agent support.
    extras_require={'rendering': ['mayavi', 'vtk', 'qt'], 'agents': ['tensorforce']},
)
|
from abc import abstractmethod
import numpy as np
from macaw.models import LinearModel, LogisticModel, QuadraticModel
from .optimizers import GradientDescent, CoordinateDescent, MajorizationMinimization
__all__ = ['L1Norm', 'L2Norm', 'BernoulliLikelihood', 'Lasso',
'RidgeRegression', 'LogisticRegression', 'L1LogisticRegression']
class ObjectiveFunction(object):
    """An abstract class for a generic objective function."""
    def __call__(self, theta):
        """Calls :func:`evaluate`"""
        return self.evaluate(theta)
    @abstractmethod
    def evaluate(self, theta):
        """
        Returns the objective function evaluated at theta

        Parameters
        ----------
        theta : ndarray
            parameter vector of the model

        Returns
        -------
        objective_fun : scalar
            Returns the scalar value of the objective function evaluated at
            **params**
        """
        pass
    def fit(self, x0, optimizer='gd', n=1000, xtol=1e-6, ftol=1e-9):
        """Minimize the objective starting from x0.

        optimizer is 'gd' (gradient descent) or 'cd' (coordinate descent);
        any other value raises KeyError. The configured optimizer is stored
        on ``self.opt`` and returned after running ``compute``.
        NOTE(review): relies on subclasses providing ``self.gradient``.
        """
        opts = {'gd': GradientDescent, 'cd': CoordinateDescent}
        optimizer = opts[optimizer]
        self.opt = optimizer(self.evaluate, self.gradient)
        self.opt.compute(x0=x0, n=n, xtol=xtol, ftol=ftol)
        return self.opt
class L1Norm(ObjectiveFunction):
    r"""Defines the L1 Norm loss function. L1 norm is usually useful
    to optimize the "median" model, i.e., it is more robust to
    outliers than the quadratic loss function.

    .. math::
        \arg \min_{\theta \in \Theta} \sum_k |y_k - f(x_k, \theta)|

    Attributes
    ----------
    y : array-like
        Observed data
    model : callable
        A functional form that defines the model

    Examples
    --------
    >>> from macaw.objective_functions import L1Norm
    >>> from macaw.optimizers import MajorizationMinimization
    >>> from macaw.models import LinearModel
    >>> import numpy as np
    >>> # generate fake data
    >>> np.random.seed(0)
    >>> x = np.linspace(0, 10, 200)
    >>> fake_data = x * 3 + 10 + np.random.normal(scale=2, size=x.shape)
    >>> # build the model
    >>> my_line = LinearModel(x)
    >>> # build the objective function
    >>> l1norm = L1Norm(fake_data, my_line)
    >>> # perform optimization
    >>> mm = MajorizationMinimization(l1norm)
    >>> mm.compute(x0=(1., 1.))
    >>> # get best fit parameters
    >>> print(mm.x)
    [ 2.96016173 10.30580954]
    """
    def __init__(self, y, model):
        self.y = y
        self.model = model
    def __repr__(self):
        return "<L1Norm(model={})>".format(self.model)
    def evaluate(self, theta):
        """Sum of absolute residuals at *theta* (NaNs ignored)."""
        return np.nansum(np.absolute(self.y - self.model(*theta)))
    def surrogate_fun(self, theta, theta_n):
        """Evaluates a surrogate function that majorizes the L1Norm."""
        r = self.y - self.model(*theta)
        abs_r = np.abs(self.y - self.model(*theta_n))
        return .5 * np.nansum(r * r / (1e-6 + abs_r) + abs_r)
    def gradient_surrogate(self, theta, theta_n):
        """Computes the gradient of the surrogate function."""
        # NOTE(review): unlike surrogate_fun, this denominator has no 1e-6
        # guard, so a zero residual at theta_n divides by zero — confirm
        # whether (1e-6 + abs_r) was intended here as well.
        r = self.y - self.model(*theta)
        abs_r = np.abs(self.y - self.model(*theta_n))
        grad_model = self.model.gradient(*theta)
        return - np.nansum(r * grad_model / abs_r, axis=-1)
    def fit(self, x0, n=1000, xtol=1e-6, ftol=1e-9, **kwargs):
        """Fit via Majorization-Minimization (overrides the base gd/cd fit)."""
        mm = MajorizationMinimization(self, **kwargs)
        mm.compute(x0=x0, n=n, xtol=xtol, ftol=ftol)
        return mm
class L2Norm(ObjectiveFunction):
    r"""Defines the squared L2 norm loss function. L2 norm
    tends to fit the model to the mean trend of the data.

    .. math::
        \arg \min_{w \in \mathcal{W}} \frac{1}{2}||y - f(X, \mathbf{w})||^{2}_{2}

    Attributes
    ----------
    y : array-like
        Observed data
    model : callable
        A functional form that defines the model
    yerr : scalar or array-like
        Weights or uncertainties on each observed data point

    Examples
    --------
    >>> import numpy as np
    >>> from macaw.objective_functions import L2Norm
    >>> from macaw.optimizers import GradientDescent
    >>> from macaw.models import LinearModel
    >>> # generate fake data
    >>> np.random.seed(0)
    >>> x = np.linspace(0, 10, 200)
    >>> fake_data = x * 3 + 10 + np.random.normal(scale=2, size=x.shape)
    >>> # build the model
    >>> my_line = LinearModel(x)
    >>> # build the objective function
    >>> l2norm = L2Norm(fake_data, my_line)
    >>> # perform optimization
    >>> gd = GradientDescent(l2norm.evaluate, l2norm.gradient)
    >>> gd.compute(x0=(1., 1.))
    >>> # get the best fit parameters
    >>> print(gd.x)
    [ 2.96263148 10.32861519]
    """
    def __init__(self, y, model, yerr=1):
        self.y = y
        self.model = model
        self.yerr = yerr
    def __repr__(self):
        return "<L2Norm(model={})>".format(self.model)
    def evaluate(self, theta):
        """Half the chi-square: .5 * sum((y - model)^2 / yerr^2), NaNs ignored."""
        residual = self.y - self.model(*theta)
        return .5 * np.nansum(residual * residual / (self.yerr * self.yerr))
    def gradient(self, theta):
        """Analytic gradient of :meth:`evaluate` w.r.t. theta."""
        grad = self.model.gradient(*theta)
        return - np.nansum((self.y - self.model(*theta)) * grad
                           / (self.yerr * self.yerr), axis=-1)
class RidgeRegression(ObjectiveFunction):
    r"""
    Implements Ridge regression objective function.

    Ridge regression is a specific case of regression in which the
    model is linear, the objective function is the L2 norm,
    and the regularization term is the L2 norm.

    .. math::
        \arg \min_{w \in \mathcal{W}} \frac{1}{2}||y - X\mathbf{w}||^{2}_{2} + \alpha||\mathbf{w}||^{2}_{2}
    """
    def __init__(self, y, X, alpha=1):
        self.y = y
        self.model = LinearModel(X)
        self.alpha = alpha
        self._l2norm = L2Norm(y=self.y, model=self.model)
    def evaluate(self, theta):
        """Objective value at theta.

        NOTE(review): L2Norm already carries a .5 factor, so 2 * l2norm is
        the full squared norm — a factor of 2 larger than the docstring's
        (1/2)-scaled formula; the gradient below is consistent with this
        scaled version, so the minimizer is unchanged.
        """
        theta = np.asarray(theta)
        return (2 * self._l2norm(theta) + self.alpha * np.nansum(theta * theta))
    def gradient(self, theta):
        """Gradient of :meth:`evaluate`: 2 * (l2norm gradient + alpha * theta)."""
        return 2 * (self._l2norm.gradient(theta) + self.alpha * theta)
class Lasso(ObjectiveFunction):
    r"""
    Implements the Lasso objective function.

    Lasso is usually used to estimate sparse coefficients.

    .. math::
        \arg \min_{w \in \mathcal{W}} \frac{1}{2\cdot n_{\text{samples}}}||y - X\mathbf{w}||^{2}_{2} + \alpha||\mathbf{w}||^{1}_{1}
    """
    def __init__(self, y, X, alpha=1):
        self.y = y
        self.model = LinearModel(X)
        self.alpha = alpha
        self._l2norm = L2Norm(y=self.y, model=self.model)
    def evaluate(self, theta):
        """Mean squared-error term plus alpha-weighted L1 penalty."""
        return (self._l2norm(theta) / len(self.y)
                + self.alpha * np.nansum(np.abs(theta)))
    def surrogate_fun(self, theta, theta_n):
        """Quadratic majorizer of :meth:`evaluate` around theta_n
        (the 1e-6 term guards against zero coefficients)."""
        theta = np.asarray(theta)
        abs_n = np.abs(theta_n)
        return (self._l2norm(theta) / len(self.y)
                + .5 * self.alpha * np.nansum(theta * theta / (1e-6 + abs_n) + abs_n))
    def gradient_surrogate(self, theta, theta_n):
        """Gradient of the surrogate.

        NOTE(review): denominator lacks the 1e-6 guard used in
        surrogate_fun, so a zero entry in theta_n divides by zero —
        confirm whether (1e-6 + np.abs(theta_n)) was intended.
        """
        theta = np.asarray(theta)
        return (self._l2norm.gradient(theta)
                + self.alpha * theta / np.abs(theta_n))
    def fit(self, x0, n=1000, xtol=1e-6, ftol=1e-9, **kwargs):
        """Fit via Majorization-Minimization (overrides the base gd/cd fit)."""
        mm = MajorizationMinimization(self, **kwargs)
        mm.compute(x0=x0, n=n, xtol=xtol, ftol=ftol)
        return mm
class BernoulliLikelihood(ObjectiveFunction):
    r"""Implements the negative log likelihood function for independent
    (possibly non-identical distributed) Bernoulli random variables.
    This class also contains a method to compute maximum likelihood estimators
    for the probability of a success.

    More precisely, the MLE is computed as

    .. math::
        \arg \min_{\theta \in \Theta} - \sum_{i=1}^{n} y_i\log\pi_i(\mathbf{\theta}) + (1 - y_i)\log(1 - \pi_i(\mathbf{\theta}))

    Attributes
    ----------
    y : array-like
        Observed data
    model : callable
        A functional form that defines the model for the probability of success

    Examples
    --------
    >>> import numpy as np
    >>> from macaw import BernoulliLikelihood
    >>> from macaw.models import ConstantModel as constant
    >>> # generate integer fake data in the set {0, 1}
    >>> np.random.seed(0)
    >>> y = np.random.choice([0, 1], size=100)
    >>> # create a model
    >>> p = constant()
    >>> # perform optimization
    >>> ber = BernoulliLikelihood(y=y, model=p)
    >>> result = ber.fit(x0=[0.3])
    >>> # get best fit parameters
    >>> print(result.x)
    [ 0.55999999]
    >>> print(np.mean(y>0)) # theorectical MLE
    0.56
    >>> # get uncertainties on the best fit parameters
    >>> print(ber.uncertainties(result.x))
    [ 0.04963869]
    >>> # theorectical uncertainty
    >>> print(np.sqrt(.56 * .44 / 100))
    0.049638694584
    """
    def __init__(self, y, model):
        self.y = np.asarray(y)
        self.model = model
    def evaluate(self, theta):
        """Negative Bernoulli log-likelihood at theta (NaNs ignored).

        Assumes model(*theta) returns success probabilities strictly in
        (0, 1); values at 0 or 1 produce log(0) warnings/infs.
        """
        model_theta = self.model(*theta)
        return - np.nansum(self.y * np.log(model_theta)
                           + (1. - self.y) * np.log(1. - model_theta))
    def gradient(self, theta):
        """Analytic gradient of :meth:`evaluate`."""
        model_theta = self.model(*theta)
        grad = self.model.gradient(*theta)
        return - np.nansum(self.y * grad / model_theta
                           - (1 - self.y) * grad / (1 - model_theta),
                           axis=-1)
    def fisher_information_matrix(self, theta):
        """Fisher information matrix at theta (symmetric n_params x n_params)."""
        n_params = len(theta)
        fisher = np.empty(shape=(n_params, n_params))
        grad_model = self.model.gradient(*theta)
        model = self.model(*theta)
        for i in range(n_params):
            for j in range(i, n_params):
                fisher[i, j] = (grad_model[i] * grad_model[j] / model).sum()
                fisher[j, i] = fisher[i, j]
        # NOTE(review): the trailing division by (1 - model(theta)) only
        # reduces to a scalar for constant models — confirm intended scaling
        # for multi-output models.
        return len(self.y) * fisher / (1 - self.model(*theta))
    def uncertainties(self, theta):
        """Standard errors: sqrt of the diagonal of the inverse Fisher matrix."""
        inv_fisher = np.linalg.inv(self.fisher_information_matrix(theta))
        return np.sqrt(np.diag(inv_fisher))
class LogisticRegression(BernoulliLikelihood):
    r"""Implements a Logistic regression objective function for
    Binary classification.
    """
    def __init__(self, y, X):
        self.X = X
        super().__init__(y, LogisticModel(self.X))
        self._linear_model = LinearModel(self.X)
    def evaluate(self, theta):
        """Numerically-stabler form of the Bernoulli NLL using the linear
        predictor l = X . theta and log1p instead of log(sigmoid)."""
        l = self._linear_model(*theta)
        return np.nansum((1 - self.y) * l + np.log1p(np.exp(-l)))
    def gradient(self, theta):
        """Analytic gradient of :meth:`evaluate`."""
        l_grad = self._linear_model.gradient(*theta)
        f, f_grad = self.model(*theta), self.model.gradient(*theta)
        return np.nansum((1 - self.y) * l_grad - f_grad / f, axis=-1)
    def surrogate_fun(self, theta, theta_n):
        """Majorizer of :meth:`evaluate` around theta_n, used by MM fitting."""
        n = len(self.y)
        f, fn = self.model(*theta), self.model(*theta_n)
        l, ln = self._linear_model(*theta), self._linear_model(*theta_n)
        return np.nansum((1 - self.y) * l + np.log1p(np.exp(-ln))) - n * np.log(n / np.nansum(fn / f))
    def gradient_surrogate(self, theta, theta_n):
        """Gradient of :meth:`surrogate_fun` with respect to theta."""
        n = len(self.y)
        l = self._linear_model(*theta)
        f, fn = self.model(*theta), self.model(*theta_n)
        l_grad = self._linear_model.gradient(*theta)
        return np.nansum(((1 - self.y) - n * fn * np.exp(-l) / np.nansum(fn / f)) * l_grad,
                         axis=-1)
    def predict(self, X):
        """Predict 0/1 labels for X using the parameters found by fit();
        requires fit() to have been called (it sets ``self.opt``)."""
        model = LogisticModel(X)
        return np.round(model(*self.opt.x))
class L1LogisticRegression(ObjectiveFunction):
    r"""Implements a Logistic regression objective function with
    L1-norm regularization for Binary classification.
    """
    def __init__(self, y, X, alpha=.1):
        self.y = y
        self.X = X
        self._logistic = LogisticRegression(y, X)
        self.alpha = alpha
    def evaluate(self, theta):
        """Logistic NLL plus alpha-weighted L1 penalty on theta."""
        return self._logistic.evaluate(theta) + self.alpha * np.nansum(np.abs(theta))
    def surrogate_fun(self, theta, theta_n):
        """Majorizer combining the logistic surrogate with a quadratic
        majorizer of the L1 penalty.

        NOTE(review): unlike Lasso.surrogate_fun, abs_n has no 1e-6 guard,
        so zero entries in theta_n divide by zero — confirm intent.
        """
        theta = np.asarray(theta)
        abs_n = np.abs(theta_n)
        return (self._logistic.surrogate_fun(theta, theta_n)
                + .5 * self.alpha * np.nansum(theta * theta / abs_n + abs_n))
    def gradient_surrogate(self, theta, theta_n):
        """Gradient of :meth:`surrogate_fun` with respect to theta."""
        theta = np.asarray(theta)
        return (self._logistic.gradient_surrogate(theta, theta_n)
                + self.alpha * theta / np.abs(theta_n))
    def fit(self, x0, n=10, xtol=1e-6, ftol=1e-9, **kwargs):
        """Fit via Majorization-Minimization; stores the optimizer on self.mm."""
        self.mm = MajorizationMinimization(self, **kwargs)
        self.mm.compute(x0=x0, n=n, xtol=xtol, ftol=ftol)
        return self.mm
    def predict(self, X):
        """Predict 0/1 labels for X; requires fit() to have been called."""
        model = LogisticModel(X)
        return np.round(model(*self.mm.x))
|
'''
Name: Sidharth Banerjee
ID : 1001622703
'''
import numpy as np
import matplotlib.pyplot as plt
import soundfile as sf
def Gamma(mu, theta):
    """First-order shelving-filter pole coefficient for linear gain *mu*
    at normalized cutoff angle *theta* (radians)."""
    t = (4 / (1 + mu)) * np.tan(theta / 2)
    return (1 - t) / (1 + t)
def u_n(x, a, g):
    """First-order recursive filter:
    u[0] = a*x[0];  u[n] = a*(x[n] + x[n-1]) + g*u[n-1]."""
    filtered = [a * x[0]]
    for idx in range(1, len(x)):
        filtered.append(a * (x[idx] + x[idx - 1]) + g * filtered[idx - 1])
    return np.array(filtered)
def y_n(x, mu, u):
    """Combine input and filtered signals: y[n] = x[n] + (mu - 1) * u[n].

    Vectorized replacement for the original per-sample Python loop —
    identical values, one NumPy expression instead of O(n) appends.
    """
    x = np.asarray(x)
    u = np.asarray(u)
    return x + (mu - 1) * u
def plotData(y1, y2):
    """Plot magnitude spectra of the original (*y1*) and filtered (*y2*)
    signals side by side; blocks until the plot window is closed.

    Only the first quarter of the FFT bins is shown (the lower part of the
    positive-frequency half).
    """
    fft1 = abs(np.fft.fft(y1))
    fft2 = abs(np.fft.fft(y2))
    fft1 = fft1[:int(len(fft1)/4)]
    fft2 = fft2[:int(len(fft2)/4)]
    # Shared y-limit so the two panels are directly comparable.
    maxY = max(np.amax(fft1), np.amax(fft2))
    plt.figure('Figure 1: Comparison of signal after applying shelving filter')
    plt.subplot(1, 2, 1)
    plt.ylim(0, maxY+100)
    plt.plot(np.arange(0, len(fft1), 1), fft1)
    plt.xlabel('Hz')
    plt.title('Original Signal')
    plt.tight_layout()
    plt.subplot(1, 2, 2)
    plt.ylim(0, maxY+100)
    plt.plot(np.arange(0, len(fft2), 1), fft2)
    plt.xlabel('Hz')
    plt.title('Filtered Signal')
    plt.tight_layout()
    plt.show()
def applyShelvingFilter(inName, outName, g, fc) :
    """Apply a first-order shelving filter with gain *g* (dB) and cutoff
    *fc* (Hz) to the WAV file *inName*, plot the spectra, and write the
    filtered audio to *outName*.
    """
    data, samplerate = sf.read(inName)
    theta = (2*np.pi*fc)/samplerate  # normalized cutoff angle (rad/sample)
    mu = 10**(g/20)                  # dB gain converted to linear scale
    gamma = Gamma(mu, theta)
    alpha = (1-gamma)/2
    u = u_n(data, alpha, gamma)
    y = y_n(data, mu, u)
    plotData(data, y)
    sf.write(outName, y, samplerate)
########################## main ##########################
if __name__ == "__main__" :
    inName = "P_9_1.wav"
    gain = -10  # dB; can be positive or negative
    # WARNING: small positive values can greatly amplify the sounds
    cutoff = 300  # Hz
    outName = "shelvingOutput.wav"
    applyShelvingFilter(inName, outName, gain, cutoff)
|
from aoc2019.day06.part1 import build_orbits
def test_checksum():
    """Orbit-transfer check on the AoC 2019 day 6 example map.

    NOTE(review): despite the name, this asserts the YOU->SAN transfer
    distance (part 2 semantics), not the part 1 orbit-count checksum —
    confirm the intended behavior of Node.search(-1) / .dist.
    """
    orbit_map = build_orbits(
        ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J', 'J)K', 'K)L', 'K)YOU', 'I)SAN'])
    orbit_map['YOU'].search(-1)
    assert(orbit_map['SAN'].dist == 4)
|
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from club.models import Club
from mahjong_portal.models import BaseModel
from player.models import Player
from rating.calculation.hardcoded_coefficients import AGARI_TOURNAMENT_ID
from settings.models import City, Country
class PublicTournamentManager(models.Manager):
    """Manager that excludes hidden tournaments from public-facing queries."""

    def get_queryset(self):
        # Zero-argument super() is the Python 3 idiom; behavior unchanged.
        return super().get_queryset().exclude(is_hidden=True)
class Tournament(BaseModel):
    """A mahjong tournament with registration, rating and display metadata."""

    RIICHI = 0
    MCR = 1

    RR = "rr"
    CRR = "crr"
    EMA = "ema"
    FOREIGN_EMA = "fema"
    OTHER = "other"
    ONLINE = "online"
    CHAMPIONSHIP = "champ"

    GAME_TYPES = [[RIICHI, "Riichi"], [MCR, "MCR"]]
    TOURNAMENT_TYPES = [
        [RR, "rr"],
        [CRR, "crr"],
        [EMA, "ema"],
        [FOREIGN_EMA, "fema"],
        [OTHER, "other"],
        [ONLINE, "online"],
        [CHAMPIONSHIP, "champ."],
    ]

    objects = models.Manager()
    public = PublicTournamentManager()  # excludes is_hidden tournaments

    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, max_length=255)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(db_index=True)
    number_of_sessions = models.PositiveSmallIntegerField(default=0, blank=True)
    number_of_players = models.PositiveSmallIntegerField(default=0, blank=True)
    registration_description = models.TextField(null=True, blank=True, default="")
    registration_link = models.URLField(null=True, blank=True, default="")
    results_description = models.TextField(null=True, blank=True, default="")
    clubs = models.ManyToManyField(Club, blank=True)
    country = models.ForeignKey(Country, on_delete=models.PROTECT)
    city = models.ForeignKey(City, on_delete=models.PROTECT, null=True, blank=True)
    tournament_type = models.CharField(max_length=10, choices=TOURNAMENT_TYPES, default=RR, db_index=True)
    is_upcoming = models.BooleanField(default=False)
    is_hidden = models.BooleanField(default=False)
    is_event = models.BooleanField(default=False)
    is_majsoul_tournament = models.BooleanField(default=False)
    is_pantheon_registration = models.BooleanField(default=False)
    russian_cup = models.BooleanField(default=False)
    fill_city_in_registration = models.BooleanField(default=False)
    opened_registration = models.BooleanField(default=False)
    registrations_pre_moderation = models.BooleanField(default=False)
    # Sometimes people need to leave notes in registration form
    display_notes = models.BooleanField(default=False)
    old_pantheon_id = models.CharField(max_length=20, null=True, blank=True)
    new_pantheon_id = models.CharField(max_length=20, null=True, blank=True)
    ema_id = models.CharField(max_length=20, null=True, blank=True)

    def __str__(self):
        # Was __unicode__, which Python 3 / modern Django never calls;
        # __str__ restores a readable representation in the admin and shell.
        return self.name

    def get_url(self):
        """URL of the announcement page for upcoming tournaments, otherwise
        the results/details page."""
        if self.is_upcoming:
            return reverse("tournament_announcement", kwargs={"slug": self.slug})
        else:
            return reverse("tournament_details", kwargs={"slug": self.slug})

    @property
    def type_badge_class(self):
        """Bootstrap badge CSS class for the tournament type."""
        if self.is_ema():
            return "success"
        if self.is_rr():
            return "primary"
        if self.is_crr():
            return "info"
        if self.is_online():
            return "warning"
        if self.is_championship():
            return "championship"
        return "info"

    @property
    def text_badge_class(self):
        """Extra text CSS class (dark text on the light 'online' badge)."""
        if self.is_online():
            return "text-dark"
        return ""

    @property
    def type_help_text(self):
        """Which ratings the tournament counts towards, for badge tooltips."""
        if self.is_ema():
            return "EMA, RR, CRR"
        if self.is_rr():
            return "RR, CRR"
        if self.is_crr():
            return "CRR"
        if self.is_online():
            return "Online"
        return ""

    @property
    def type_display(self):
        """Human-readable type; foreign EMA tournaments display plain 'EMA'."""
        if self.tournament_type == self.FOREIGN_EMA:
            return "EMA"
        else:
            return self.get_tournament_type_display()

    @property
    def rating_link(self):
        """URL of the rating page this tournament counts towards, or ''."""
        if self.is_other() or self.is_championship():
            return ""
        tournament_type = self.tournament_type
        if tournament_type == self.FOREIGN_EMA:
            tournament_type = self.EMA
        return reverse("rating", kwargs={"slug": tournament_type})

    def is_ema(self):
        return self.tournament_type == self.EMA or self.tournament_type == self.FOREIGN_EMA

    def is_rr(self):
        return self.tournament_type == self.RR

    def is_crr(self):
        return self.tournament_type == self.CRR

    def is_online(self):
        return self.tournament_type == self.ONLINE

    def is_other(self):
        return self.tournament_type == self.OTHER

    def is_championship(self):
        return self.tournament_type == self.CHAMPIONSHIP

    def is_stage_tournament(self):
        # Hardcoded special case for the AGARI stage tournament.
        return self.id == AGARI_TOURNAMENT_ID

    def get_tournament_registrations(self):
        """Approved registrations, from the online or offline table as fits."""
        if self.is_online():
            return self.online_tournament_registrations.filter(is_approved=True)
        else:
            return self.tournament_registrations.filter(is_approved=True)

    def championship_tournament_results(self):
        """Results ordered by place: all Russian players for championships,
        otherwise the top 8."""
        results = TournamentResult.objects.filter(tournament=self).order_by("place")
        if self.tournament_type == Tournament.CHAMPIONSHIP:
            results = results.filter(player__country__code="RU")
        else:
            results = results[:8]
        return results
class TournamentResult(BaseModel):
    """A single player's placement and score in a tournament."""

    tournament = models.ForeignKey(Tournament, related_name="results", on_delete=models.PROTECT)
    player = models.ForeignKey(
        Player, on_delete=models.PROTECT, related_name="tournament_results", null=True, blank=True
    )
    # Free-text name used when the player has no Player profile.
    player_string = models.CharField(max_length=512, null=True, blank=True)
    place = models.PositiveSmallIntegerField()
    scores = models.DecimalField(default=None, decimal_places=2, max_digits=10, null=True, blank=True)
    exclude_from_rating = models.BooleanField(default=False)
    games = models.PositiveSmallIntegerField(default=0)
    # for players without profile
    country = models.ForeignKey(Country, on_delete=models.CASCADE, null=True, blank=True)

    def __str__(self):
        # Was __unicode__, which Python 3 / modern Django never calls.
        return self.tournament.name

    @property
    def base_rank(self):
        """Rank points interpolated linearly from 1000 (first place)
        down to 0 (last place), rounded to 2 decimals."""
        number_of_players = self.tournament.number_of_players
        place = self.place
        # first place
        if place == 1:
            return 1000
        if place == number_of_players:
            return 0
        return round(((number_of_players - place) / (number_of_players - 1)) * 1000, 2)
class TournamentRegistration(BaseModel):
    """A registration for an offline tournament."""

    tournament = models.ForeignKey(Tournament, related_name="tournament_registrations", on_delete=models.PROTECT)
    is_approved = models.BooleanField(default=True)
    first_name = models.CharField(max_length=255, verbose_name=_("First name"))
    last_name = models.CharField(max_length=255, verbose_name=_("Last name"))
    city = models.CharField(max_length=255, verbose_name=_("City"))
    phone = models.CharField(
        max_length=255, verbose_name=_("Phone"), help_text=_("It will be visible only to the administrator")
    )
    additional_contact = models.CharField(
        max_length=255,
        verbose_name=_("Additional contact. Optional"),
        help_text=_("It will be visible only to the administrator"),
        default="",
        null=True,
        blank=True,
    )
    is_highlighted = models.BooleanField(default=False)
    notes = models.TextField(null=True, blank=True, default="", verbose_name=_("Team name"))
    player = models.ForeignKey(
        Player, on_delete=models.CASCADE, null=True, blank=True, related_name="tournament_registrations"
    )
    city_object = models.ForeignKey(City, on_delete=models.CASCADE, null=True, blank=True)
    allow_to_save_data = models.BooleanField(default=False, verbose_name=_("I allow to store my personal data"))

    def __str__(self):
        # Was __unicode__, which Python 3 / modern Django never calls.
        return self.full_name

    @property
    def full_name(self):
        """'Last First' display name, matching the site's name ordering."""
        return "{} {}".format(self.last_name, self.first_name)
class OnlineTournamentRegistration(BaseModel):
    """A participant's sign-up record for an online (tenhou.net) tournament."""
    tournament = models.ForeignKey(Tournament, related_name="online_tournament_registrations", on_delete=models.PROTECT)
    is_approved = models.BooleanField(default=True)
    first_name = models.CharField(max_length=255, verbose_name=_("First name"))
    last_name = models.CharField(max_length=255, verbose_name=_("Last name"), null=True, blank=True)
    city = models.CharField(max_length=255, verbose_name=_("City"))
    tenhou_nickname = models.CharField(max_length=255, verbose_name=_("Tenhou.net nickname"), null=True, blank=True)
    is_highlighted = models.BooleanField(default=False)
    contact = models.CharField(
        max_length=255,
        verbose_name=_("Your contact (email, phone, etc.)"),
        help_text=_("It will be visible only to the administrator"),
    )
    # Optional links to existing site objects.
    player = models.ForeignKey(
        Player, on_delete=models.CASCADE, null=True, blank=True, related_name="online_tournament_registrations"
    )
    user = models.ForeignKey("account.User", on_delete=models.CASCADE, null=True, blank=True)
    city_object = models.ForeignKey(City, on_delete=models.CASCADE, null=True, blank=True)
    allow_to_save_data = models.BooleanField(default=False, verbose_name=_("I allow to store my personal data"))
    notes = models.TextField(null=True, blank=True, default="", verbose_name=_("Additional info"))
    class Meta:
        # at most one registration per tenhou nickname per tournament
        unique_together = ["tenhou_nickname", "tournament"]
    def __unicode__(self):
        # NOTE(review): Django 2+ displays __str__, not __unicode__ —
        # confirm a compatibility shim exists upstream.
        return self.full_name
    @property
    def full_name(self):
        """Return "<last name> <first name>"."""
        return "{} {}".format(self.last_name, self.first_name)
class TournamentApplication(BaseModel):
    """An organizer-submitted application form for holding a new tournament."""
    tournament_name = models.CharField(max_length=255, verbose_name=_("Tournament name"))
    city = models.CharField(max_length=255, verbose_name=_("City"))
    tournament_type = models.PositiveSmallIntegerField(
        verbose_name=_("Tournament type"), choices=[[0, "CRR"], [1, "RR"], [2, "EMA"]], default=0
    )
    # Dates are free-form strings (not DateFields), as entered by the organizer.
    start_date = models.CharField(max_length=255, verbose_name=_("Start date"))
    end_date = models.CharField(
        max_length=255,
        verbose_name=_("End date"),
        null=True,
        blank=True,
        help_text=_("Leave empty if tournament has one day"),
    )
    address = models.TextField(verbose_name=_("Address"), help_text=_("How to reach your tournament venue"))
    additional_info_link = models.URLField(
        null=True, blank=True, verbose_name=_("Link to additional tournament information")
    )
    # Organizer contact details.
    organizer_name = models.CharField(max_length=255, verbose_name=_("Organizer name"))
    organizer_phone = models.CharField(max_length=255, verbose_name=_("Organizer phone"))
    organizer_additional_contact = models.CharField(
        max_length=255,
        verbose_name=_("Organizer additional contact"),
        null=True,
        blank=True,
        help_text=_("Email, link to vk or something else"),
    )
    # Referee contact details.
    referee_name = models.CharField(max_length=255, verbose_name=_("Referee name"))
    referee_phone = models.CharField(max_length=255, verbose_name=_("Referee phone"))
    referee_additional_contact = models.CharField(
        max_length=255,
        verbose_name=_("Referee additional contact"),
        null=True,
        blank=True,
        help_text=_("Email, link to vk or something else"),
    )
    # Yes/No flags stored as 0/1 small integers.
    referee_english = models.PositiveSmallIntegerField(
        choices=[[0, _("No")], [1, _("Yes")]], default=1, verbose_name=_("Referee english")
    )
    max_number_of_participants = models.PositiveSmallIntegerField(
        null=True, blank=True, verbose_name=_("Max number of participants")
    )
    number_of_games = models.PositiveSmallIntegerField(verbose_name=_("Number of hanchans"))
    entry_fee = models.PositiveSmallIntegerField(
        null=True, blank=True, verbose_name=_("Entry fee"), help_text=_("Leave empty if it is free tournament")
    )
    pantheon_needed = models.PositiveSmallIntegerField(
        choices=[[0, _("No")], [1, _("Yes")]], default=1, verbose_name=_("Pantheon needed")
    )
    rules = models.PositiveSmallIntegerField(
        verbose_name=_("Tournament rules"),
        choices=[[0, _("EMA")], [1, _("WRC")], [2, _("JPML-A")], [3, _("JPML-B")], [4, _("Other")]],
        default=0,
    )
    registration_type = models.PositiveSmallIntegerField(
        choices=[[0, _("Open")], [1, _("Closed")], [2, _("Limited")]], verbose_name=_("Registration type"), default=0
    )
    additional_info = models.TextField(
        verbose_name=_("Additional info"), help_text=_("More information about tournament")
    )
    allow_to_save_data = models.BooleanField(help_text=_("I allow to store my personal data"))
    def __unicode__(self):
        # Applications have no natural display name; intentionally empty.
        return ""
|
#!/usr/bin/python
text = " CREATE tabLe MyTable ( country varchar(45),ID integer ,XYI double ) ; "
def parseCreateTable(sql):
sql = sql.strip()
sql = sql[sql.find(" "):].strip()
keyWord = str(sql[:sql.find(" ")]).upper()
if keyWord != "TABLE":
print "error: expected keyword \"Table\""
return
sql = sql[sql.find(" "):].strip()
pos = min(sql.find(" "),sql.find("("))
if pos < 1:
print "error: expected Table Name"
return
tableName = str(sql[:pos].strip())
print "table: ", tableName
sql = sql[pos:].strip()
if sql[0] != "(":
print "error: expected \"(\""
return
if sql[-1] != ";":
print "error: expected \";\""
return
sql = sql[:len(sql)-1].strip()
if sql[-1] != ")":
print "error: expected \")\""
return
sql = sql[1:len(sql)-1].strip()
fields = sql.split(",")
for fi in fields:
part = fi.split()
if len(part) == 0:
print "error: expected field name"
return
elif len(part) == 1:
print "error: expected field type"
return
elif len(part) > 2:
print "error: expected \",\""
return
print "----------"
fieldName = part[0].strip()
fieldType = part[1].strip()
print "field name:", fieldName
print "field type:", fieldType
parseCreateTable(text)
|
def base10int(value, base):
    """Return the representation of non-negative ``value`` in ``base``
    as a string of decimal digit characters (recursive)."""
    quotient = int(value // base)
    if quotient:
        # higher-order digits first, then this position's digit
        return base10int(quotient, base) + str(value % base)
    return str(value % base)
def b8to10(s):
    """Interpret the digit string ``s`` as base 8 and return its value.

    Uses Horner's rule; like the original there is no validation, so
    digits 8/9 are accepted and simply weighted by powers of eight.
    """
    total = 0
    for ch in s:
        total = total * 8 + int(ch)
    return total
# Read a digit string n and an iteration count k from one input line.
n, k = map(str, input().split(" "))
# Repeat k times: read n's digits as base 8, rewrite the value in base 9,
# then replace every digit '8' with '5'.
# NOTE(review): the 8->5 substitution is presumably mandated by the
# problem statement this solves — confirm against it.
for i in range(int(k)):
    n = b8to10(n)
    n = base10int(n, 9)
    n = list(n)
    for l in range(len(n)):
        if n[l] == '8':
            n[l] = '5'
    n = "".join(n)
print(int(n))
import requests
import config
# Yelp Fusion business-search endpoint.
url = "https://api.yelp.com/v3/businesses/search"
# The API key is supplied as a Bearer token (read from the local config module).
headers = {
    "Authorization" : "Bearer " + config.yelp_api_key
}
params = {
    "location": "NYC",
    "term": "Barber"
}
response = requests.get(url, headers=headers, params=params)
businesses = response.json()["businesses"]
# Keep only top-rated shops. NOTE(review): Yelp ratings step by 0.5, so
# `> 4.5` effectively keeps 5.0-rated businesses only — confirm intent.
names = [business['name'] for business in businesses if business['rating'] > 4.5]
print(names)
|
# -*- coding: utf-8 -*-
import re
import os
from botminer.util.picture import drawBar
from botminer.util.ip_statistic import ip_statistic
os.chdir('../log/')
def analys():
    """Analyze every scan-log file under ../log/.

    Extracts the source IP of each "Port/Proto Range" scan event, prints
    per-IP hit counts and an IP distribution (via ip_statistic), and
    writes the unique IPs to ../result/scan_log_analys_result.

    Fixes vs. the original:
    - the per-IP counters were built from the matches of only the *last*
      file processed (the counting loop sat outside the file loop); they
      now accumulate across all files;
    - ported from Python 2 (print statements, dict.has_key) to Python 3;
    - the result file is opened in text mode, since str values are written.
    """
    files = os.listdir('../log/')
    os.chdir('../log/')
    co = re.compile('Time:.*?event.*?0\n(.*?) ->.*?Port/Proto Range', re.S)
    ips = set()
    print('[log-ip-analyze] Starting analyzing scan log')
    print('\n\n\n\n')
    statistics = {}
    for file in files:
        with open(file, 'r') as f:
            content = f.read()
        matches = co.findall(content)
        ips = ips.union(set(matches))
        # count every occurrence across *all* files
        for ip in matches:
            statistics[ip] = statistics.get(ip, 0) + 1
    print('*' * 100)
    for item in statistics.items():
        print(item)
    ip_distribution_statistics = ip_statistic(ips, 1)
    print('-' * 40 + 'statistics' + '-' * 40)
    print('ip amount:', len(ips))
    print('***distribution***')
    for key in ip_distribution_statistics:
        print(key, '---------' + str(ip_distribution_statistics.get(key)))
    #drawBar(statistics)
    #drawPie(statistics)
    # text mode: the IPs are str, so binary mode would raise TypeError
    with open('../result/scan_log_analys_result', 'w') as f:
        for ip in ips:
            f.write(ip)
            f.write(os.linesep)
|
def random():
    """Return a pseudo-random integer between 0 and 50, inclusive.

    Note: this function shadows the stdlib ``random`` module's name.
    """
    from random import randint
    return randint(0, 50)
def obtenerInt():
    """Prompt the user for a guess and return it as an int."""
    return int(input("Ingrese su adivinanza: "))
def respuesta(respuesta, intento):
    """Compare the secret number *respuesta* with the guess *intento*.

    On a correct guess prints a message and exits the process; otherwise
    prints a higher/lower hint and returns 1 so the caller keeps looping.
    """
    if respuesta == intento:
        print("Ha adivinado el numero. ")
        exit()
    if respuesta > intento:
        print("El numero ingresado es menor que la respuesta ")
        return 1
    print("El numero ingresado es mayor que la respuesta ")
    return 1
def adivinanza():
    """Run the guessing game: pick a secret number, then keep prompting
    until the guess is correct."""
    x=random()
    y=obtenerInt()
    h=respuesta(x,y)
    # respuesta() only ever returns 1 (wrong guess) or exits the process
    # on a correct one, so this loop terminates via exit() in respuesta().
    while h!=0:
        y=obtenerInt()
        h=respuesta(x,y)
# start the game immediately when the module runs
adivinanza()
|
""""
Given an array nums. We define a running sum of an array as runningSum[i] = sum(nums[0]…nums[i]).
Return the running sum of nums.
"""
# import List from typing to support hinting in function definition
from typing import List
class Solution:
nums = [1,2,3,4]
def runningSum(self, nums: List[int]) -> List[int]:
run_sum = 0
sum = []
for i in range(len(nums)):
sum.append(nums[i] + run_sum)
run_sum += nums[i]
return sum
# test
#nums = [1,2,3,4]
print(runningSum(self, nums))
# expected output [1,3,6,10]
|
"""
The restaurant application factory
"""
import os
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from database import db_session
from models import Restaurant, MenuItem
def create_app(test_config=None):
    """
    Application factory: create, configure and return the restaurant
    Flask app instance.

    test_config: optional mapping that overrides the default/instance
    configuration (used by tests).
    """
    # create and configure the application instance
    app = Flask(__name__, instance_relative_config=False)
    # set default configuration
    app.config.from_mapping(SECRET_KEY='dev')
    print(app.instance_path)
    # check for testing
    if test_config is None:
        # Override default configuration if config file exists
        app.config.from_pyfile('config.py', silent=True)
    else:
        # Load test configuration if it exists
        app.config.from_mapping(test_config)
    # Ensure instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass
    @app.teardown_appcontext
    def shutdown_session(exception=None):
        """Release the scoped SQLAlchemy session after each request."""
        db_session.remove()
    @app.route('/')
    @app.route('/restaurants/<int:restaurant_id>/')
    def restaurantMenu(restaurant_id):
        """Render the menu page for one restaurant.

        NOTE(review): .one() raises when the id is unknown (-> 500), and
        the '/' route supplies no default restaurant_id — confirm intent.
        """
        restaurant = db_session.query(
            Restaurant).filter_by(id=restaurant_id).one()
        menu_items = db_session.query(MenuItem).filter_by(
            restaurant_id=restaurant.id).all()
        return render_template('/menu.html', restaurant=restaurant, items=menu_items)
    @app.route('/restaurants/<int:restaurant_id>/new/', methods=['GET', 'POST'])
    def newMenuItem(restaurant_id):
        """Create a new menu item (form on GET, insert + redirect on POST)."""
        if request.method == 'POST':
            newMenu = MenuItem(name=request.form[
                'name'], restaurant_id=restaurant_id)
            db_session.add(newMenu)
            db_session.commit()
            flash("New menu item: {} created".format(newMenu.name))
            return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
        else:
            return render_template('/newmenuitem.html', restaurant_id=restaurant_id)
    @app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit/', methods=['GET', 'POST'])
    def editMenuItem(restaurant_id, menu_id):
        """Rename a menu item (form on GET, update + redirect on POST)."""
        editedItem = db_session.query(MenuItem).filter_by(id=menu_id).one()
        oldName = editedItem.name
        if request.method == 'POST':
            newName = request.form['name']
            editedItem.name = newName
            db_session.add(editedItem)
            db_session.commit()
            flash("Menu item changed from {0} to {1}".format(
                oldName, newName))
            return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
        else:
            return render_template('/editmenuitem.html', restaurant_id=restaurant_id, menu_id=menu_id, editedItem=editedItem)
    @app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/delete/', methods=['GET', 'POST'])
    def deleteMenuItem(restaurant_id, menu_id):
        """Delete a menu item (confirmation page on GET, delete on POST)."""
        toDelete = db_session.query(MenuItem).filter_by(id=menu_id).one()
        if request.method == 'POST':
            db_session.delete(toDelete)
            db_session.commit()
            flash("Menu item: {} deleted".format(toDelete.name))
            return redirect(url_for('restaurantMenu', restaurant_id=restaurant_id))
        else:
            return render_template('/deletemenuitem.html', item=toDelete)
    # Making a API endpoint (GET request) to list of menu items of a restaurant
    @app.route('/restaurants/<int:restaurant_id>/menu/JSON')
    def restaurantMenuJSON(restaurant_id):
        """Return all of a restaurant's menu items serialized as JSON."""
        restaurant = db_session.query(
            Restaurant).filter_by(id=restaurant_id).one()
        menu_items = db_session.query(MenuItem).filter_by(
            restaurant_id=restaurant_id).all()
        return jsonify(MenuItems=[item.serialize for item in menu_items])
    @app.route('/restaurants/<int:restaurant_id>/menu/<int:menu_id>/JSON')
    def restaurantMenuItemJSON(restaurant_id, menu_id):
        """Return a single menu item serialized as JSON."""
        menu_item = db_session.query(MenuItem).filter_by(id=menu_id).one()
        return jsonify(MenuItem=menu_item.serialize)
    return app
if __name__ == '__main__':
    # run the development server when executed directly
    app = create_app()
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
|
# Generated by Django 2.0.7 on 2018-08-29 18:38
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the PostLink field from PostModel."""
    dependencies = [
        ('GuildPosts', '0007_auto_20180829_1835'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='postmodel',
            name='PostLink',
        ),
    ]
|
'''
Debugging demo (pdb variant): execution pauses right before the failing
division so the value of n can be inspected.

import pdb
s = '0'
n = int(s)
pdb.set_trace()  # execution pauses here
print(10 / n)
'''
import logging
logging.basicConfig(level=logging.INFO)
# Configure the logging level so INFO records are emitted.
s = '0'
n = int(s)
logging.info('n = {}'.format(n))
# n is 0, so this raises ZeroDivisionError — the point of the demo is that
# the log line above reveals n's value before the crash.
print(10 / n)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-02 11:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the `progress` and `resources`
    models for the complaint app.

    NOTE(review): `progress.id` is a ForeignKey used as the primary key
    (one progress row per complaint); a OneToOneField is the conventional
    way to express this — confirm it was intended.
    """
    dependencies = [
        ('complaint', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='progress',
            fields=[
                ('id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='progress', serialize=False, to='complaint.complaint')),
                ('action', models.TextField()),
                ('completed', models.BooleanField(default=False)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='resources',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('department', models.CharField(max_length=20)),
                ('working', models.BooleanField()),
            ],
        ),
    ]
|
#!/usr/bin/env python
import os
import sys
# Batch driver: submit one Spark job per message split in [low, high].
# NOTE(review): Python 2 only (print statements, xrange).
low = int(sys.argv[1])
high = int(sys.argv[2])
# spark-submit invocation prefix; the analysis script receives three args:
# <input split path> <output split path> <uuid path>
cmdPrefix = '~/spark-1.4.0/bin/spark-submit --master spark://spark4-master:7077 --driver-memory 20g --executor-memory 14g msg_spark_opt.py '
msgPathIn = 'hdfs://spark4-master:9000/user/yong/msg_analysis/msg_0616/'
msgPathOut = 'hdfs://spark4-master:9000/user/yong/msg_analysis/msg_0616_ovs_res/'
uuidPathIn = 'hdfs://spark4-master:9000/user/yong/ovs_state/ovs_state_0616_uuid'
# alternative output/uuid paths (libvirt instead of ovs):
#msgPathOut = 'hdfs://spark4-master:9000/user/yong/msg_analysis/msg_0616_libvirt_res/'
#uuidPathIn = 'hdfs://spark4-master:9000/user/yong/libvirt_state/libvirt_state_0616_uuid'
for i in xrange(low, high + 1):
    # splits are named split_00, split_01, ...
    index = '%02d' % i
    print index
    splitIndex = 'split_' + index
    msgPathInSplit = msgPathIn + splitIndex
    msgPathOutSplit = msgPathOut + splitIndex
    cmd = cmdPrefix + ' ' + msgPathInSplit + ' ' + msgPathOutSplit + ' ' + uuidPathIn
    print cmd
    # blocking call: splits are processed sequentially
    os.system(cmd)
print 'COMPLETE'
|
import re
import time
import requests
class Scraper(object):
    """Base class for torrent-site scrapers with simple rate limiting.

    Subclasses override the class-level defaults and implement
    find_magnets().
    """

    api_url = None
    default_headers = ()
    default_params = ()
    default_data = ()
    cache_attrs = ()

    def __init__(self, ratelimit=1):
        # maximum number of requests per second
        self.ratelimit = ratelimit
        self._last_request = 0
        self.cookies = None
        self.timeout = 20

    def find_magnets(self, query=None, tv=False, movie=False, **kwargs):
        """Returns iterable of tuples (name, magnet_uri)."""
        raise NotImplementedError

    def _api_req(self, func='GET', path='', headers=(), params=(), data=()):
        """Issue a rate-limited GET/POST to the API and return parsed JSON."""
        merged_params = dict(self.default_params)
        merged_params.update(params)
        merged_headers = dict(self.default_headers)
        merged_headers.update(headers)
        merged_data = dict(self.default_data)
        merged_data.update(data)
        # Sleep in 34% intervals of total wait time.
        while time.time() < self._last_request + (1.0 / self.ratelimit):
            time.sleep(0.34 / self.ratelimit)
        self._last_request = time.time()
        url = self.api_url + path
        if func == 'GET':
            response = requests.get(url, params=merged_params,
                                    headers=merged_headers,
                                    cookies=self.cookies,
                                    timeout=self.timeout)
            return response.json()
        if func == 'POST':
            response = requests.post(url, headers=merged_headers,
                                     params=merged_params, data=merged_data,
                                     cookies=self.cookies,
                                     timeout=self.timeout)
            return response.json()

    def api_get(self, path='', headers=(), **params):
        """Convenience wrapper: GET with keyword query parameters."""
        return self._api_req(path=path, headers=headers, params=params)

    def api_post(self, path='', headers=(), params=(), **data):
        """Convenience wrapper: POST with keyword form data."""
        return self._api_req(func='POST', path=path, headers=headers,
                             params=params, data=data)
class TorrentApi(Scraper):
    """Scraper for the torrentapi.org (pubapi_v2) search API."""
    api_url = 'https://torrentapi.org/pubapi_v2.php'
    default_headers = (('user-agent', 'ruin/1.0'),)
    default_params = (('app_id', 'ruin'),
                      ('limit', 100),
                      ('token', None),)
    # attributes worth persisting between runs
    cache_attrs = ('token','_last_request')
    def __init__(self, token=None, ratelimit=0.5):
        super(TorrentApi, self).__init__(ratelimit=ratelimit)
        # API session token; lazily (re)fetched by api_get on error
        self.token = token
    def api_get(self, path='', headers=(), **params):
        """GET with the session token; refresh the token and retry once
        when the API reports it is missing/invalid/expired (codes 1/2/4)."""
        result = super(TorrentApi, self).api_get(path=path, headers=headers,
                                                 token=self.token, **params)
        if result.get('error_code') in (1, 2, 4):
            self.token = super(TorrentApi, self).api_get(
                get_token='get_token')['token']
            return self.api_get(path=path, headers=headers, **params)
        return result
    def find_magnets(self, query=None, tv=False, movie=False, **kwargs):
        """Returns iterable of tuples (name, magnet_uri)."""
        # retry bookkeeping: the API sometimes returns no results transiently
        attempt = int(kwargs.get('attempt', 1))
        ranked = int(kwargs.get('ranked', True))
        category = kwargs.get('category')
        if movie:
            # movies are looked up by IMDb id
            result = self.api_get(mode='search', search_imdb=kwargs['imdb'],
                                  category=category, ranked=ranked)
        elif tv:
            # TV is looked up by TVDB id plus an episode query string
            result = self.api_get(mode='search', search_tvdb=kwargs['id'],
                                  search_string=query, category=category,
                                  ranked=ranked)
        elif query:
            result = self.api_get(mode='search', search_string=query,
                                  category=category, ranked=ranked)
        else:
            result = self.api_get(mode='list', category=category,
                                  ranked=ranked)
        result = result.get('torrent_results')
        if not result:
            # exponential backoff (2s, 4s), then give up after 3 attempts
            if attempt >= 3:
                return []
            time.sleep(1 << attempt)
            kwargs['attempt'] = attempt + 1
            return self.find_magnets(query=query, tv=tv, movie=movie, **kwargs)
        return ((t['filename'], t['download']) for t in result)
class BitLord(Scraper):
    """Scraper for bitlordsearch.com (token + cookie authenticated POST API)."""
    api_url = 'https://bitlordsearch.com'
    default_data = (('query', None),
                    ('offset', 0),
                    ('limit', 100),
                    ('filters[field]', 'added'),
                    ('filters[sort]', 'asc'),
                    ('filters[time]', 4),
                    ('filters[category]', 3),
                    ('filters[adult]', 'true'),
                    ('filters[risky]', 'false'),)
    default_headers = (('X-Request-Token', None),)
    def __init__(self, token=None, cookies=None, ratelimit=0.5):
        super(BitLord, self).__init__(ratelimit=ratelimit)
        self.token = token
        self.cookies = cookies
    def authenticate(self):
        """Scrape the request token and cookies from the site's main page.

        The token is assembled from JavaScript on the page: a `token:` var
        plus the string fragments appended to it.
        """
        tkn_var_re = r'token: (.*)\n'
        tkn_re = r"{} \+?= '(.*)'"
        main_page = requests.get(self.api_url, timeout=self.timeout)
        var = re.findall(tkn_var_re, main_page.text)[0]
        self.token = ''.join(re.findall(tkn_re.format(var),
                                        main_page.text))
        self.cookies = main_page.cookies.get_dict()
    def api_post(self, path='/get_list', headers=(), params=(), **data):
        """POST to the API, authenticating first if needed."""
        if not (self.cookies and self.token):
            self.authenticate()
        _headers = dict(headers)
        _headers['X-Request-Token'] = self.token
        _data = {}
        for key, value in data.items():
            # Translate filters e.g. `filters_category` -> `filters[category]`
            if key.startswith('filters_'):
                key = 'filters[{}]'.format(key[8:])
            _data[key] = value
        return super(BitLord, self).api_post(path=path, headers=_headers,
                                             params=params, **_data)
    @staticmethod
    def filter_ascii_only(names_magnets):
        """Yield only (name, magnet) pairs that are ASCII-encodable,
        lower-casing the magnet URI."""
        for name, magnet in names_magnets:
            try:
                str(name)
                str(magnet)
            except UnicodeEncodeError:
                continue
            else:
                yield (name, magnet.lower())
    def find_magnets(self, query=None, tv=False, movie=False, **kwargs):
        """Return (name, magnet) pairs for the given search, seeded torrents only."""
        if movie:
            query = '{} {}'.format(kwargs['title'], kwargs['year'])
            # NOTE(review): `filter_sort` lacks the `filters_` prefix, so it
            # is NOT translated to `filters[sort]` above — likely a typo for
            # `filters_sort`; confirm before relying on desc ordering.
            results = self.api_post(query=query, filters_category=3,
                                    filters_field='seeds', filter_sort='desc')
        elif tv:
            query = '{} {}'.format(kwargs['showname'], query)
            results = self.api_post(query=query, filters_category=4)
        elif query:
            results = self.api_post(query=query)
        else:
            results = self.api_post()
        results = results.get('content')
        if not results:
            return []
        return self.filter_ascii_only(
            ((t['name'], t['magnet']) for t in results
             if t['seeds'] > 0))
|
def maximum():
    """Verify that (x | y) >= max(x, y) for every pair 0 <= x, y < 100.

    Returns True when the property holds for all pairs; prints the first
    counter-example and returns False otherwise.  (Bitwise OR can only set
    bits, so the result is always >= each operand and the check passes.)

    Ported from Python 2 print statements to Python 3.
    """
    for x in range(100):
        for y in range(100):
            if not ((x | y) >= max(x, y)):
                print(x, y, (x | y), max(x, y))
                return False
    return True


print(maximum())
|
# Alpha code (DFS): print and count every way to decode a digit string
# into letters (1 -> A ... 26 -> Z).
import sys
sys.stdin=open("C:\Python-lecture\Python_lecture\section_7\input.txt", "rt")
def DFS(L, P):
    # L: index into the digit list `code`; P: letters produced so far (in res).
    global cnt
    if L==n:
        # consumed every digit: print this decoding and count it
        cnt+=1
        for j in range(P):
            print(chr(res[j]+64), end=' ')
        print()
    else:
        for i in range(1, 27):
            if code[L]==i:
                # single-digit match (1..9 encode A..I directly)
                res[P]=i
                DFS(L+1, P+1)
            elif i>=10 and code[L]==i//10 and code[L+1]==i%10:
                # two-digit match (10..26); consumes two digits
                res[P]=i
                DFS(L+2, P+1)
if __name__=="__main__":
    code=list(map(int, input()))
    n=len(code)
    # insert: place a -1 sentinel at index n so the two-digit lookahead
    # (code[L+1]) never reads past the real digits
    code.insert(n, -1)
    res=[0]*(n+3)
    cnt=0
    DFS(0, 0)
    print(cnt)
# Competitive-programming style input. NOTE(review): semantics below are
# inferred from the code alone — verify against the problem statement.
a=list(map(int,input().split()))
b=list(map(int,input().split()))
# yo counts the zero entries of b
yo=0
for i in range(len(b)):
    if b[i]==0:
        yo +=1
flag=0
# Compare every element of b against the reference element b[a[1]-1]
# (a[1] is a 1-based index); >= when the reference is non-zero, strict >
# when it is zero.  Nothing is counted when b is all zeros.
for c in range (len(b)):
    if yo!=len(b):
        if b[a[1]-1]!=0:
            if b[c]>=b[a[1]-1]:
                flag+=1
            else:
                continue
        else:
            if b[c]>b[a[1]-1]:
                flag+=1
            else:
                continue
    else:
        break
print(flag)
|
# USJ (Universal Studios Japan) ticket-monitoring constants.
# `target` / `new_target` are availability-check URLs built from ticket
# codes (t=...) and quantities (p=...).
# target = "https://ticket2.usj.co.jp/t/tkt/ei.do?t=3743|4157|3823|3840|3760|4182|3902|3913|3814&p=20|20|20|20|20|20|20|20|20&m=2"
target = "https://ticket2.usj.co.jp/t/tkt/ei.do?t=3743|4157|3823|3840|3760|4182|3902|3913&p=20|20|20|20|20|20|20|20&m=2"
new_target = "https://ticket2.usj.co.jp/t/tkt/ei.do?t=4648|4663|4655|4671|4628|4583|4614|4640|4716&p=20|20|20|20|20|20|20|20|20&m=2"
# cart and ticket-info endpoints (ticket code is appended)
prdUrl = "https://ticket2.usj.co.jp/t/tkt/cartinput.do?t="
apiUrl = "https://ticket2.usj.co.jp/api/ticketinfo.do?ticketcode="
# code -> human-readable product title (Korean labels)
productTitle = {
    "3743":"USJ7 - 스탠다드", #0
    "4157":"USJ7 - 싱온투어", #1
    "3823":"USJ7 - 플라잉공룡", #2
    "3840":"USJ7 - 백드롭", #3
    "3760":"USJ4 - 스탠다드", #4
    "4182":"USJ4 - 싱온투어", #5
    "3902":"USJ4 - 플라잉공룡", #6
    "3913":"USJ4 - 미니언라이드", #7
    # "3814":"USJ3 - 스탠다드", #8
}
# Product catalogue: `code` is the old ticket code, `code2` the newer one
# (absent/'null' when a product exists in only one generation).
prdData = {
    'item_0' : {
        'code' : '3743',
        'tct_type' : 'USJ7',
        'prd_type' : '스탠다드',
        'title' : 'USJ7-스탠다드'
    },
    'item_1' : {
        'code' : '4157',
        'code2' : '4628',
        'tct_type' : 'USJ7',
        'prd_type' : '싱온투어',
        'title' : 'USJ7-싱온투어',
    },
    'item_2' : {
        'code' : '3823',
        'code2' : '4583',
        'tct_type' : 'USJ7',
        'prd_type' : '플라잉 공룡',
        'title' : 'USJ7-플라잉 공룡',
    },
    'item_3' : {
        'code' : '3840',
        'code2' : '4614',
        'tct_type' : 'USJ7',
        'prd_type' : '백드롭',
        'title' : 'USJ7-백드롭',
    },
    'item_4' : {
        'code' : '3760',
        'tct_type' : 'USJ4',
        'prd_type' : '스탠다드',
        'title' : 'USJ4-스탠다드',
    },
    'item_5' : {
        'code' : '4182',
        'code2' : '4663',
        'tct_type' : 'USJ4',
        'prd_type' : '싱온투어',
        'title' : 'USJ4-싱온투어',
    },
    'item_6' : {
        'code' : '3902',
        'code2' : '4648',
        'tct_type' : 'USJ4',
        'prd_type' : '플라잉 공룡',
        'title' : 'USJ4-플라잉 공룡',
    },
    'item_7' :{
        'code' : '3913',
        'code2' : '4655',
        'tct_type' : 'USJ4',
        'prd_type' : '미니언라이드',
        'title' : 'USJ4-미니언라이드',
    },
    'item_8':{
        'code' : 'null',
        'code2' : '4671',
        'tct_type' : 'USJ4',
        'prd_type' : '스페이스 판타지 더 라이드',
        'title' : 'USJ4-스페이스 판타지 더 라이드',
    },
    'item_9':{
        'code' : 'null',
        'code2' : '4640',
        'tct_type' : 'USJ7',
        'prd_type' : '스페이스 판타지 더 라이드',
        'title' : 'USJ7-스페이스 판타지 더 라이드',
    }
# code2 4640 = USJ7, Space Fantasy The Ride
# code2 4716 = USJ3, Space Fantasy The Ride
}
# NOTE(review): duplicate assignment — prdUrl is already defined above
# with the identical value; this line is redundant.
prdUrl = "https://ticket2.usj.co.jp/t/tkt/cartinput.do?t="
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import urllib.robotparser
import urllib.parse
from datetime import date
from urllib.parse import urlencode
from urllib.request import Request, urlopen
def download(url, headers=None, user_agent='wswp', proxy=None, num_retries=2):
    """Fetch *url* and return the decoded HTML text, or None on failure.

    headers: optional extra request headers (merged over the User-agent).
    proxy: optional proxy for the URL's scheme.
    num_retries: remaining retries for 5xx responses.

    Fixes vs. original: the caller-supplied `headers` dict was silently
    overwritten; the proxy opener was built but never used to perform the
    request; and the retry call passed `num_retries - 1` into the
    `headers` parameter slot.
    """
    print('Downloading: ', url)
    request_headers = {'User-agent': user_agent}
    if headers:
        # caller-supplied headers win over the default User-agent
        request_headers.update(headers)
    request = Request(url, headers=request_headers)
    opener = urllib.request.build_opener()
    if proxy:
        proxy_params = {urllib.request.urlparse(url).scheme: proxy}
        opener.add_handler(urllib.request.ProxyHandler(proxy_params))
    try:
        # open through the opener so the proxy handler actually applies
        html = opener.open(request).read().decode('utf-8')
    except HTTPError as e:
        print("Download error: ", e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry on 5XX HTTP errors, preserving all settings
                return download(url, headers, user_agent, proxy,
                                num_retries - 1)
    return html
"""Action functions"""
def title_by_code(code, value='code2'):
    """Return the product title whose *value* field (default ``'code2'``)
    equals *code* in the prdData catalogue.

    Raises KeyError when no product matches.  Fixes vs. original: an
    unknown code fell off the loop and raised UnboundLocalError on the
    unassigned result variable, and a bare try/except hid missing keys —
    .get() handles items without a 'code2' entry instead.  Codes are
    unique in prdData, so returning the first match is equivalent to the
    original's last-match behaviour.
    """
    code = str(code)
    for item in prdData.values():
        if code == item.get(value):
            return item['title']
    raise KeyError('no product with {}={!r}'.format(value, code))
def dateformatter(year, month, day):
    """Join year/month/day into an ISO-8601 date string (YYYY-MM-DD).

    Accepts ints or numeric strings; validates via datetime.date, so an
    impossible date raises ValueError.
    """
    joined = "{}-{}-{}".format(year, month, day)
    parts = [int(piece) for piece in joined.split('-')]
    return date(parts[0], parts[1], parts[2]).isoformat()
def postData(data):
    """POST *data* as a form-encoded body to the ipacktour test endpoint
    and print the raw response text.

    NOTE(review): plain HTTP, and no error handling — urlopen raises on
    network/HTTP errors.
    """
    url = "http://ipacktour.com/test/python"
    post_fields = data # PARAMS
    request = Request(url, urlencode(post_fields).encode())
    # `json` is just the response body string (shadows the stdlib module
    # name; the body is not parsed here)
    json = urlopen(request).read().decode()
    print(json)
def send_data(target_data, ipack_post, text_data=None, telegram_post=False):
    """Forward collected data to the configured sinks.

    target_data: payload POSTed to the ipack endpoint via postData().
    ipack_post: when falsy, the ipack POST is skipped.
    text_data / telegram_post: when telegram_post is truthy, text_data is
    printed (the actual Telegram send is currently commented out).

    Changes vs. original: the explicit `== True` / `== False` branch pairs
    are collapsed to idiomatic truth tests and the Korean comments are
    translated; behaviour for boolean arguments is unchanged.
    """
    if ipack_post:
        # forward the buffered payload to the ipack endpoint
        postData(target_data)
    if telegram_post:
        try:
            print(text_data)
            # ge_fc.sendTelegram(text_data)  # actual Telegram send (disabled)
        except IndexError:
            pass
|
from celery import Celery
import sys
import os
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import tensorflow as tf
import urllib.request
from PIL import Image
import string
import random
from io import BytesIO
import mysql.connector
import time
# Celery application: tasks are dispatched through the local RabbitMQ broker.
app = Celery('tasks', broker='amqp://guest@localhost//')
# Module-level MySQL connection shared by all tasks.
# NOTE(review): long-lived connections can time out between tasks — confirm
# reconnection handling; credentials are hard-coded here.
mydb = mysql.connector.connect(
    host="192.168.1.67",
    user="python",
    passwd="python",
    database="biomass_database"
)
@app.task
def handle_images(content):
    """Celery task: download the images of one biomass class and file them
    into the train/test/final dataset splits (target shares 75/15/10).

    content: dict with 'biomass_name' (class label) and 'url_images'
    (iterable of image URLs).  A brand-new class gets its three split
    directories created and a row in the `biomass` table; once the train
    split reaches 100 files the class is assigned the next ML class id.
    """
    subfolder_name = content['biomass_name'].lower().replace(' ','_')
    url_images = content['url_images']
    full_path = "/data/tera_1/partage/dataset/train/{}".format(subfolder_name)
    def randomString(stringLength=10):
        """Generate a random string of fixed length """
        letters = string.ascii_uppercase
        return ''.join(random.choice(letters) for i in range(stringLength))
    print ("Check if {} exists".format(full_path))
    if os.path.exists(full_path):
        print("Path does exist")
    else:
        # first time this class is seen: create the split folders and
        # register the class in the database
        print("Path does not exist. Creating.")
        os.makedirs("/data/tera_1/partage/dataset/train/{}".format(subfolder_name))
        os.makedirs("/data/tera_1/partage/dataset/test/{}".format(subfolder_name))
        os.makedirs("/data/tera_1/partage/dataset/final/{}".format(subfolder_name))
        cursor = mydb.cursor()
        # NOTE(review): SQL built via str.format from task input — use a
        # parameterized query if subfolder_name can be untrusted.
        query_insert_class = '''
    INSERT INTO biomass (name,path_dataset)
    VALUES ('{}','{}');
    '''
        cursor.execute(query_insert_class.format(subfolder_name, full_path))
        mydb.commit()
    train_path = "/data/tera_1/partage/dataset/train/{}".format(subfolder_name)
    test_path = "/data/tera_1/partage/dataset/test/{}".format(subfolder_name)
    final_path = "/data/tera_1/partage/dataset/final/{}".format(subfolder_name)
    # count the files already present in each split
    file_count_train = 0
    for _, _, filenames in os.walk(train_path):
        file_count_train += len(filenames)
    file_count_test = 0
    for _, _, filenames in os.walk(test_path):
        file_count_test += len(filenames)
    file_count_final = 0
    for _, _, filenames in os.walk(final_path):
        file_count_final += len(filenames)
    file_total = file_count_final + file_count_test + file_count_train
    if file_total == 0:
        # avoid division by zero for a brand-new, empty class
        file_total = 1
    print("Current repartition is :")
    print("{} test".format(file_count_test/file_total))
    print("{} train".format(file_count_train/file_total))
    print("{} final".format(file_count_final/file_total))
    ## Balance datasets
    # Get deltas
    # delta_* = how far each split currently sits below its target share
    delta_train = 0.75 - (file_count_train/file_total)
    delta_test = 0.15 - (file_count_test/file_total)
    delta_final = 0.10 - (file_count_final/file_total)
    print("Delta test : {}".format(delta_test))
    print("Delta train : {}".format(delta_train))
    print("Delta final : {}".format(delta_final))
    destination_path = ""
    # Download each image + save in path
    for url in url_images:
        # each image goes to the split currently furthest below target
        if delta_train > delta_test and delta_train > delta_final :
            print("Delta train is biggest. Add to train")
            destination_path = train_path
            file_count_train += 1
        elif delta_test > delta_train and delta_test > delta_final :
            print("Delta test is biggest. Add to test")
            destination_path = test_path
            file_count_test += 1
        else :
            print("Delta final is biggest. Add to final")
            destination_path = final_path
            file_count_final += 1
        print("Retrieving image at URL {}".format(url))
        with urllib.request.urlopen(url) as response:
            im = Image.open(BytesIO(response.read()))
            path_save = "{0}/{1}.png".format(destination_path,randomString())
            im.save(path_save,"PNG")
            print("Image saved at {}".format(path_save))
        file_total += 1
        #Recompute deltas
        delta_train = 0.75 - (file_count_train/file_total)
        delta_test = 0.15 - (file_count_test/file_total)
        delta_final = 0.10 - (file_count_final/file_total)
        print("Delta test : {}".format(delta_test))
        print("Delta train : {}".format(delta_train))
        print("Delta final : {}".format(delta_final))
    ## End balance
    if(file_count_train >= 100):
        # enough training data: allocate the next free ML class id
        print("New folder exceeds threshold. Adding ML_class")
        cursor = mydb.cursor()
        query_update_class = '''
    SET SQL_SAFE_UPDATES=0;
    SET @new_class = (select MAX(class_ML)+1 from biomass);
    UPDATE biomass SET class_ML = @new_class WHERE biomass.name='{}';
    SET SQL_SAFE_UPDATES=1;
    '''
        results = cursor.execute(query_update_class.format(subfolder_name),multi=True)
        for cur in results:
            print('cursor:', cur)
            if cur.with_rows:
                print('result:', cur.fetchall())
        mydb.commit()
@app.task
def train_model():
    """Celery task: build and train the biomass image-classification CNN.

    Counts one class per sub-directory of the training set, trains a small
    three-conv-block network with heavy augmentation, checkpoints every
    epoch, saves the final model and returns "OK".

    Fix vs. original: the class-counting loop walked an undefined name
    ``path`` (NameError as soon as the task ran); it now walks the
    training data directory.
    """
    epochs = 35
    img_width, img_height = 250, 250
    train_data_dir = "/data/tera_1/partage/dataset/train"
    validation_data_dir = "/data/tera_1/partage/dataset/test"
    batch_size = 16
    # one class per sub-directory of the training set (recursive walk,
    # so nested directories are counted too)
    nb_classes = 0
    for _, dirnames, _ in os.walk(train_data_dir):
        nb_classes += len(dirnames)
    print("Starting script with {} classes & {} epochs".format(nb_classes, epochs))
    # input layout depends on the backend's image data format
    if K.image_data_format() == 'channels_first':
        input_shape = (3, img_width, img_height)
    else:
        input_shape = (img_width, img_height, 3)
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
        rotation_range=90,
        width_shift_range=0.3,
        height_shift_range=0.3,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.3,
        horizontal_flip=True,
        cval=255,
        fill_mode='constant')
    # validation data is only rescaled, never augmented
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        color_mode='rgb',
        shuffle=True,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical')
    test_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        shuffle=True,
        color_mode='rgb',
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical')
    model.summary()
    # checkpoint after every epoch so an interrupted run can be resumed
    filepath = '/data/tera_1/partage/dataset/biomasse-checkpoint.hdf5'
    checkpoint = ModelCheckpoint(filepath, verbose=1)
    callbacks_list = [checkpoint]
    model.fit_generator(
        train_generator,
        validation_data=test_generator,
        validation_steps=100,
        steps_per_epoch=(100 * nb_classes),
        epochs=epochs, callbacks=callbacks_list)
    model.save('/data/tera_1/partage/dataset/biomasse.hdf5')
    return "OK"
import fileinput
def main():
    """Read the problem parameters from ``in.txt`` and return them as
    (Amin, Amax, Kmax, R); aborts the process when Kmax < 5.

    Expected file layout (inferred from the indexing below — confirm):
    line 1: Kmax, line 2: R, line 3: "Amin Amax".
    """
    # input data is read from a file
    indata = []
    for line in fileinput.input(files='in.txt'):
        indata.append(line.rstrip('\n'))
    interval = indata[2].split(' ')
    # drop the interval line, leaving [Kmax, R] for the unpack below
    del indata[-1]
    Amin, Amax = interval
    Kmax, R = indata
    # casting
    Kmax = int(Kmax)
    R = int(R)
    Amin = int(Amin)
    Amax = int(Amax)
    if(Kmax < 5):
        # specification requires Kmax >= 5; abort with an error banner
        print("*****************")
        print("Erro - De acordo com as especificações, Kmax deve ser maior ou igual à 5.")
        print("Abortando execução")
        print("*****************")
        exit()
    return Amin, Amax, Kmax, R
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from typing import Any, Dict
from mock import patch
from pyhocon import ConfigFactory
from databuilder import Scoped
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
class TestSqlAlchemyExtractor(unittest.TestCase):
    """Unit tests for SQLAlchemyExtractor; the DB connection is mocked out."""
    def setUp(self) -> None:
        # Minimal scoped config: connection string plus the SQL to run.
        config_dict = {
            'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',
            'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;'
        }
        self.conf = ConfigFactory.from_dict(config_dict)
    @patch.object(SQLAlchemyExtractor, '_get_connection')
    def test_extraction_with_empty_query_result(self: Any,
                                                mock_method: Any) -> None:
        """
        Test Extraction with empty result from query
        """
        extractor = SQLAlchemyExtractor()
        # Pre-seed the results the (mocked) connection would have produced.
        extractor.results = ['']
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        results = extractor.extract()
        self.assertEqual(results, '')
    @patch.object(SQLAlchemyExtractor, '_get_connection')
    def test_extraction_with_single_query_result(self: Any,
                                                 mock_method: Any) -> None:
        """
        Test Extraction from single result from query
        """
        extractor = SQLAlchemyExtractor()
        extractor.results = [('test_result')]
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        results = extractor.extract()
        self.assertEqual(results, 'test_result')
    @patch.object(SQLAlchemyExtractor, '_get_connection')
    def test_extraction_with_multiple_query_result(self: Any,
                                                   mock_method: Any) -> None:
        """
        Test Extraction from list of results from query
        """
        extractor = SQLAlchemyExtractor()
        extractor.results = ['test_result', 'test_result2', 'test_result3']
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        # extract() yields one row per call.
        result = [extractor.extract() for _ in range(3)]
        self.assertEqual(len(result), 3)
        self.assertEqual(result,
                         ['test_result', 'test_result2', 'test_result3'])
    @patch.object(SQLAlchemyExtractor, '_get_connection')
    def test_extraction_with_model_class(self: Any, mock_method: Any) -> None:
        """
        Test Extraction using model class
        """
        # model_class makes the extractor wrap each row dict in the class
        # named below (TableMetadataResult, defined at the end of this file).
        config_dict = {
            'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',
            'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;',
            'extractor.sqlalchemy.model_class':
                'tests.unit.extractor.test_sql_alchemy_extractor.TableMetadataResult'
        }
        self.conf = ConfigFactory.from_dict(config_dict)
        extractor = SQLAlchemyExtractor()
        extractor.results = [dict(database='test_database',
                                  schema='test_schema',
                                  name='test_table',
                                  description='test_description',
                                  column_name='test_column_name',
                                  column_type='test_column_type',
                                  column_comment='test_column_comment',
                                  owner='test_owner')]
        extractor.init(Scoped.get_scoped_conf(conf=self.conf,
                                              scope=extractor.get_scope()))
        result = extractor.extract()
        self.assertIsInstance(result, TableMetadataResult)
        self.assertEqual(result.name, 'test_table')
    @patch('databuilder.extractor.sql_alchemy_extractor.create_engine')
    def test_get_connection(self: Any, mock_method: Any) -> None:
        """
        Test that configs are passed through correctly to the _get_connection method
        """
        extractor = SQLAlchemyExtractor()
        config_dict: Dict[str, Any] = {
            'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',
            'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;'
        }
        conf = ConfigFactory.from_dict(config_dict)
        extractor.init(Scoped.get_scoped_conf(conf=conf,
                                              scope=extractor.get_scope()))
        extractor._get_connection()
        # No connect_args configured -> empty dict is forwarded.
        mock_method.assert_called_with('TEST_CONNECTION', connect_args={})
        extractor = SQLAlchemyExtractor()
        config_dict = {
            'extractor.sqlalchemy.conn_string': 'TEST_CONNECTION',
            'extractor.sqlalchemy.extract_sql': 'SELECT 1 FROM TEST_TABLE;',
            'extractor.sqlalchemy.connect_args': {"protocol": "https"},
        }
        conf = ConfigFactory.from_dict(config_dict)
        extractor.init(Scoped.get_scoped_conf(conf=conf,
                                              scope=extractor.get_scope()))
        extractor._get_connection()
        # Configured connect_args must be forwarded verbatim to create_engine.
        mock_method.assert_called_with('TEST_CONNECTION', connect_args={"protocol": "https"})
class TableMetadataResult:
    """
    Table metadata result model.
    SQL result has one row per column
    """
    def __init__(self,
                 database: str,
                 schema: str,
                 name: str,
                 description: str,
                 column_name: str,
                 column_type: str,
                 column_comment: str,
                 owner: str
                 ) -> None:
        # Bind every constructor argument to an attribute of the same name.
        values = locals()
        for field in ('database', 'schema', 'name', 'description',
                      'column_name', 'column_type', 'column_comment', 'owner'):
            setattr(self, field, values[field])
|
import tornado.web
import json
import config
class Send_task(tornado.web.RequestHandler):
    """Provide API for frontend to perform task creation and result visualization
    """
    def get(self, *arg, **kwargs):
        # Reply with a fixed probe task; tornado JSON-encodes the dict itself.
        self.write({
            'type': 'ping',
            'url': 'www.baidu.com',
        })
    def post(self):
        """
        """
        # get arguments from POST request body
        # type = get_body_argument('type', None)
        # self.write(type)
class Receive_mac(tornado.web.RequestHandler):
    """Receives a MAC-address payload and echoes it back to the client.
    """
    def post(self, *args, **kwargs):
        import ast
        # SECURITY FIX: the "mac" argument is client-controlled; eval() on it
        # allowed arbitrary code execution. ast.literal_eval accepts only
        # Python literals (strings, numbers, lists, dicts, ...).
        mac = ast.literal_eval(self.get_argument("mac"))
        print(mac)
        self.write(mac)
class Receive_data(tornado.web.RequestHandler):
    """Receives a task payload and echoes it back to the client.
    """
    def post(self, *args, **kwargs):
        import ast
        # SECURITY FIX: the "task" argument is client-controlled; eval() on it
        # allowed arbitrary code execution. ast.literal_eval accepts only
        # Python literals (strings, numbers, lists, dicts, ...).
        task = ast.literal_eval(self.get_argument("task"))
        print(task)
        self.write(task)
# class Send_task(tornado.web.Websocket):
|
#Q: Double the given number using lambda function
def double(a):
    """Print twice the given number, computed through a lambda."""
    doubler = lambda n: 2 * n
    print(doubler(a))

double(25)
from typing import List
from models.chat_state import ChatState
from models.event_pattern import EventPattern
from constants.mattermost_status import MattermostStatus
class User:
    """
    Represents a single user, identified by its Mattermost username.
    """
    _mattermost_login = ''
    _gcal_token_file = ''
    _patterns: List[EventPattern]
    _id: str
    _default_chat_state: ChatState
    def __init__(self, user_settings):
        """Build a user from a settings dict with keys 'mattermost_login',
        'gcal_token_file', 'patterns' and 'default_chat_state'."""
        self._mattermost_login = user_settings['mattermost_login']
        self._gcal_token_file = user_settings['gcal_token_file']
        # BUG FIX: _patterns used to be a class-level list (`= []`), so every
        # User instance appended to — and shared — the same list. Create a
        # fresh list per instance instead.
        self._patterns = []
        for pattern_settings in user_settings['patterns']:
            self._patterns.append(EventPattern(pattern_settings))
        self._id = self._mattermost_login
        self._default_chat_state = ChatState(
            user_settings['default_chat_state']['suffix'],
            MattermostStatus.from_string(user_settings['default_chat_state']['status'])
        )
    def get_id(self):
        """Return the user id (the Mattermost login)."""
        return self._id
    def get_patterns(self) -> List[EventPattern]:
        """Return this user's configured event patterns."""
        return self._patterns
    def get_default_chat_state(self) -> ChatState:
        """Return the chat state used when no pattern matches."""
        return self._default_chat_state
    def get_mattermost_login(self) -> str:
        return self._mattermost_login
    def get_gcal_token_file_name(self) -> str:
        return self._gcal_token_file
|
import torch
import torch.nn.modules as nn
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
class RNNNet(nn.Module):
    """RNN classifier: treats each 28x28 image as a 28-step sequence of
    28-dim rows and maps the final time step's output to 10 classes."""
    def __init__(self):
        super(RNNNet, self).__init__()
        self.rnn_layer = nn.RNN(input_size=28, hidden_size=28, num_layers=1, batch_first=True)
        self.fc_layer = nn.Linear(28, 10)
    def forward(self, x):  # [N 1 28 28]
        x = x.reshape(x.size(0), -1, 28)  # [N 28 28]
        # BUG FIX: nn.RNN returns a tuple (output, h_n); the original indexed
        # the tuple with [:, -1, :], which raises TypeError. Unpack, take the
        # last time step, and apply the (previously unused) fc layer.
        out, _ = self.rnn_layer(x)       # out: [N 28 28]
        last = out[:, -1, :]             # [N 28]
        return self.fc_layer(last)       # [N 10]
if __name__ == '__main__':
    # Smoke-test: push one random MNIST-shaped batch [N 1 28 28] through the
    # network and print the result. The MNIST training loop below is kept
    # commented out.
    x = torch.randn(2, 1, 28, 28)
    net = RNNNet()
    y = net(x)
    print(y)
    # train_dataset = datasets.MNIST('./data', train=True, transform=transforms.ToTensor(), download=True)
    # train_loader = DataLoader(dataset=train_dataset, batch_size=1000, shuffle=True)
    #
    # test_dataset = datasets.MNIST('./data', train=False, transform=transforms.ToTensor(), download=False)
    # test_loader = DataLoader(dataset=test_dataset, batch_size=1000, shuffle=True)
    #
    # net = RNNNet().cuda()
    # optimizer = torch.optim.Adam(net.parameters())
    # loss_fn = nn.CrossEntropyLoss()
    #
    # for epoch in range(1000):
    #     for xs, ys in train_loader:
    #         xs, ys = xs.cuda(), ys.cuda()
    #         output = net(xs)
    #         loss = loss_fn(output, ys)
    #
    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()
    #
    #     for test_xs, test_ys in test_loader:
    #         test_xs, test_ys = test_xs.cuda(), test_ys.cuda()
    #         test_out = net(test_xs)
    #
    #         test_idx = torch.argmax(torch.log_softmax(test_out, dim=1), dim=1)
    #         accuracy = torch.mean(torch.sum(torch.eq(test_idx, test_ys)))
    #         print(accuracy)
    #         break
|
# Generated by Django 2.1 on 2018-08-29 07:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the OrderModel table with contact
    # fields plus a free-text 'products' column. Do not hand-edit generated
    # migrations except to add documentation.
    dependencies = [
        ('shop', '0004_auto_20180828_2309'),
    ]
    operations = [
        migrations.CreateModel(
            name='OrderModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('surname', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('tel', models.CharField(max_length=100)),
                ('products', models.TextField()),
            ],
        ),
    ]
|
from django.contrib import admin
from django.urls import path
from Main import views
# URL routing: only one route, mapping /music/ to the music search view.
urlpatterns = [
    path('music/', views.music_search),
]
|
"""
Given an array of numbers, find the length of the longest increasing subsequence
in the array. The subsequence does not necessarily have to be contiguous.
For example, given the array
[0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15],
the longest increasing subsequence has length 6: it is 0, 2, 6, 9, 11, 15
"""
def RightSubSequence(s: list) -> int:
    """Greedy scan: from each viable start index, build one increasing run
    rightward and return the length of the longest run found.

    NOTE(review): this is a heuristic, not a full LIS — it keeps only the
    single greedy run per start and skips starts below the current best's
    first element, so it can undercount on some inputs; verify before reuse.
    """
    lonest_sub = []
    for i in range(len(s)):
        if i > 0:
            # Skip starts that extend the previous element (already covered).
            if s[i] > s[i-1]:
                continue
        if s[i] in lonest_sub:
            continue
        cur_sub = []
        for j in range(i+1, len(s)):
            if len(cur_sub) == 0:
                # Seed the run with s[i], unless it starts below the best run.
                if len(lonest_sub) > 0:
                    if s[i] < lonest_sub[0]:
                        continue
                cur_sub.append(s[i])
            elif s[j] > cur_sub[-1]:
                cur_sub.append(s[j])
            else:
                continue
        if len(cur_sub) > len(lonest_sub):
            lonest_sub = cur_sub
    return len(lonest_sub)
def LeftSubSequence(s: list) -> int:
    """Length of the greedy run picked left-to-right where each kept value
    is strictly smaller than the previously kept one."""
    picked = []
    for value in s:
        if not picked or value < picked[-1]:
            picked.append(value)
    return len(picked)
def longestSubSequence(s: list) -> int:
    """Return the length of the longest strictly increasing subsequence of s.

    Uses patience sorting in O(n log n): tails[k] holds the smallest possible
    tail value of an increasing subsequence of length k+1.

    BUG FIX: the previous implementation combined two greedy scans and could
    undercount — e.g. it returned 2 for [1, 5, 2, 3], whose LIS (1, 2, 3) has
    length 3. It also returned -1 for an empty list; this returns 0.
    """
    from bisect import bisect_left
    tails = []
    for value in s:
        pos = bisect_left(tails, value)
        if pos == len(tails):
            tails.append(value)       # extends the longest subsequence
        else:
            tails[pos] = value        # found a smaller tail for this length
    return len(tails)
# Self-check against the example in the module docstring (expected length 6).
if __name__ == "__main__":
    assert longestSubSequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]) == 6
#Tatiana Borta
#10023231
#CA5 part_a
def add(first, second):
    """Pairwise-add two number sequences (lazy map in Py3, list in Py2)."""
    return map(lambda left, right: left + right, first, second)
def substract(values):
    """Left-fold subtraction over values, e.g. [45, 34, 12] -> -1."""
    # FIX: reduce is a builtin only in Python 2; import it from functools
    # (available since 2.6) so the function also works on Python 3.
    from functools import reduce
    return reduce(lambda x, y: x - y, values)
def devide(first, second):
    """Pairwise division; yields the string 'nan' where the divisor is 0."""
    return map(lambda num, den: num / float(den) if den != 0 else 'nan', first, second)
def multiply(values):
    """Product of all values via a left fold."""
    # FIX: reduce is a builtin only in Python 2; import it from functools
    # (available since 2.6) so the function also works on Python 3.
    from functools import reduce
    return reduce(lambda x, y: x * y, values)
def exponential(values):
    """Left-fold exponentiation: [a, b, c] -> (a ** float(b)) ** float(c)."""
    # FIX: reduce is a builtin only in Python 2; import it from functools
    # (available since 2.6) so the function also works on Python 3.
    from functools import reduce
    return reduce(lambda x, y: x ** float(y), values)
def cube(num):
    """Cube every element of the sequence."""
    return map(lambda value: value ** 3, num)
def sqrt(num1):
    """Square root of every element (via x ** 0.5)."""
    return map(lambda value: value ** (1 / 2.0), num1)
def lessthanmean(values):
    """Keep only the values strictly below the arithmetic mean.

    NOTE: sum/len division truncates on Python 2 ints; kept as-is.
    """
    mean = sum(values) / len(values)
    return filter(lambda value: value < mean, values)
def fizzbuzz(values):
    """Keep the values divisible by both 3 and 5 (i.e. by 15)."""
    return filter(lambda value: value % 15 == 0, values)
def pythagorean(n):
    """Generate Pythagorean triples (a, b, c) with 1 <= a <= b <= c < n."""
    for a in range(1, n):
        for b in range(a, n):
            for c in range(b, n):
                if a * a + b * b == c * c:
                    yield (a, b, c)
# NOTE: Python 2 script body (print statements; the helpers above rely on
# map/filter returning lists, as they do on Python 2).
pyt = pythagorean(50)
for v in pyt:
    print 'Pythagorean as generator\n', v,
#as list comprehension
pythagorean = [(x,y,z) for x in range(1,30) for y in range(x,30) for z in range(y,30) if x**2 + y**2 == z**2]
print 'Pythagorean as list comprehension: \n', pythagorean
print 'Addition function: ', add([12, 45, 34, 23], [2, 4, 6, 0])
print 'Substraction function: ', substract([45, 34, 12])
print 'Devision function: ', devide([2, 4,6], [3, 5, 0])
print 'Multiplying function: ', multiply([2, 4, 6])
print 'Exponential function: ',exponential([3, 3, 2])
print 'Cube function: ', cube([1, 2, 3, 5, 7, 9])
print 'Square root function: ', sqrt([2, 3, 4, 5])
print 'Less than mean :', lessthanmean([22, 33, 44, 55, 66])
print 'Legendary FizzBuzz: ', fizzbuzz([15, 34, 30, 100, 9, 20, 60])
|
import hashlib

# Read min.jpg as raw bytes and print its MD5 digest.
with open("min.jpg", "rb") as image_file:
    contents = image_file.read()
print(hashlib.md5(contents).digest())
|
import pystan
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import sys
import time
import arviz as az
import xarray as xr
#Modify system pathway to ensure import works
sys.path.insert(1,'/Users/laurence/Desktop/Neuroscience/kevin_projects/code/mousetask/models/mouse_task_GLM_parameters')
#Performance checks
start_time = time.time()
from GLM_classes import Data
# Stan program source, compiled by pystan below. The string contents are
# runtime data and are kept byte-identical here.
# NOTE(review): beta_habits has no explicit prior in the model block —
# presumably an intentional improper flat prior; confirm.
model = """
// Stan code for a mixture-of-exponentials model on the distracer rewards task
data {
//Data variables
int<lower=0> nTrials;
// int<lower=0,upper=1> trial_types[nTrials]; - not needed for synethic data
int<lower=-1,upper=1> choices[nTrials]; //
int<lower=0,upper=1> outcomes_cherry[nTrials];
int<lower=0,upper=1> outcomes_grape[nTrials];
//Test data variables
//int<lower=0> nTrials_test;
//int<lower=0,upper=1> trial_types_test[nTrials_test];
//int<lower=-1,upper=1> choices_test[nTrials_test];
//int<lower=0,upper=1> outcomes_cherry_test[nTrials_test];
//int<lower=0,upper=1> outcomes_grape_test[nTrials_test];
// What is this? int inc[3];
}
parameters {
real u_cherry;
real u_grape;
real u_nothing;
real<lower=0> beta_rl;
real<lower=0> beta_habits;
real beta_bias;
real<lower=0, upper=1> alpha_rl;
real<lower=0, upper=1> alpha_habits;
}
transformed parameters {
real log_lik; // Accumulator for log-likelihood
// Name-space for the loop over trials
{
real Q; // The hidden state of each agent
real H; // The hidden state of each agent
real Qeff;
real u_trial;
log_lik = 0;
Q = 0;
H = 0;
for (t_i in 1:nTrials) {
// if (trial_types[t_i] == 1)- needed for bev data not syth data
Qeff = choices[t_i]*(beta_rl*Q + beta_habits*H + beta_bias);
log_lik = log_lik + log(exp(Qeff) / (exp(Qeff) + exp(-1*Qeff)));
// RL Learning
if (outcomes_cherry[t_i] == 1 && outcomes_grape[t_i] == 1){
u_trial = 1;
}
else if (outcomes_grape[t_i] == 1){
u_trial = u_grape;
}
else if (outcomes_cherry[t_i] == 1){
u_trial = u_cherry;
}
else {
u_trial = u_nothing;
}
Q = (1-alpha_rl)*Q + alpha_rl * choices[t_i] * u_trial;
// Habits learning
H = (1- alpha_habits)* H + alpha_habits * choices[t_i];
}
}
}
model {
// Priors
u_cherry ~ normal(0, 1);
u_grape ~ normal(0, 1);
u_nothing ~ normal(0, 1);
beta_rl ~ normal(0, 1);
beta_bias ~ normal(0, 1);
alpha_rl ~ beta(3,3);
alpha_habits ~ beta(3,3);
// increment log likelihood
target += log_lik;
}
"""
#Load Data from a personal library that generated synthetic data
data = Data("train_b_data")._data_frame
#Map data from data frame to stan variables
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int yields the same default integer dtype.
choices = np.array(data["left_choices"], dtype=int)
# Recode 0/1 choices to -1/+1 (vectorized instead of a Python loop).
choices = np.where(choices == 0, -1, choices)
nTrials = len(choices)
outcomes_grape = np.array(data["left_rewards"], dtype=int)
outcomes_cherry = np.array(data["right_rewards"], dtype=int)
#Put the data in a dictionary matching the Stan model's data block
data = {'nTrials': nTrials,
        'choices': choices,
        'outcomes_grape': outcomes_grape,
        'outcomes_cherry': outcomes_cherry}
# Compile the model (pystan translates the Stan source to C++ and builds it)
sm = pystan.StanModel(model_code=model)
# Train the model and generate samples:
# 4 chains x 1000 iterations, 500 warmup each -> 2000 retained draws.
fit = sm.sampling(data=data, iter=1000, chains=4, warmup=500, thin=1, seed=101)
print(fit)
#Create df for the sample data (per-parameter summary statistics)
summary_dict = fit.summary()
df = pd.DataFrame(summary_dict['summary'],
                  columns=summary_dict['summary_colnames'],
                  index=summary_dict['summary_rownames'])
# Wrap the fit for arviz diagnostics, mapping "log_lik" as the likelihood.
az_data = az.from_pystan(
    posterior=fit,
    log_likelihood={"choices": "log_lik"},
)
# # Parameter estimation
# param_est = sm.optimizing(data=data)
# print(param_est)
# #Extract the traces
# u_cherry = fit['u_cherry']
# u_grape = fit['u_grape']
# u_nothing = fit['u_nothing']
# beta_rl = fit['beta_rl']
# beta_habits = fit['beta_habits']
# beta_bias = fit['beta_bias']
# alpha_rl = fit['alpha_rl']
# alpha_habits = fit['alpha_habits']
#
# def plot_posteriors(param, param_name='parameter'):
# """Plot the trace and posterior of a parameter."""
#
# # Summary statistics
# mean = np.mean(param)
# # median = np.median(param)
# cred_min, cred_max = np.percentile(param, 2.5), np.percentile(param, 97.5)
#
# # Plotting traces
# # plt.subplot(2,1,1)
# # plt.plot(param)
# # plt.xlabel('samples')
# # plt.ylabel(param_name)
# # plt.axhline(mean, color='r', lw=2, linestyle='--')
# # # plt.axhline(median, color='c', lw=2, linestyle='--')
# # plt.axhline(cred_min, linestyle=':', color='k', alpha=0.2)
# # plt.axhline(cred_max, linestyle=':', color='k', alpha=0.2)
# # plt.title('Trace and Posterior Distribution for {}'.format(param_name))
#
# #Plot Posterior Distributions
# plt.title('Posterior Distribution for {}'.format(param_name))
# plt.hist(param, 30, density=True); sns.kdeplot(param, shade=True)
# plt.xlabel(param_name)
# plt.ylabel('density')
# plt.axvline(mean, color='r', lw=2, linestyle='--',label='mean')
# # plt.axvline(median, color='c', lw=2, linestyle='--',label='median')
# plt.axvline(cred_min, linestyle=':', color='k', alpha=0.2, label='95% CI')
# plt.axvline(cred_max, linestyle=':', color='k', alpha=0.2)
# plt.gcf().tight_layout()
# # plt.legend()
# plt.show()
#
# plot_posteriors(u_cherry, "u_cherry")
# plot_posteriors(u_grape, "u_grape")
# plot_posteriors(u_nothing, "u_nothing")
# plot_posteriors(beta_rl, "beta_rl")
# plot_posteriors(beta_habits, "beta_habits")
# plot_posteriors(beta_bias, "beta_bias")
# plot_posteriors(alpha_rl, "alpha_rl")
# plot_posteriors(alpha_habits, "alpha_habits")
#
#Print the time of the process
# Elapsed wall-clock seconds since start_time was recorded near the top of
# the script (dominated by Stan compilation + sampling).
print("")
print("--- %s seconds ---" % (time.time() - start_time))
print("")
|
import datetime
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from engineers.models import Engineer
from engineers.forms import EngineerAddForm
from engineers.views import EngineerEditView
class EngineerFormTest(TestCase):
    """Validation tests for EngineerAddForm's mobile-number handling."""
    @staticmethod
    def _build_form(name, mobile):
        # Small factory so each test states only the inputs under test.
        return EngineerAddForm(data={'engineer_name': name, 'mobile': mobile})
    def test_EngineerForm_valid(self):
        # A 10-digit mobile number is accepted.
        self.assertTrue(self._build_form('Lo Lo', '9898887888').is_valid())
    def test_EngineerForm_invalid_mobile(self):
        # Too-short mobile number is rejected.
        self.assertFalse(self._build_form('Lo Lo', '98988878').is_valid())
    def test_EngineerForm_blank_mobile(self):
        # Mobile number is required.
        self.assertFalse(self._build_form('Lo Lo', '').is_valid())
class EngineerTestCases(TestCase):
    """View tests for engineer list/edit/delete, exercised via the test client."""
    @classmethod
    def setUpTestData(cls):
        # One user and one engineer created once for the whole TestCase.
        cls.user = User.objects.create(
            email = 'test@test.com',
            first_name = 'Mike',
            username = 'test@test.com',
            # NOTE: the trailing space in the password is intentional and
            # matched by the login calls below.
            password = make_password('test@test ')
        )
        cls.engineer = Engineer.objects.create(
            added_by = cls.user,
            engineer_name = 'GoGo Go',
            mobile = '989878678'
        )
    def setUp(self):
        # Undo any attribute changes a previous test made in memory.
        self.user.refresh_from_db()
        self.engineer.refresh_from_db()
    def test_engineer_page_without_login(self):
        # Anonymous access redirects (302) to the login page.
        r = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(r.status_code, 302)
    def test_engineer_create(self):
        self.assertEqual(self.engineer.engineer_name, 'GoGo Go')
        self.assertEqual(self.engineer.added_by, self.user)
    def test_engineer_page_after_login(self):
        user_login = self.client.login(username=self.user.email, password='test@test ')
        self.assertTrue(user_login)
        response = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(response.status_code, 200)
        # The list page shows the engineer's name and mobile.
        self.assertContains(response, self.engineer.engineer_name)
        self.assertContains(response, self.engineer.mobile)
    def test_engineer_edit_view_valid_details(self):
        self.client.force_login(self.user)
        data = {
            'engineer_name': 'Jawla Pasand',
            'mobile':'9888778889',
            'eng-id':self.engineer.id
        }
        response = self.client.post(reverse('engineers:engineer_edit_view'), data)
        self.assertEqual(response.status_code, 302)
        # After a valid edit, the old name is gone and the new one appears.
        page = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(page.status_code, 200)
        self.assertNotIn(b'GoGo Go', page.content)
        self.assertIn(b'Jawla', page.content)
        self.client.logout()
    def test_engineer_edit_view_invalid_details(self):
        self.client.force_login(self.user)
        # Missing 'mobile' makes the form invalid; the record must not change.
        data = {
            'engineer_name': 'Jawla Pasand',
            'eng-id':self.engineer.id
        }
        response = self.client.post(reverse('engineers:engineer_edit_view'), data)
        self.assertEqual(response.status_code, 302)
        page = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(page.status_code, 200)
        self.assertNotIn(b'Jawla', page.content)
        self.assertIn(b'GoGo Go', page.content)
        self.client.logout()
    def test_engineer_delete_view_valid_id(self):
        self.client.force_login(self.user)
        data = {'engineer-id':self.engineer.id}
        response = self.client.post(reverse('engineers:engineer_delete_view'),
                                    data=data)
        page = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(page.status_code, 200)
        # Deleted engineer no longer listed under either name.
        self.assertNotIn(b'GoGo Go', page.content)
        self.assertNotIn(b'Jawla Pasand', page.content)
    def test_engineer_delete_view_invalid_id(self):
        self.client.force_login(self.user)
        # Nonexistent id: view still redirects, then the page reports failure.
        data = {'engineer-id':self.engineer.id + 1}
        response = self.client.post(reverse('engineers:engineer_delete_view'),
                                    data=data)
        self.assertEqual(response.status_code, 302)
        page = self.client.get(reverse('engineers:engineer_view'))
        self.assertEqual(page.status_code, 200)
        self.assertIn(b'Failed', page.content)
|
import SimpleITK as sitk
import numpy as np
import csv
import os
from PIL import Image
# perskaitom originalu komp. tomografijos paveiksla ir grazinam ji kaip numpy image
def load_itk_image(filename):
    """Read a CT volume and return (numpy_image, origin, spacing).

    Origin and spacing are reversed into (z, y, x) order so they match the
    axis order of the numpy array produced by GetArrayFromImage.
    """
    itk_image = sitk.ReadImage(filename)
    volume = sitk.GetArrayFromImage(itk_image)
    origin = np.array(itk_image.GetOrigin()[::-1])
    spacing = np.array(itk_image.GetSpacing()[::-1])
    return volume, origin, spacing
# perskaitom kandidatu CSV faila
def read_csv(filename):
    """Load a CSV file and return all rows as a list of string lists."""
    with open(filename, newline='') as handle:
        return list(csv.reader(handle))
# pakeiciam tikro pasaulio koordinates i voxel koordinates
def world_to_voxel_coord(world_coord, origin, spacing):
    """Convert world (mm) coordinates to voxel indices for a volume."""
    offset = np.absolute(world_coord - origin)
    return offset / spacing
# extract features from candidates
def normalize_planes(npz_array):
    """Window HU values to [-1000, 400] and rescale linearly into [0, 1]."""
    maxHU = 400.
    minHU = -1000.
    scaled = (npz_array - minHU) / (maxHU - minHU)
    return np.clip(scaled, 0., 1.)
# Find the absolute path to the data directory (two levels up from cwd).
data_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
data_path = os.path.abspath(os.path.join(data_path, os.pardir))
data_path = os.path.abspath(os.path.join(data_path, 'data'))
# Find the absolute paths of the subset* directories.
subset_dirs = []
for root, dirs, files in os.walk(data_path):
    for d in dirs:
        if d.startswith("subset"):
            subset_path = os.path.abspath(os.path.join(data_path, d))
            subset_dirs.append(subset_path)
all_mhd_file_paths = []
# Iterate the subsets, searching recursively for .mhd files.
for s in subset_dirs:
    for root, dirs, files in os.walk(s):
        for file in files:
            if file.endswith(".mhd"):
                all_mhd_file_paths.append(os.path.abspath(os.path.join(s, file)))
subsets_for_mhds = []
# Record the subset number for each .mhd file (first char after "subset").
for path in all_mhd_file_paths:
    subs = path.split("subset")
    subsets_for_mhds.append(subs[1][0])
# Absolute path of the candidates file.
candidates_path = os.path.abspath(os.path.join(data_path, 'candidates_V2.csv'))
# Read the candidates from the CSV file.
cands = read_csv(candidates_path)
pics_path = os.path.abspath(os.path.join(data_path, 'pics'))
if not os.path.exists(pics_path):
    os.makedirs(pics_path)
# Save the extracted patches to disk, one image per candidate.
i = 0
voxel_width = 128
for cand in cands[1:]:
    j = 0
    for path in all_mhd_file_paths:
        if path.endswith(cand[0] + ".mhd"):
            numpy_image, numpy_origin, numpy_spacing = load_itk_image(path)
            # Candidate columns: seriesuid, X, Y, Z -> world (z, y, x).
            world_coord = np.asarray([float(cand[3]), float(cand[2]), float(cand[1])])
            voxel_coord = world_to_voxel_coord(world_coord, numpy_origin, numpy_spacing)
            patch = numpy_image[int(voxel_coord[0]),
                    int(voxel_coord[1] - voxel_width / 2):int(voxel_coord[1] + voxel_width / 2),
                    int(voxel_coord[2] - voxel_width / 2):int(voxel_coord[2] + voxel_width / 2)]
            patch = normalize_planes(patch)
            # make sure that array does not go out of bounds
            # NOTE(review): this condition sums the slice endpoints and
            # compares to voxel_width — it does not actually bound-check the
            # slices against the image shape; verify the intended check.
            if (int(voxel_coord[1] - voxel_width / 2) + int(voxel_coord[1] + voxel_width / 2) > voxel_width) and \
                    (int(voxel_coord[2] - voxel_width / 2) + int(voxel_coord[2] + voxel_width / 2) > voxel_width):
                Image.fromarray(patch * 255).convert('L').save(os.path.join(pics_path, 'candidate_' + str(i) +
                                                                            '_subset_' + subsets_for_mhds[j]
                                                                            + '_class_' + cand[4] + '.tiff'))
            break
        j += 1
    i += 1
|
import numpy as np
# import tensorflow as tf
# import keras
import random
import time
import pygame
import sys
LEFT, UP, RIGHT, DOWN = 0, 1, 2, 3
gridSize = (8, 8)


class Game():
    """Headless snake game on a gridSize board.

    Coordinates are (row, col); LEFT/RIGHT move along columns, UP/DOWN along
    rows. segments[0] is the head; apple never overlaps the snake.
    """
    def __init__(self):
        self.direction = LEFT
        self.length = 1
        self.segments = [
            (gridSize[0]//2, gridSize[1]//2)
        ]
        self.apple = (gridSize[0]//2-1, gridSize[1]//2-1)
        self.gameOver = False
        self.score = 0

    def update(self, action):
        """Advance one tick: turn toward action, move the head, and handle
        wall/self collisions and apple consumption."""
        self.changeDirection(action)
        oldHead = self.segments[0]
        newHead0, newHead1 = oldHead
        if self.direction == LEFT:
            newHead1 -= 1
        elif self.direction == RIGHT:
            newHead1 += 1
        elif self.direction == UP:
            newHead0 -= 1
        elif self.direction == DOWN:
            newHead0 += 1
        newHead = (newHead0, newHead1)
        # Self collision (tail cell excluded — it moves away this tick)
        # or head off screen.
        if newHead in self.segments[:-1]:
            self.gameOver = True
        if newHead0 in (-1, gridSize[0]) or newHead1 in (-1, gridSize[1]):
            self.gameOver = True
        ate = newHead == self.apple
        if not ate:
            self.segments.pop()  # no growth: the tail advances
        if not self.gameOver:
            self.segments.insert(0, newHead)
        if ate:
            self.score += 1
            # BUG FIX: the apple used to be respawned *before* the new head
            # was inserted, so resetApple() could place it on the head's cell
            # (the original comment warned about exactly this). Respawning
            # after insertion lets resetApple() see the full snake.
            self.resetApple()

    def evaluate(self):
        """Heuristic value: reward length, penalize distance to the apple."""
        if len(self.segments) == 0:
            return -100
        return 100*len(self.segments)-self.dist(self.segments[0], self.apple)

    def dist(self, a, b):
        """Manhattan distance between two cells."""
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    def changeDirection(self, newDir):
        self.direction = newDir

    def resetApple(self):
        """Place the apple on a uniformly random cell not covered by the snake."""
        while True:
            self.apple = (random.randrange(0, gridSize[0]), random.randrange(0, gridSize[1]))
            if self.apple not in self.segments:
                break

    def getScore(self):
        return self.score
class SnakeEnv():
    """Thin gym-like wrapper around Game exposing reset/step/observe."""
    def __init__(self):
        self.game = Game()

    def reset(self):
        """Start a fresh game and return its initial observation."""
        self.game = Game()
        return self.observe(self.game)

    def step(self, action):
        """Apply action; return (observation, reward, done), where the reward
        is the change in the game's heuristic evaluation."""
        before = self.game.evaluate()
        self.game.update(action)
        reward = self.game.evaluate() - before
        return self.observe(self.game), reward, self.game.gameOver

    def observe(self, game):
        """
        return board state either as board or in form snake state, apple pos
        (this implementation returns the actor-state form).
        """
        return [
            game.segments,
            game.direction,
            game.apple,
        ]
def dumb_move_from_obs(obs):
    """
    used to test SnakeEnv
    simplistic move returned from relative positions of head, apple, direction:
    look one step ahead for each of the three turns (left/straight/right) and
    pick the direction with the best score.
    """
    segments = obs[0].copy()
    direction = obs[1]
    apple = obs[2]

    def manhattan(p, q):
        return abs(p[0] - q[0]) + abs(p[1] - q[1])

    def score_state(state):
        # Crash (body hit or off-grid) gets the worst possible score;
        # otherwise prefer being closer to the apple.
        head = state[0][0]
        crashed = (
            head in state[0][1:-1]
            or not (0 <= head[0] < gridSize[0])
            or not (0 <= head[1] < gridSize[1])
        )
        if crashed:
            return -(gridSize[0] + gridSize[1])
        return -manhattan(head, state[2])

    def shift(head, heading):
        delta = {LEFT: (0, -1), RIGHT: (0, 1), UP: (-1, 0), DOWN: (1, 0)}[heading]
        return (head[0] + delta[0], head[1] + delta[1])

    scores = np.array([0, 0, 0])
    for idx in range(3):
        heading = (direction + idx - 1) % 4
        lookahead = [shift(segments[0], heading)] + segments
        scores[idx] = score_state([lookahead, heading, apple])
    best = np.argmax(scores)
    # best in {0,1,2} maps back to a turn of -1/0/+1 from the current heading.
    return (direction + best - 1) % 4
def frame_from_obs(obs):
    """Render an observation as a gridSize array: 1=snake, -1=apple, 0=empty."""
    frame = np.zeros(gridSize)
    for seg in obs[0]:
        frame[seg] = 1
    frame[obs[2]] = -1
    return frame
class gameDisplayer():
    """
    takes frames and displays
    Replays a recorded list of board frames in a pygame window.
    """
    def __init__(self, frames):
        self.blockSize = 30
        self.fps = 15
        self.clock = pygame.time.Clock()
        pygame.init()
        self.surface = pygame.display.set_mode(
            [gridSize[0]*self.blockSize, gridSize[1]*self.blockSize])
        pygame.display.set_caption("")
        self.frames = frames
        self.currentFrame = 0

    def run(self):
        """Loop forever: draw the current frame, advancing until the last."""
        while True:
            self.renderFrame(self.currentFrame)
            if self.currentFrame < len(self.frames)-1:
                self.currentFrame += 1
            self._checkEvents()
            self.clock.tick(self.fps)

    def renderFrame(self, i):
        """Draw frame i: green cells for the snake, red for the apple."""
        self.surface.fill((230, 230, 230))
        frame = self.frames[i]
        for col in range(gridSize[0]):
            for row in range(gridSize[1]):
                cell = pygame.Rect(self.blockSize*col, self.blockSize*row,
                                   self.blockSize, self.blockSize)
                if frame[col][row] == -1:
                    pygame.draw.rect(self.surface, "red", cell)
                elif frame[col][row] == 1:
                    pygame.draw.rect(self.surface, "green", cell)
        pygame.display.flip()

    def displayFrames(frames):
        # NOTE(review): dead stub (missing `self`); kept for compatibility.
        pass

    def _checkEvents(self):
        # Let the user close the window.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
def main():
    """Play one headless episode with the greedy policy, then replay it."""
    env = SnakeEnv()
    obs = env.reset()
    frames = []
    done = False
    while not done:
        obs, reward, done = env.step(dumb_move_from_obs(obs))
        frames.append(frame_from_obs(obs))
    gameDisplayer(frames).run()
# n_inputs = gridSize[0]*gridSize[1]
# model = keras.models.Sequential([
# keras.layers.Dense(5, activation="elu", input_shape=[n_inputs]),
# keras.layers.Dense(3, activation="softmax"),
# ])
# def play_one_step(env, obs, model, loss_fn):
# with tf.GradientTape() as tape:
# dir = model(obs.flatten()[np.newaxis])
# roll = tf.random.uniform([1,1])
# if roll < dir[0][0]:
# action = -1
# elif roll < dir[0][0] + dir[0][1]:
# action = 0
# else:
# action = 1
# y_target = np.array([0, 0, 0])
# y_target[np.argmax(dir)] = 1
# y_target = y_target[np.newaxis]
# loss = tf.reduce_mean(loss_fn(y_target, dir))
# grads = tape.gradient(loss, model.trainable_variables)
# obs, reward, done = env.step(action)
# return obs, reward, done, grads
# def play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):
# all_rewards = []
# all_grads = []
# for episode in range(n_episodes):
# current_rewards = []
# current_grads = []
# obs = env.reset()
# for step in range(n_max_steps):
# obs, reward, done, grads = play_one_step(env, obs, model, loss_fn)
# current_rewards.append(reward)
# current_grads.append(grads)
# if done:
# break
# all_rewards.append(current_rewards)
# all_grads.append(current_grads)
# return all_rewards, all_grads
# def discount_rewards(rewards, discount_factor):
# discounted = np.array(rewards)
# for step in range(len(rewards) - 2, -1, -1):
# discounted[step] += discounted[step + 1] * discount_factor
# return discounted
# def discount_and_normalize_rewards(all_rewards, discount_factor):
# all_discounted_rewards = [discount_rewards(rewards, discount_factor) for rewards in all_rewards]
# flat_rewards = np.concatenate(all_discounted_rewards)
# reward_mean = flat_rewards.mean()
# reward_std = flat_rewards.std()
# if reward_std == 0:
# print(flat_rewards)
# print("div by 0")
# return all_discounted_rewards
# # return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
# n_iterations = 50
# n_episodes_per_update = 50
# n_max_steps = 100
# discount_factor = 0.95
# optimizer = keras.optimizers.Adam(lr=0.01)
# # loss_fn = keras.losses.binary_crossentropy
# loss_fn = keras.losses.categorical_crossentropy
# for iteration in range(n_iterations):
# print(f"working on iteration {iteration}...")
# all_rewards, all_grads = play_multiple_episodes(env, n_episodes_per_update, n_max_steps, model, loss_fn)
# all_final_rewards = discount_and_normalize_rewards(all_rewards, discount_factor)
# all_mean_grads = []
# for var_index in range(len(model.trainable_variables)):
# mean_grads = tf.reduce_mean([
# final_reward*all_grads[episode_index][step][var_index]
# for episode_index, final_rewards in enumerate(all_final_rewards)
# for step, final_reward in enumerate(final_rewards)
# ], axis=0)
# all_mean_grads.append(mean_grads)
# optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))
# def render_policy_net(model, n_max_steps=200, seed=42):
# frames = []
# env = SnakeEnv()
# obs = env.reset()
# for step in range(n_max_steps):
# frames.append(obs)
# dir = model.predict(obs.flatten()[np.newaxis])
# roll = tf.random.uniform([1,1])
# if roll < dir[0][0]:
# action = -1
# elif roll < dir[0][0] + dir[0][1]:
# action = 0
# else:
# action = 1
# obs, reward, done = env.step(action)
# if done:
# break
# return frames
# frames = render_policy_net(model)
# g = gameDisplayer(frames)
# g.run()
# # while True:
# # obs, reward, done = env.step(random.randrange(-1,2))
# # frames.append(obs)
# # if done:
# # break
# # g = gameDisplayer(frames)
# # g.run()
# # while True:
# # head = env.game.segments[0]
# # apple = env.game.apple
# # if head[0] > apple[0]:
# # dir = LEFT
# # else:
# # dir = RIGHT
# # if head[1] > apple[1]:
# # dir = UP
# # elif head[1] < apple[1]:
# # dir = DOWN
# # obs, reward = env.step(dir)
# # print(obs, reward)
# # time.sleep(.1)
# Entry point: run a headless episode and replay it in a pygame window.
if __name__ == "__main__":
    main()
import os
import json
import jieba
import scipy
import random
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool
from sklearn.metrics.pairwise import cosine_similarity
import torch
import torch.nn as nn
TEST_FLAG = 'test_b'
class LoadDataset:
    """Loads the video-similarity dataset: the whole per-video dataframe,
    the pairwise similarity labels, and id-keyed lookup dicts."""

    def __init__(self, data_path, debug=False):
        """Read the dataset artifacts under *data_path*.

        Args:
            data_path: directory holding whole_df_new*.csv, label files
                and tag_list.txt.
            debug: when True, load the smaller *_debug variants.
        """
        self.data_path = data_path
        # One row per video; the 'flag' column marks the split
        # (pointwise / pairwise / test_a / test_b).
        if debug:
            self.df = pd.read_csv(f'{data_path}/whole_df_new_debug.csv', sep='\t', encoding='gb18030')
        else:
            self.df = pd.read_csv(f'{data_path}/whole_df_new.csv', sep='\t', encoding='gb18030')
        print('*' * 20)
        print(self.df.flag.unique())
        print('*' * 20)
        # if debug:
        #     self.df = pd.concat([
        #         self.df.loc[self.df.flag == 'pointwise'].sample(n=10000),
        #         self.df.loc[self.df.flag == 'test_b'],
        #         self.df.loc[self.df.flag == 'pairwise'],
        #     ]).reset_index(drop=True)
        # Ids are treated as strings everywhere downstream.
        self.df['id'] = self.df['id'].astype('str')
        print(f'whole_df: {len(self.df)}')
        # Human-labelled similarity pairs: (id1, id2, label).
        if debug:
            self.label_df = pd.read_csv(f'{data_path}/label_df_debug.csv', sep='\t')
        else:
            self.label_df = pd.read_csv(f'{data_path}/label.tsv', sep='\t', header=None)
            self.label_df.columns = ['id1', 'id2', 'label']
        self.label_df['id1'] = self.label_df['id1'].astype(str)
        self.label_df['id2'] = self.label_df['id2'].astype(str)
        print(f'label_df: {len(self.label_df)}')
        self.tag2id = self.get_tag2id()
        self.id2title, self.id2asr, self.id2frame_num, self.id2tag, self.id2cate = self.get_dict_info()
        print(f'id2title:{len(self.id2title)}')
        print(f'id2asr:{len(self.id2asr)}')
        print(f'id2frame_num:{len(self.id2frame_num)}')
        print(f'id2tag:{len(self.id2tag)}')
        print(f'id2cate:{len(self.id2cate)}')
        # Partition the ids by split flag.
        self.pointwise_ids = self.df.loc[self.df.flag == 'pointwise'].id.values.tolist()
        self.pairwise_ids = self.df.loc[self.df.flag == 'pairwise'].id.values.tolist()
        self.test_ids = self.df.loc[self.df.flag == TEST_FLAG].id.values.tolist()
        print(f'Pointwise|Pairwise|Test id nums={len(self.pointwise_ids)}|{len(self.pairwise_ids)}|{len(self.test_ids)}')
        # Expected video feature shape — presumably (frames, embedding_dim);
        # TODO confirm against the feature extractor.
        self.video_shape = [32, 1536]

    def get_tag2id(self):
        """Map each tag string in tag_list.txt to a dense integer id."""
        with open(f'{self.data_path}/tag_list.txt', 'r') as f:
            tag_list = f.read().strip().split('\n')
        print('tag_num: ', len(tag_list))
        tag2id = {}
        for idx, tag in enumerate(tag_list):
            tag2id[tag] = idx
        return tag2id

    def get_dict_info(self):
        """Build id-keyed lookup dicts from the whole dataframe.

        Returns:
            (id2title, id2asr, id2frame_num, id2tag, id2cate); the last
            two cover only labelled rows (test_a/test_b excluded).
        """
        id2title = {}
        id2asr = {}
        id2frame_num = {}
        for _id, _title, _asr_text, _num_frames in tqdm(self.df[['id', 'title', 'asr_text', 'num_frames']].values, desc='Making Whole Id Dicts'):
            id2title[_id] = _title
            id2asr[_id] = _asr_text
            id2frame_num[_id] = _num_frames
        id2tag = {}
        id2cate = {}
        for _id, _tag, _category in tqdm(self.df.loc[~self.df.flag.isin(['test_a', 'test_b'])][['id', 'tag_id', 'category_id']].values, desc='Making Train Id Dicts'):
            id2tag[_id] = _tag
            id2cate[_id] = _category
        return id2title, id2asr, id2frame_num, id2tag, id2cate
def jieba_seg(df):
    """Tokenize every string in *df* with jieba Chinese word segmentation."""
    return df.apply(jieba.lcut)
def parallelize_df_func(df, func, num_partitions=16, n_jobs=8):
    """Apply *func* to partitions of *df* in parallel and concat the results.

    Args:
        df: DataFrame/Series to process.
        func: picklable callable applied to each partition.
        num_partitions: number of chunks *df* is split into.
        n_jobs: worker-process count.

    Returns:
        Concatenation of the per-partition results.
    """
    df_split = np.array_split(df, num_partitions)
    pool = Pool(n_jobs)
    try:
        df = pd.concat(pool.map(func, df_split))
        pool.close()  # normal shutdown: no more tasks
    except Exception:
        # Fix: the original leaked worker processes when map() raised.
        pool.terminate()
        raise
    finally:
        pool.join()
    return df
def seed_everything(seed=42):
    """Seed every RNG source (python, hash, numpy, torch) for reproducibility."""
    seed_text = str(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = seed_text
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels (may be slower).
    torch.backends.cudnn.deterministic = True
def save_pickle(dic, save_path):
    """Serialize *dic* to *save_path* with pickle (binary mode)."""
    with open(save_path, 'wb') as handle:
        pickle.dump(dic, handle)
def load_pickle(load_path):
    """Deserialize and return the pickled object stored at *load_path*."""
    with open(load_path, 'rb') as handle:
        return pickle.load(handle)
def save_json(save_path, dic):
    """Write *dic* to *save_path* as JSON text."""
    with open(save_path, 'w') as handle:
        json.dump(dic, handle)
def load_json(file_path):
    """Read and return the JSON document stored at *file_path*."""
    with open(file_path, 'r') as handle:
        return json.load(handle)
class AverageMeter:
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class EarlyStopping:
    """Stop training once the monitored metric stops improving.

    After `patience` consecutive epochs without improvement,
    `early_stop` becomes True. Model weights are checkpointed on every
    improvement.
    """

    def __init__(self, patience=7, mode="max", delta=0.):
        """
        Args:
            patience: epochs tolerated without improvement before stopping.
            mode: "max" if larger scores are better, "min" otherwise.
            delta: minimum improvement margin (kept for API compatibility;
                not currently applied in the comparison — see __call__).
        """
        self.patience = patience
        self.counter = 0           # consecutive non-improving epochs
        self.mode = mode
        self.best_score = None     # best sign-adjusted score so far
        self.early_stop = False
        self.delta = delta
        # np.inf (lowercase): np.Inf was removed in NumPy 2.0.
        if self.mode == "min":
            self.val_score = np.inf
        else:
            self.val_score = -np.inf

    def __call__(self, epoch_score, model, model_path):
        """Record this epoch's score; checkpoint on improvement."""
        # Normalize so "bigger is better" regardless of mode.
        if self.mode == "min":
            score = -1.0 * epoch_score
        else:
            score = np.copy(epoch_score)
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(epoch_score, model, model_path)
        elif score < self.best_score:  # + self.delta
            self.counter += 1
            print('EarlyStopping counter: {} out of {}'.format(self.counter, self.patience))
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            # ema.apply_shadow()
            self.save_checkpoint(epoch_score, model, model_path)
            # ema.restore()
            self.counter = 0

    def save_checkpoint(self, epoch_score, model, model_path):
        """Persist model weights when *epoch_score* is a finite number.

        Bug fix: the original `epoch_score not in [-np.inf, np.inf,
        -np.nan, np.nan]` never filtered NaN, because NaN != NaN makes
        the membership test pass; np.isfinite rejects both inf and NaN.
        """
        if np.isfinite(epoch_score):
            print('Validation score improved ({} --> {}). Saving model!'.format(self.val_score, epoch_score))
            torch.save(model.state_dict(), model_path)
            self.val_score = epoch_score
def caculate_spearmanr_score(pair_df, emb_dict):
    """Spearman correlation between cosine-similarity predictions and labels.

    Args:
        pair_df: DataFrame with 'id1', 'id2' and 'label' columns.
        emb_dict: mapping from string id to embedding vector.
    """
    predictions = [
        cosine_similarity([emb_dict[str(a)]], [emb_dict[str(b)]])[0][0]
        for a, b in pair_df[['id1', 'id2']].values
    ]
    return scipy.stats.spearmanr(predictions, pair_df.label.values).correlation
class FocalBCELoss(nn.Module):
    """Binary focal loss over probabilities (inputs expected in [0, 1])."""

    def __init__(self, gamma=2, alpha=0.65, size_average=True):
        """gamma: focusing exponent; alpha: positive-class weight;
        size_average: mean-reduce when True, else sum."""
        super(FocalBCELoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.size_average = size_average

    def forward(self, logits, targets):
        preds = logits.reshape(-1)
        labels = targets.reshape(-1)
        positive = labels >= 0.5
        # p_t: probability assigned to the true class
        pt = torch.where(positive, preds, 1 - preds)
        # alpha weighting per class
        weight = torch.where(positive, self.alpha, 1 - self.alpha)
        # clamp only inside the log to avoid -inf
        log_pt = torch.log(torch.clamp(pt, 1e-4, 1 - 1e-4))
        focal = -weight * log_pt * (1 - pt) ** self.gamma
        return focal.mean() if self.size_average else focal.sum()
class SpearmanLoss(nn.Module):
    """Variance-ratio surrogate loss.

    NOTE(review): the original expression's operator precedence gives
    var(x - y) / std(x) * std(y), i.e. (var / std_x) * std_y; this
    re-implementation preserves that exact behavior — confirm whether
    var / (std_x * std_y) was intended.
    """

    def __init__(self, size_average=True):
        """size_average: mean-reduce when True, else sum."""
        super(SpearmanLoss, self).__init__()
        self.size_average = size_average

    def forward(self, logits, targets):
        residual_var = torch.var(logits - targets, dim=-1)
        loss = residual_var / torch.std(logits, dim=-1) * torch.std(targets, dim=-1)
        return loss.mean() if self.size_average else loss.sum()
# Generated by Django 2.1 on 2018-08-20 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Proposed_law table."""

    initial = True

    # No prior migrations — this is the app's first.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Proposed_law',
            fields=[
                # Auto-incrementing integer primary key (Django default).
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250, verbose_name='titulo')),
                ('description', models.TextField(verbose_name='descrição')),
            ],
            options={
                # Portuguese display names for the admin interface.
                'verbose_name': 'proposta de lei',
                'verbose_name_plural': 'propostas de leis',
            },
        ),
    ]
|
import pandas as pd
import numpy as np
import itertools
import os
import logging
import datetime
# Resolve the log file as <module_dir>/../logs/<YYYY-MM-DD>.log. Built with
# os.path.join so the path works on any OS — the original hard-coded
# Windows '\\' separators.
base_path = os.path.dirname(os.path.abspath(__file__))
today_file = str(datetime.date.today())
work_file = os.path.join('..', 'logs', today_file + '.log')
log_path = os.path.join(base_path, work_file)
logging.basicConfig(level=logging.DEBUG,  # minimum level written to the log
                    format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    filename=log_path,
                    # 'a' appends across runs; 'w' would truncate each run.
                    filemode='a',
                    )
class Cal_Words(object):
    """Word/phrase frequency statistics over scraped keyword/title files."""

    def word_count(self, Split_Keywords):
        '''
        Count the frequency of each token.

        Args:
            Split_Keywords: Series of tokens whose frequencies are counted.
        '''
        Word_Count = Split_Keywords.value_counts()  # occurrences per token
        Word_Count = Word_Count.reset_index()  # turn the Series into a DataFrame
        Word_Count.columns = ['keywords', 'times']
        return Word_Count

    def split_words_one(self, keywords):
        '''
        Split each keyword string in the series into single-word tokens.

        :param keywords: pandas Series of strings
        :return: Series of token lists
        '''
        # Normalize punctuation/separators to spaces, then split on spaces.
        Split_Keywords = keywords.apply(
            lambda x: x.replace(',', ' ').replace(':', ' ').replace(')', ' ').replace('(', ' ').replace(',',
                                                                                                        ' ').replace(
                '/', ' ').replace('\'', '').replace('\"', '').replace('-', ' ').replace('+', ' ').replace('#',
                                                                                                          '').replace(
                ' ', ' ').replace(' ', ' ').strip()).apply(lambda x: x.split(' '))
        return Split_Keywords

    def words_number_count(self, keywords):
        '''
        Count how many single words each search phrase contains.

        Args:
            keywords: Series of phrases to measure.
        '''
        Split_Keywords = self.split_words_one(keywords)
        words_number = Split_Keywords.str.len()
        return words_number

    def get_iter(self, keywords, k):
        '''
        Build every k-word window from a token list.

        keywords:: list of single-word tokens
        k:: window size — phrases of k consecutive words
        return:: list of k-word phrases
        '''
        x_len = len(keywords)
        Lst = []
        for i in range(x_len - k + 1):
            Lst.append(' '.join(keywords[i:i + k]))
        return Lst

    def split_keywords_k(self, keywords, k):
        '''
        Split phrases into k-word windows.

        Args:
            keywords: result of get_keywords, a Series
            k: window size in words
        '''
        words_num = self.words_number_count(keywords)  # word count per phrase, Series
        Index = words_num >= k  # phrases long enough to yield a k-window
        # if k exceeds every phrase length, return -1
        if sum(Index) == 0:
            return -1
        # indices of the phrases that need splitting
        Index = np.array(keywords.index[Index])
        # tokenize into single words first
        Split_Keywords = self.split_words_one(keywords)
        # keep only the phrases to process
        Split_Keywords = Split_Keywords[Index]
        # build the k-word windows
        Split_Keywords_k = Split_Keywords.apply(lambda x: self.get_iter(x, k))
        list_1 = list(Split_Keywords_k)
        list_2 = list(itertools.chain(*list_1))
        Split_Keywords_k = pd.Series(list_2)
        return Split_Keywords_k

    def split_keywords_k2(self, keywords_reviews, k):
        '''
        Split titles into k-word windows, keeping review counts attached.

        Args:
            keywords_reviews: DataFrame with 'title' and 'reviews' columns
            k: window size in words
        '''
        keywords_copy = keywords_reviews.add_suffix('_copy')
        keywords = keywords_reviews['title']
        words_num = self.words_number_count(keywords)  # word count per title, Series
        Index = words_num >= k  # titles long enough to yield a k-window
        # if k exceeds every title length, return -1
        if sum(Index) == 0:
            return -1
        # indices of the titles that need splitting
        Index = np.array(keywords.index[Index])
        # tokenize into single words first
        Split_Keywords = self.split_words_one(keywords)
        # keep only the titles to process
        Split_Keywords = Split_Keywords[Index]
        # build the k-word windows
        Split_Keywords_k = Split_Keywords.apply(lambda x: self.get_iter(x, k))
        # re-attach title and reviews, keep the phrase list plus review count
        Split_Keywords_k = keywords_copy.join(Split_Keywords_k, how='inner')[['title', 'reviews_copy']]
        list1 = Split_Keywords_k.values.tolist()
        # expand: one (phrase, reviews) row per k-word phrase of each title
        title_review_lst = [[title, list_value[1]] for list_value in list1
                            for title in list_value[0]]
        title_review_frame = pd.DataFrame(title_review_lst)
        title_review_frame.columns = ['title', 'reviews']
        return title_review_frame

    def cal_save_words(self, df, writer):
        '''
        Aggregate reviews and phrase counts and save them to excel.

        :param df: DataFrame to process
        :param writer: ExcelWriter for the output workbook
        :return:
        '''
        # one sheet per window size 1..10
        for i in range(1, 11):
            Split_Keywords = self.split_keywords_k2(df, i)  # DataFrame with keyword and reviews
            if type(Split_Keywords) != int:  # titles contain at least i words
                Word_reviews = Split_Keywords.groupby(['title']).sum()  # total reviews per phrase
                Word_count = Split_Keywords['title'].value_counts()
                Word_count.name = 'count'
                word_statis = Word_reviews.join(Word_count, how='outer')
                word_statis = word_statis.sort_values(by=['count', 'reviews'], ascending=False)
                word_statis.to_excel(writer, sheet_name=str(i) + '个词')
        writer.save()
        writer.close()

    def statis_word_reviews(self, file_path):
        '''
        Save word-frequency reports for every txt file in a folder.

        :param file_path: folder path
        :return:
        '''
        os.chdir(file_path)
        path_all = os.listdir()
        path_txt = []
        logging.info('开始读取{path}中的文件'.format(path=file_path))
        # keep only the .txt files
        for i in path_all:
            if i[-3:] == 'txt':
                path_txt.append(i)
        keyword_all = pd.DataFrame()
        for path in path_txt:
            keywords = pd.read_csv(path, sep='\t')
            if len(keywords)>0:
                # normalize review counts: strip separators, 'None' -> 0
                keywords['reviews'] = keywords['reviews']. \
                    replace(',', '').replace('None', '0').astype(np.int64)
                keyword_all = pd.concat([keywords, keyword_all], axis=0)
                writer = pd.ExcelWriter(path[:path.find('.')] + '词频统计报告.xls')
                logging.info('开始对{file}进行词频统计'.format(file=path))
                self.cal_save_words(keywords, writer)
            else:
                logging.debug('此文件{file}未成功抓取到title'.format(file=path))
        logging.info('文件夹下文件数量为:{length}'.format(length=len(path_txt)))
        # with several files, also produce a combined report
        if len(path_txt)>1:
            keyword_all = keyword_all.reset_index()
            logging.info('开始进行全部词频统计:{path}'.format(path=file_path))
            writer = pd.ExcelWriter('全部词频统计报告.xls')
            self.cal_save_words(keyword_all, writer)

    def statis_word_numbers(self, file_path):
        '''
        Save word-frequency statistics for all_titles.txt in a folder.

        :param file_path: folder path
        :return:
        '''
        read_path = file_path + '/all_titles.txt'
        path = file_path + '/词频统计.xls'
        keywords = pd.read_csv(read_path, sep='\t')['title']
        if len(keywords) == 0:
            logging.debug('未成功抓取到title')
            return
        writer = pd.ExcelWriter(path)
        # one sheet per window size 1..10
        for i in range(1, 11):
            Split_Keywords = self.split_keywords_k(keywords, i)
            if type(Split_Keywords) != int:  # titles contain at least i words
                Word_Count = self.word_count(Split_Keywords)
                Word_Count.to_excel(writer, sheet_name=str(i) + '个词')
        writer.save()
        writer.close()
if __name__ == '__main__':
    cal_words = Cal_Words()
    # keywords = pd.read_table('D:/cat food_page_title.txt', header=None).iloc[:, 1]
    # word_number = cal_words.words_number_count(keywords)
    # words_k = cal_words.split_keywords_k(keywords, 3)
    # print(words_k)
    # word_count = cal_words.word_count(words_k)
    # print(word_count)
    # Run the review-weighted frequency report over the scraped folder.
    cal_words.statis_word_reviews('C:/Users/86178/Documents/Tencent Files/2880332577/FileRecv/manyinputs_')
import numpy as np
import imutils
import cv2
def order_points(pts):
    """Order 4 corner points as top-left, top-right, bottom-right,
    bottom-left (float32 array).

    The top-left corner has the smallest x+y sum and the bottom-right the
    largest; the top-right has the smallest y-x difference and the
    bottom-left the largest.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    sums = pts.sum(axis=1)
    diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(sums)]   # top-left
    ordered[2] = pts[np.argmax(sums)]   # bottom-right
    ordered[1] = pts[np.argmin(diffs)]  # top-right
    ordered[3] = pts[np.argmax(diffs)]  # bottom-left
    return ordered
def four_point_transform(image, pts):
    """Warp *image* so the quadrilateral *pts* becomes a top-down rectangle."""
    # corners in a consistent tl, tr, br, bl order
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def _edge_len(a, b):
        # Euclidean distance between two corner points
        return np.sqrt(((a[0] - b[0]) ** 2) + ((a[1] - b[1]) ** 2))

    # output width/height: the longer of each pair of opposite edges
    maxWidth = max(int(_edge_len(br, bl)), int(_edge_len(tr, tl)))
    maxHeight = max(int(_edge_len(tr, br)), int(_edge_len(tl, bl)))

    # destination corners of the "birds eye view", same tl/tr/br/bl order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    # compute the perspective transform matrix and apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
######################################################
# blob detection parameters
######################################################
# Tuned for filled answer-bubble sized blobs; commented values are the
# detector defaults the author started from.
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 10  # 10
params.maxThreshold = 200  # 200
params.filterByArea = True  # True
params.minArea = 400
params.maxArea = 1000
params.filterByCircularity = False
params.minCircularity = 0.0
params.filterByConvexity = False  # True
params.minConvexity = 0.87
params.filterByInertia = False
params.minInertiaRatio = 0.01
############################################################################
# load each image, convert to grayscale, blur slightly, edge-detect, and
# locate the document (answer sheet) contour — identical pipeline for both
############################################################################
def find_document(path):
    """Return (gray, edged, docCnt) for the image at *path*.

    docCnt is the 4-point approximation of the largest quadrilateral
    contour (assumed to be the sheet of paper), or None if none is found.
    """
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    black = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 10)
    edged = cv2.Canny(black, 75, 200)
    cnts = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # grab_contours handles both the 2-tuple (OpenCV 2/4) and 3-tuple
    # (OpenCV 3) return shapes; the original `[0] if is_cv2() else [1]`
    # picked the hierarchy, not the contours, under OpenCV 4.
    cnts = imutils.grab_contours(cnts)
    docCnt = None
    if len(cnts) > 0:
        # largest contours first
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        for c in cnts:
            # a 4-point polygonal approximation is assumed to be the paper
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            if len(approx) == 4:
                docCnt = approx
                break
    return gray, edged, docCnt

gray1, edged1, docCnt1 = find_document('answerkeyfilled.jpg')
cropped11 = cv2.resize(edged1, (1600, 900))
gray2, edged2, docCnt2 = find_document('answerkey.jpg')
cropped22 = cv2.resize(edged2, (1600, 900))
# Warp both sheets to a top-down view using the detected paper contours.
warped1 = four_point_transform(gray1, docCnt1.reshape(4, 2))
warped2 = four_point_transform(gray2, docCnt2.reshape(4, 2))
######################################################
# prepare both images
######################################################
# Blur + adaptive threshold to get clean binary versions of both sheets.
RGB1 = cv2.GaussianBlur(warped1, (5,5), 0)
RGB1 = cv2.adaptiveThreshold(RGB1, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 10)
cropped1 = cv2.resize(RGB1, (500, 1000))
RGB2 = cv2.GaussianBlur(warped2, (5,5), 0)
RGB2 = cv2.adaptiveThreshold(RGB2, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 10)
cropped2 = cv2.resize(RGB2, (500, 1000))
######################################################
# xor images and prepare for blob detection
######################################################
# Pixels that differ between the two sheets become dark blobs after the
# inverted XOR; threshold + blur merges them into detectable shapes.
s = ~(RGB1 ^ RGB2)
s = cv2.adaptiveThreshold(s, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 10)
s = cv2.GaussianBlur(s, (75,75), 10)
######################################################
# run blob detection
######################################################
detector = cv2.SimpleBlobDetector_create(params)
akeypoints = detector.detect(s)
answerkeypoints = cv2.drawKeypoints(s, akeypoints, np.array([]), (0, 0, 255),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow('result', answerkeypoints)
# NOTE(review): blob count is halved — presumably each wrong answer
# yields two blobs (marked bubble + expected bubble); confirm.
numberofincorrect = int(len(akeypoints)/2)
print(numberofincorrect)
#gui menu
back = cv2.imread('image.jpg',1)
back = cv2.resize(back, (450, 50))
cv2.putText(back, "This Student has " + str(numberofincorrect) + " Errors",(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (50,50,50), 2)
cv2.imshow('Menu', back)
cv2.waitKey(0)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_vif import plugin
from os_vif import objects
from vif_plug_ovs import processutils
from vif_plug_ovs import linux_net
PLUGIN_NAME = 'hw_veb'
class HardwareVebPlugin(plugin.PluginBase):
    """
    A VIF type that plugs a virtual machine network interface into a SR-IOV
    virtual function (VF) using a macvtap VNIC type.

    Fix: the base class was referenced as `base.PluginBase`, but only
    `plugin` is imported in this module — `plugin.PluginBase` is the
    imported name.
    """

    def __init__(self, **config):
        processutils.configure(**config)

    def get_supported_vifs(self):
        # Advertise support for version 1.0 of this plugin's VIF object.
        return set([objects.PluginVIFSupport(PLUGIN_NAME, '1.0', '1.0')])

    def plug(self, instance, vif):
        """Set the VF's VLAN (and MAC) when plugging a macvtap VIF."""
        # NOTE(review): `vnic_types` is not imported anywhere in this
        # module and would raise NameError at runtime — confirm the
        # intended constants module before relying on plug/unplug.
        if vif.vnic_type == vnic_types.MACVTAP:
            linux_net.set_vf_interface_vlan(vif.profile['pci_slot'],
                                            mac_addr=vif.address,
                                            vlan=vif.vlan)

    def unplug(self, vif):
        """Reset the VF's VLAN when unplugging a macvtap VIF."""
        if vif.vnic_type == vnic_types.MACVTAP:
            # The ip utility doesn't accept the MAC 00:00:00:00:00:00.
            # Therefore, keep the MAC unchanged. Later operations on
            # the same VF will not be affected by the existing MAC.
            linux_net.set_vf_interface_vlan(vif.profile['pci_slot'],
                                            mac_addr=vif.address)
|
# coding: utf-8
# In[13]:
import pywrapfst as ofst
import easygui
FST_PATH='Lexicon/FSTs/'
categories =['Verbs','Nouns','Pronouns','Adjectives','Adverbs','Propositions','Auxillary Verbs']
# In[40]:
def set_input_fsm(input_string,ifst,isys):
    """Feed a linear acceptor for *input_string* into compiler *ifst*.

    Expects 'word+Tag1+Tag2...': one arc per character of the word, then
    one arc per '+'-prefixed property. Returns the compiled FST, or None
    if compilation fails. Python 2 only (uses `print >>` chevron syntax).
    """
    i=0
    # one arc per character up to the first '+'
    while input_string[i]!='+':
        #print(str(i)+ " " + str(i+1)+ " " + input_string[i])
        print >> ifst, str(i)+ " " + str(i+1)+ " " + input_string[i]
        i+=1
    # the remaining '+'-separated items become '+Prop' arcs
    props=input_string[i+1:].split('+')
    for p in props:
        #print(str(i) + " " + str(i+1)+" +" +p)
        print >> ifst, str(i) + " " + str(i+1)+" +" +p
        i+=1
    # final state line
    print >> ifst, str(i)
    #print(i)
    try:
        return ifst.compile()
    except:
        return None
def transduce((fst,ifst,isys),input_string):
    """Compose the input string's acceptor with category FST *fst* and
    return the transduced surface string ('' if the input failed to
    compile or nothing matched). Python 2 only (tuple-unpacking params).
    """
    a=set_input_fsm(input_string,ifst,isys)
    if a==None:
        return "";
    else:
        b=ofst.compose(a,fst)
        b.set_input_symbols(isys)
        b.set_output_symbols(isys)
        # keep only the output side of the composition
        b.project(project_output=True)
        lines=b.text(acceptor=True).split("\n")
        output=""
        for l in lines:
            sp=l.split("\t")
            # third column of an arc line is the label; skip epsilons
            if len(sp)==3 and sp[2]!='<eps>':
                output+=sp[2]
        return output
def get_fst(name):
    """Load the FST for a word category plus a matching acceptor compiler
    and its symbol table; returns (fst, compiler, symbols)."""
    folder = FST_PATH + name + '/'
    syms = ofst.SymbolTable.read_text(folder + 'symbols.txt')
    fst = ofst.Fst.read(folder + name.lower() + '.fst')
    compiler = ofst.Compiler(isymbols=syms, osymbols=syms, acceptor=True)
    return fst, compiler, syms
# In[46]:
def run(input_string):
    """Transduce *input_string* through every category FST; return the
    newline-terminated matches concatenated (prints a notice if none)."""
    collected = []
    for category in categories:
        result = transduce(get_fst(category), input_string)
        if result != "":
            collected.append(result + "\n")
    if not collected:
        print("No match!")
    return "".join(collected)
# In[56]:
# Prompt for a lexical form (e.g. "word+Tag") and show per-category matches.
a=easygui.enterbox(title="Enter lexical form")
if '+' not in a:
    # without a '+'-prefixed POS tag the FSTs cannot match anything
    easygui.textbox(text="No match, because you did not enter any POS tag")
elif a!=None:
    results=run(a)
    if len(results)>0:
        easygui.textbox(text=results)
    else:
        easygui.textbox(text="No match!")
# In[ ]:
|
from kafka import KafkaConsumer
import yolo
import mongoDB
import storage_helper
import json
import os
# Consume messages from the 'demo' topic: each message names a stored blob;
# download it, run YOLO analysis, persist the report, then delete the file.
consumer = KafkaConsumer('demo', bootstrap_servers="13.233.230.133:9092")
for msg in consumer:
    print(msg)
    data = json.loads(msg.value)
    key = data["blob_id"]  # blob/file name in remote storage
    storage_helper.download_file(key)
    analysis_report = yolo.analyze_image(key)
    mongoDB.save_objects_data(key,analysis_report)
    # remove the local copy once analyzed
    os.remove(key)
|
from django.urls import path, include
from . import views
# URL routes for this app: the index page and the slider view.
urlpatterns = [
    path('', views.index),
    path('slider/', views.slider)
]
|
import sys
import arguments
import numpy as np
import time
from Instance import Instance
def main(args):
    """Run an estimation-of-distribution loop on the problem instance in
    args.file for args.maxiters generations and print the best solution."""
    # Reading file and creating instance problem
    kwargs = {"maxiters": args.maxiters, "pop_size": args.size}
    file = "./data/" + args.file
    instance = Instance.from_file(file, **kwargs)
    t1 = time.time()
    # Initial population
    instance.init()
    # Repeat
    for _ in range(args.maxiters):
        # Calculate frequencies
        instance.calc_frequencies()
        # Calculate joint frecuencies
        instance.calc_joint_frequencies()
        # Calculate mutual information
        instance.mutual_info()
        # Create chain model
        instance.calc_chain_model()
        # Sampling
        instance.sampling()
    t2 = time.time()
    # NOTE(review): computed in milliseconds but printed with an 's' suffix
    ex_time = (t2 - t1) * 1000.0
    print(instance.best, instance.fitness(instance.best), f'{ex_time}s')
if __name__ == "__main__":
    # Guard the entry point so importing this module doesn't start a run.
    main(arguments.defineArgs())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 20:03:57 2020
@author: WELCOME
"""
"""
Time Complexity - O(N)
Space - O(N)
"""
class Solution:
    def maxSumAfterPartitioning(self, A: "List[int]", K: int) -> int:
        """LeetCode 1043: partition A into contiguous chunks of length at
        most K, replacing every value in a chunk by the chunk's maximum;
        return the largest achievable total sum.

        Bottom-up DP, O(n*K) time / O(n) space. The annotation is quoted
        because `List` was never imported in this file — the original
        bare `List[int]` raised NameError when the class was defined.
        """
        n = len(A)
        # best[i] = best sum achievable for the prefix A[:i]
        best = [0] * (n + 1)
        for i in range(1, n + 1):
            chunk_max = 0
            # try every chunk length k ending at position i
            for k in range(1, min(K, i) + 1):
                chunk_max = max(chunk_max, A[i - k])
                best[i] = max(best[i], best[i - k] + chunk_max * k)
        return best[n]
from kafka import KafkaProducer
import json
class MyKafka(object):
    """Thin wrapper around KafkaProducer that publishes JSON payloads."""

    def __init__(self, kafka_brokers):
        """Connect a producer to *kafka_brokers*; values are serialized
        to UTF-8-encoded JSON before sending."""
        self.producer = KafkaProducer(
            value_serializer=lambda v: json.dumps(v).encode('utf-8'),
            bootstrap_servers=kafka_brokers
        )

    def send_page_data(self, json_data):
        """Publish *json_data* to the 'admintome-pages' topic
        (asynchronous send; no delivery confirmation awaited)."""
        self.producer.send('admintome-pages', json_data)
"""Tests for the plot module."""
import numpy as np
from app.demo.plot import (
model,
get_perp,
prepare_data,
generate_plot_image_string
)
from app.demo.plot import offset as plot_offset
def test_model():
    """Test the class prediction based on the highest score."""
    params = np.array([[1, 1, 1], [2, 2, 2]])
    data = np.array([[3, 3], [4, 4]])
    predictions = model(data, params)
    # two samples in, one label out per sample — both pick class 1
    assert predictions.shape == (2,)
    assert list(predictions) == [1, 1]
def test_get_perp():
    """Test the generation of parameters of a perpendicular to a line defined
    by params."""
    # generic sloped line
    params = np.array([-4, -1, 10])
    x_min = 0
    x_max = 14
    x_mid, y_mid, dx, dy = get_perp(x_min, x_max, params)
    assert x_mid == 7
    assert y_mid == -18
    assert dx == plot_offset
    assert dy == 0.0625
    # zero x-coefficient: perpendicular is steeply negative
    params = np.array([0, -1, 10])
    x_mid, y_mid, dx, dy = get_perp(x_min, x_max, params)
    assert x_mid == 7
    assert y_mid == 0
    assert dx == plot_offset
    assert dy < -100
    # zero y-coefficient: perpendicular is flat, midpoint far up
    params = np.array([-4, 0, 10])
    x_mid, y_mid, dx, dy = get_perp(x_min, x_max, params)
    assert x_mid == 7
    assert y_mid > 100
    assert dx == plot_offset
    assert dy == 0
    # (removed a trailing `assert True` — it tested nothing)
def test_prepare_data():
    """Test the generation of meshgrid coordinates."""
    # two labelled 2-D samples as rows of [x, y, label]
    data = np.array([[3, 3, 1], [4, 4, 2]])
    xx, yy, x_min, x_max, y_min, y_max, x_, y_ = prepare_data(data, 0.005)
    # bounds extend one unit past the data; 3-unit span / 0.005 -> 600 steps
    assert xx.shape == (600, 600)
    assert yy.shape == (600, 600)
    assert x_max == 5
    assert x_min == 2
    assert y_min == 2
    assert y_max == 5
    # features and labels are split out of the data matrix
    assert x_.shape == (2, 2)
    assert y_.shape == (2,)
def test_generate_plot_image_string():
    """Generate the ability to produce a string representing plotted data."""
    data = np.array([[3, 3, 1], [4, 4, 2]])
    params = np.array([[1, 1, 1], [2, 2, 2]])
    image_string = generate_plot_image_string(data, params)
    # any non-empty string means the plot rendered successfully
    assert len(image_string) > 0
|
import asyncio
import pytest
import ucp
@pytest.mark.parametrize("server_guarantee_msg_order", [True, False])
def test_mismatch(server_guarantee_msg_order):
    """Client and server must agree on guarantee_msg_order; a mismatch
    must raise on the client and surface the same error on the server."""
    # We use an exception handle to catch errors raised by the server
    def handle_exception(loop, context):
        msg = str(context.get("exception", context["message"]))
        # fail only if the server error is NOT the expected message
        loop.test_failed = msg.find(loop.error_msg_expected) == -1
    loop = asyncio.get_event_loop()
    loop.set_exception_handler(handle_exception)
    # state stashed on the loop so handle_exception can reach it
    loop.test_failed = False
    loop.error_msg_expected = "Both peers must set guarantee_msg_order identically"
    # client side: endpoint creation with the opposite setting must raise
    with pytest.raises(ValueError, match=loop.error_msg_expected):
        lt = ucp.create_listener(
            lambda x: x, guarantee_msg_order=server_guarantee_msg_order
        )
        loop.run_until_complete(
            ucp.create_endpoint(
                ucp.get_address(),
                lt.port,
                guarantee_msg_order=(not server_guarantee_msg_order),
            )
        )
    loop.run_until_complete(asyncio.sleep(0.1))  # Give the server time to finish
    assert not loop.test_failed, "expected error message not raised by the server"
|
# coding: utf8
import wx_spider
if __name__ == '__main__':
    # Prompt for a WeChat official-account name; default to 'spider'.
    gongzhonghao = input(u'input weixin gongzhonghao:')
    if not gongzhonghao:
        gongzhonghao = 'spider'
    # Crawl the account and print all scraped text joined by spaces.
    text = " ".join(wx_spider.run(gongzhonghao))
    print(text)
|
from downloader import download
from collections import defaultdict
import operator
# Advent of Code 2017, day 8: execute register instructions of the form
# "<reg> inc|dec <amount> if <cond_reg> <cmp> <value>".
download(2017, 8)
with open('aoc2017_8input.txt') as inputfile:
    data = inputfile.read()
print(data)
operations = {'inc': operator.add, 'dec': operator.sub}
# Resolve comparison operators through a lookup table instead of the
# original eval(): eval() on file contents can run arbitrary expressions.
comparisons = {'<': operator.lt, '<=': operator.le, '>': operator.gt,
               '>=': operator.ge, '==': operator.eq, '!=': operator.ne}
registers = defaultdict(int)
highest = 0  # highest value any register ever held (part 2)
for line in data.splitlines():
    # parts: [target, inc/dec, amount, 'if', cond_reg, cmp, value]
    parts = line.split()
    register = parts[0]
    op = operations[parts[1]]
    if comparisons[parts[5]](registers[parts[4]], int(parts[6])):
        registers[register] = op(registers[register], int(parts[2]))
        highest = max(highest, registers[register])
print(max(registers.values()))  # part 1: largest final register value
print(highest)
|
"""Handles how much health bullets and entities have"""
import pygame
from sprite.sprite_library import RectangleSprite
class Health:
    """A normal health object that keeps track of maximum and remaining HP."""

    def __init__(self, hp=1, regen=0):
        """Create the Health object.

        Args:
            hp: maximum (and starting) hit points.
            regen: hit points regained per update() while alive.
        """
        self.max_hp = hp
        self.hp = hp
        self.regen = regen

    def take_damage(self, dmg):
        """Subtract *dmg* hit points, dying if none remain."""
        self.hp -= dmg
        if self.hp <= 0:
            self.die()

    def dead(self):
        """Return True once the object has died.

        Robustness fix: uses <= 0 rather than == 0, so a subclass or
        external code that drives hp negative without calling die() is
        still reported dead.
        """
        return self.hp <= 0

    def die(self):
        """Kill the object immediately."""
        self.hp = 0

    def update(self):
        """Regenerate health (capped at max_hp) if not dead."""
        if not self.dead():
            self.hp = min(self.hp + self.regen, self.max_hp)
class InfiniteHealth(Health):
    """A Health variant that ignores damage; only a direct die() kills it."""

    def take_damage(self, dmg):
        """Ignore incoming damage entirely."""
        return None
class OneHealth(Health):
    """A Health variant that dies from any positive amount of damage."""

    def take_damage(self, dmg):
        """Die instantly on positive damage; ignore non-positive hits."""
        if dmg <= 0:
            return
        self.die()
class HealthBar(RectangleSprite):
    """A health bar sprite for a given Health object."""

    def __init__(self, containers, health, size, start, border_size=1, color=(1, 1, 1)):
        """Creates the health bar.

        Args:
            containers: sprite groups forwarded to RectangleSprite.
            health: Health object whose hp/max_hp ratio is displayed.
            size: outer (width, height) of the bar.
            start: starting position of the bar.
            border_size: border thickness in pixels.
            color: fill color for the remaining-health portion.
        """
        # White rectangle supplies the border; the fill is drawn in update().
        super().__init__(containers, (255, 255, 255), size, border_size, start)
        self.health = health
        self.inner_color = color

    def update(self):
        """Draws the health bar."""
        # drawing the image is done by 3 steps:
        # draw white (for border), draw black (for inside), draw self.color (for health)
        super().update()
        # Fill width scales with remaining hp fraction.
        # NOTE(review): assumes RectangleSprite provides self.image,
        # self.border_size and self.inner_size — confirm in sprite_library.
        self.image.fill(self.inner_color, pygame.Rect(
            self.border_size,
            self.border_size,
            int(self.inner_size[0] * self.health.hp / self.health.max_hp),
            self.inner_size[1]
        ))
|
#-*- coding:utf-8 -*-
import urllib
from bs4 import BeautifulSoup
import os
import csv
from collections import deque
import time
#//////////////////////////////get data per hour//////////////////////////////#
def getRealTimeData(url):
    """Scrape one station's latest hourly pollutant readings and append
    them to a per-day, per-station CSV under ./AtmosphereData/<station>/.

    Python 2 code (urllib.urlopen, print statement). Returns 0 when the
    newest reading is already recorded; otherwise appends a row and
    prints a one-line summary.
    """
    # ------------------------------beautifulsoup------------------------------#
    opener = urllib.urlopen(url)
    html = opener.read()
    opener.close()
    soup = BeautifulSoup(html,'lxml')
    #get monitoring station
    stationHTML = repr(soup.findAll('div',attrs="tilNormal"))
    station = BeautifulSoup(stationHTML,'lxml').div.string
    # '/' would break the directory name below
    station = station.replace('/',' ')
    #get latest time
    timeHTML = repr(soup.findAll('td',attrs="H24C_ColDateTime"))
    latestTime = BeautifulSoup(timeHTML,'lxml').td.string
    latestTime = latestTime.replace('\\xa0',' ')
    #get lastest data
    data = soup.findAll('td',attrs="H24C_ColItem")
    # first six columns: NO2, O3, SO2, CO, PM10, PM2.5
    dataList = [BeautifulSoup(repr(i),'lxml').td.string for i in data[:6]]
    #------------------------------write to csv------------------------------#
    current_path = os.getcwd()
    save_path = os.path.join(current_path, 'AtmosphereData',station)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # one file per day per station, e.g. '2017-05-01_<station>.csv'
    file= latestTime[0:10] + '_' + station + '.csv'
    filename = os.path.join(save_path,file)
    # write header
    if not os.path.exists(filename):
        with open(filename,'w') as csvfile:
            fieldnames = ['Datetime', 'NO2(ug/m3)', 'O3(ug/m3)', 'SO2(ug/m3)', 'CO(ug/m3)', 'PM10(ug/m3)', 'PM2.5(ug/m3)']
            writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n',fieldnames=fieldnames)
            writer.writeheader()
    #check datetime, if the data exist, return, no writing to csv
    with open(filename, 'r') as f:
        # deque(..., 1) keeps only the last row; [0][0] is its timestamp
        lastTime = deque(csv.reader(f), 1)[0][0]
        if lastTime == latestTime:
            return 0
    #write data
    with open(filename, 'a') as csvfile:
        fieldnames = ['Datetime', 'NO2', 'O3', 'SO2', 'CO', 'PM10', 'PM2.5']
        writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\n', fieldnames=fieldnames)
        writer.writerow({'Datetime': latestTime, 'NO2': str(dataList[0]), 'O3':str(dataList[1]),\
                         'SO2': str(dataList[2]), 'CO': str(dataList[3]), 'PM10': str(dataList[4]),\
                         'PM2.5': str(dataList[5])})
    returnData = str(latestTime) + ' '+ 'NO2: '+str(dataList[0]) +' '+ 'O3: ' + str(dataList[1])+ ' '+\
                 'SO2: ' + str(dataList[2]) + ' '+ 'CO: ' + str(dataList[3]) + \
                 ' ' + 'PM10: ' + str(dataList[4]) + ' ' +'PM2.5: ' + str(dataList[5]) + ' '+ str(station)
    print returnData
#//////////////////////////////main function//////////////////////////////#
# Past-24-hour pollutant-concentration pages of the HK EPD monitoring stations.
EPDstaionURL = ['http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration1f2c.html?stationid=70', #Yuen Long 元朗
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration45fd.html?stationid=80', #Central/Western 中心区
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentratione1a6.html?stationid=73', #Eastern 东区
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationfb71.html?stationid=74', # Kwun Tong 观塘
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationdb46.html?stationid=66', # Sham Shui Po 深水埗
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration30e8.html?stationid=72', # Kwai Chung 葵涌
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration228e.html?stationid=77', # Tsuen Wan 荃湾
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration0b35.html?stationid=83', # Tseung Kwan O 将军澳
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration537c.html?stationid=82', # Tuenn Mun 屯门
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationf322.html?stationid=78', # Tung Chung 东涌
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration6e9c.html?stationid=69', # Tai Po 大埔
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration2c5f.html?stationid=75', # Sha Tin 沙田
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration233a.html?stationid=76', # Tap Mun 塔门
    #road side station 路边监测站
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration5ca5.html?stationid=71', #Causeway Bay 铜锣湾
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentrationf9dd.html?stationid=79', #Central 中环
    'http://www.aqhi.gov.hk/en/aqhi/past-24-hours-pollutant-concentration9c57.html?stationid=81' #Mong Kok 旺角
    ]
# Poll every station, then sleep 10 minutes; runs until interrupted.
while True:
 for url in EPDstaionURL:
  try:
   getRealTimeData(url)
  except:
   # NOTE(review): bare except hides every failure (network, parse, disk);
   # consider logging the exception before continuing.
   continue
 time.sleep(600)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-10-11 08:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
 """Set human-readable (Chinese) verbose names on the incubator model's Meta options."""
 dependencies = [
  ('incubator', '0001_initial'),
 ]
 operations = [
  migrations.AlterModelOptions(
   name='incubator',
   options={'verbose_name': '孵化器', 'verbose_name_plural': '孵化器'},
  ),
 ]
|
# svg_parser.py
# Copyright Max Kolosov 2009 maxkolosov@inbox.ru
# http://saxi.nm.ru/
# BSD license
import sys
from StringIO import StringIO
from xml.etree import cElementTree
from svg_path_regex import svg_path_parser
def print_error():
 """Print the current exception's type, source file, line number and message (Python 2)."""
 exc, err, traceback = sys.exc_info()
 print exc, traceback.tb_frame.f_code.co_filename, 'ERROR ON LINE', traceback.tb_lineno, '\n', err
 # Drop the traceback references promptly to break the reference cycle they create.
 del exc, err, traceback
# Pixels per inch used by convert_length_units (assumes 100 dpi — TODO confirm).
px_x, px_y = 100, 100
# NOTE(review): 'px' is grouped with the relative units here although CSS treats
# it as absolute; the parsing code strips these suffixes without conversion.
relative_length_units = ('em', 'ex', 'px')
absolute_length_units = ('in', 'cm', 'mm', 'pt', 'pc')
def convert_length_units(value = None, func = float):
 """Convert an absolute SVG length string ('2in', '10mm', ...) to pixels.

 The numeric part is converted with *func* (float by default), then scaled
 by the module dpi ``px_x``.  Strings without a recognized suffix are
 returned unchanged.
 """
 if value.strip()[-2:] == 'in':
  value = func(value.strip()[:-2]) * px_x
 elif value.strip()[-2:] == 'mm':
  # 1 inch = 25.4 mm (the original divided by 25.2).
  value = func(value.strip()[:-2]) * px_x / 25.4
 elif value.strip()[-2:] == 'cm':
  # 1 inch = 2.54 cm (the original divided by 2.52).
  value = func(value.strip()[:-2]) * px_x / 2.54
 elif value.strip()[-2:] == 'pt':
  # 72 points per inch.
  value = func(value.strip()[:-2]) * px_x / 72
 elif value.strip()[-2:] == 'pc':
  # 1 pica = 12 points.
  value = func(value.strip()[:-2]) * px_x / 72 * 12
 return value
def to_int(value = None):
 """Coerce an SVG length/percentage string (or number) to an int.

 Unit suffixes are converted via convert_length_units (absolute) or stripped
 (relative); '50%' becomes 0.5 before truncation.  Returns 1 when nothing
 can be parsed (error is printed via print_error).
 """
 result = 1
 try:
  result = int(value)
 except (TypeError, ValueError):
  # Not a plain integer: peel off a unit suffix or percent sign first.
  if value.strip()[-2:] in absolute_length_units:
   value = convert_length_units(value, int)
  elif value.strip()[-2:] in relative_length_units:
   value = value.strip()[:-2]
  elif value.strip()[-1:] == '%':
   value = float(value.strip()[:-1])/100
  try:
   result = int(value)
  except (TypeError, ValueError):
   # Values like '3.5' need an intermediate float conversion.
   try:
    result = int(float(value))
   except (TypeError, ValueError):
    print_error()
 return result
def to_float(value = None):
 """Coerce an SVG length/percentage string (or number) to a float.

 Mirrors to_int: absolute units are converted to pixels, relative suffixes
 are stripped, and '50%' becomes 0.5.  Returns 1.0 when parsing fails
 (error is printed via print_error).
 """
 result = 1.0
 try:
  result = float(value)
 except (TypeError, ValueError):
  # Not a plain number: peel off a unit suffix or percent sign first.
  if value.strip()[-2:] in absolute_length_units:
   value = convert_length_units(value)
  elif value.strip()[-2:] in relative_length_units:
   value = value.strip()[:-2]
  elif value.strip()[-1:] == '%':
   value = float(value.strip()[:-1])/100
  try:
   result = float(value)
  except (TypeError, ValueError):
   print_error()
 return result
def normal_color(value = '#000000'):
 """Normalize an SVG color attribute value.

 'rgb(r,g,b)' -> (r, g, b) tuple; '#abc' -> '#aabbcc'; gradient references
 ('url(#...)') pass through unchanged; 'none' -> None.
 """
 # The original tested value[:2] == 'rgb', which is never true for a
 # two-character slice, and then eval()'d the attribute text (unsafe on
 # untrusted SVG).  Parse the components explicitly instead.
 if value[:3] == 'rgb':
  nums = value[value.find('(') + 1:value.find(')')].split(',')
  # Percent components are stripped of '%' only — confirm callers expect 0-255 ints.
  value = tuple(int(float(n.strip().rstrip('%'))) for n in nums)
 elif value[0] == '#':
  if len(value[1:]) == 3:
   # Expand the 3-digit shorthand by doubling each hex digit.
   value = value[0]+value[1]*2+value[2]*2+value[3]*2
 elif 'url(#' in value:
  return value
 elif value.strip().lower() == 'none':
  value = None
 return value
def wx_alpha_opaque(value = 1.0):
 """Map an SVG opacity (0.0-1.0 fraction, or an int) onto wx's 0-255 alpha scale.

 A value equal to 1.0 means fully opaque (255); fractions are scaled to
 0-255; anything above 255 is clamped; the result is always an int.
 """
 if value == 1.0:
  return 255
 if isinstance(value, float):
  # Negative opacities are treated by magnitude.
  if value < 0.0:
   value = abs(value)
  if 0.0 < value < 1.0:
   value = int(value * 255)
  if value > 255.0:
   value = 255
 return value if isinstance(value, int) else int(value)
def wx_color(value = '#000000', alpha_opaque = 255):
 """Return *value* unchanged, substituting black for None.

 Gradient references ('url(#...)') also pass straight through; the
 alpha_opaque argument is currently unused.
 """
 if value is None:
  return '#000000'
 if 'url(#' in value:
  return value
 return value
def parse_polyline_points(value = '', separator = ' '):
 """Parse a polyline 'points' attribute ('x,y x,y ...') into (float, float) tuples."""
 pairs = []
 for token in value.split(separator):
  x_text, y_text = token.split(',')
  pairs.append((to_float(x_text), to_float(y_text)))
 return pairs
def parse_polygon_points(value = '', separator = ' ', alternative_separator = ', '):
 """Parse a polygon 'points' attribute into (float, float) tuples.

 Falls back to *alternative_separator* when the primary separator yields a
 single token, and appends the first point again if the ring is not closed.
 """
 result = []
 pre_result = value.strip().split(separator)
 if len(pre_result) < 2:
  # The original indexed pre_result[1] here, which is out of range
  # precisely when this branch runs; re-split the single token instead.
  pre_result = pre_result[0].split(alternative_separator)
 for item in pre_result:
  xy = item.split(',')
  result.append((to_float(xy[0]), to_float(xy[1])))
 # Close the polygon if the last point does not repeat the first.
 if pre_result[0] != pre_result[-1]:
  xy = pre_result[0].split(',')
  result.append((to_float(xy[0]), to_float(xy[1])))
 return result
def parse_style(value = ''):
 """Parse an SVG 'style' attribute string into a dict of normalized values.

 Colors go through normal_color/wx_color, widths through to_float, and
 opacities are mapped to the 0-255 wx scale.  A stroke or fill of 'none'
 is removed from the result entirely.  (dict.has_key was replaced by the
 'in' operator, which works on both Python 2 and 3.)
 """
 result = {}
 for item in value.split(';'):
  key_value = item.split(':')
  if len(key_value) > 1:
   key = key_value[0].strip()
   value = key_value[1].strip()
   if key in ('stroke', 'fill'):
    value = normal_color(value)
   elif key in ('stroke-width', ):
    value = to_float(value)
   elif key in ('stroke-opacity', 'fill-opacity'):
    value = wx_alpha_opaque(float(value))
   result[key] = value
 if 'stroke' in result:
  if result['stroke'] is None:
   # 'stroke: none' — drop the key so callers skip stroking.
   del result['stroke']
  else:
   # Fill in defaults the renderer needs alongside a stroke color.
   if 'stroke-width' not in result:
    result['stroke-width'] = 1.0
   if 'stroke-opacity' not in result:
    result['stroke-opacity'] = 255
   result['stroke'] = wx_color(result['stroke'], result['stroke-opacity'])
 if 'fill' in result:
  if result['fill'] is None:
   del result['fill']
  else:
   # Gradient fills ('url(#...)') are resolved later; only plain colors here.
   if result['fill'].find('url(#') == -1:
    if 'fill-opacity' not in result:
     result['fill-opacity'] = 255
    result['fill'] = wx_color(result['fill'], result['fill-opacity'])
 return result
def parse_text_style(value = ''):
 """Parse a text element's 'style' string.

 Extends parse_style by converting font-size to integer points (px values
 are rescaled using the module dpi px_x) and lower-casing recognized
 font-style / font-weight keywords.  (dict.has_key was replaced by the
 'in' operator, which works on both Python 2 and 3.)
 """
 result = parse_style(value)
 if 'font-size' in result:
  real_value = to_int(result['font-size'])
  if result['font-size'].strip()[-2:] == 'px':
   # px -> pt: 72 points per inch at px_x pixels per inch.
   result['font-size'] = int(real_value * 72 / px_x)
  else:
   result['font-size'] = real_value
 # Normalize the case of known keywords; unknown values pass through untouched.
 if 'font-style' in result:
  if result['font-style'].lower() in ('normal', 'slant', 'italic'):
   result['font-style'] = result['font-style'].lower()
 if 'font-weight' in result:
  if result['font-weight'].lower() in ('normal', 'light', 'bold'):
   result['font-weight'] = result['font-weight'].lower()
 return result
def parse_stop_style(element_dict, value = ''):
 """Update *element_dict* in place from a gradient stop's 'style' string.

 Only 'stop-color' and 'stop-opacity' declarations are recognized.
 """
 for declaration in value.split(';'):
  parts = declaration.split(':')
  if len(parts) <= 1:
   continue
  name = parts[0].strip()
  raw = parts[1].strip()
  if name == 'stop-color':
   element_dict['stop-color'] = wx_color(normal_color(raw))
  elif name == 'stop-opacity':
   element_dict['stop-opacity'] = wx_alpha_opaque(float(raw))
def parse_transform(value = ''):
 """Parse an SVG 'transform' attribute into a dict keyed by operation name.

 Recognized operations: matrix, translate, rotate, scale, skewX, skewY.
 SECURITY NOTE: the argument text is passed to eval(), as in the original
 code — do not feed this function untrusted SVG without replacing eval
 with an explicit argument parser.
 """
 result = {}
 # First matching operation per ';'-separated item wins, mirroring the
 # original if/elif chain (same check order).
 for item in value.split(';'):
  for op in ('matrix', 'translate', 'rotate', 'scale', 'skewX', 'skewY'):
   pos = item.find(op)
   if pos > -1:
    result[op] = eval(item[pos:])
    break
 return result
def fill_svg_container(root, result = {}):
 """Recursively convert an SVG element tree into nested dicts of primitives.

 Each child of *root* becomes a dict with an 'svg_key' describing its kind
 plus parsed geometry/style fields, appended to result['children'].
 Container tags (defs, g, a, gradients) recurse into themselves.
 Tag checks accept both bare names and namespace-qualified '{ns}name' forms.
 NOTE(review): the mutable default ``result = {}`` is shared across calls
 that omit the argument — callers in this file always pass it explicitly.
 Python 2 only (iteritems, has_key, print statement).
 """
 for element in root.getchildren():
  element_dict = None
  if element.tag == 'defs' or element.tag[-5:] == '}defs':
   element_dict = {'svg_key':'defs', 'children':[]}
   fill_svg_container(element, element_dict)
  elif element.tag == 'metadata' or element.tag[-9:] == '}metadata':
   element_dict = {'svg_key':'metadata', 'value':element}
  elif element.tag == 'title' or element.tag[-6:] == '}title':
   # Title/desc annotate the parent container directly; no child dict is made.
   result['title'] = element.text
   continue
  elif element.tag == 'desc' or element.tag[-5:] == '}desc':
   result['desc'] = element.text
   continue
  elif element.tag == 'stop' or element.tag[-5:] == '}stop':
   element_dict = {'svg_key':'stop',
       'offset':to_float(element.attrib.get('offset', 0)),
       'stop-color':wx_color(element.attrib.get('stop-color', '#000000')),
       'stop-opacity':wx_alpha_opaque(element.attrib.get('stop-opacity', 1.0))}
   if element.attrib.has_key('style'):
    parse_stop_style(element_dict, element.attrib['style'])
  elif element.tag == 'linearGradient' or element.tag[-15:] == '}linearGradient':
   element_dict = {'svg_key':'linearGradient', 'children':[]}
   # An (xlink:)href clones a previously parsed gradient as the base.
   for key, value in element.attrib.iteritems():
    if key == 'href' or key[-5:] == '}href':
     for item in result['children']:
      if item.get('id', '') == value.strip('#'):
       element_dict = dict(item)
   element_dict['x1'] = to_float(element.attrib.get('x1', 0))
   element_dict['y1'] = to_float(element.attrib.get('y1', 0))
   element_dict['x2'] = to_float(element.attrib.get('x2', 0))
   element_dict['y2'] = to_float(element.attrib.get('y2', 0))
   fill_svg_container(element, element_dict)
  elif element.tag == 'radialGradient' or element.tag[-15:] == '}radialGradient':
   element_dict = {'svg_key':'radialGradient', 'children':[]}
   for key, value in element.attrib.iteritems():
    if key == 'href' or key[-5:] == '}href':
     for item in result['children']:
      if item.get('id', '') == value.strip('#'):
       element_dict = dict(item)
   element_dict['cx'] = to_float(element.attrib.get('cx', 0))
   element_dict['cy'] = to_float(element.attrib.get('cy', 0))
   element_dict['r'] = to_float(element.attrib.get('r', 0))
   element_dict['fx'] = to_float(element.attrib.get('fx', 0))
   element_dict['fy'] = to_float(element.attrib.get('fy', 0))
   fill_svg_container(element, element_dict)
  elif element.tag == 'text' or element.tag[-5:] == '}text':
   text = element.text
   if text is None:
    text = ''
   element_dict = {'svg_key':'text', 'value':text, 'x':to_float(element.attrib['x']), 'y':to_float(element.attrib['y'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_text_style(element.attrib['style'])
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
  elif element.tag == 'line' or element.tag[-5:] == '}line':
   element_dict = {'svg_key':'line', 'x1':to_float(element.attrib['x1']), 'x2':to_float(element.attrib['x2']), 'y1':to_float(element.attrib['y1']), 'y2':to_float(element.attrib['y2'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   elif element.attrib.has_key('stroke'):
    # Presentation attributes are only read when no 'style' attribute exists.
    element_dict['stroke'] = wx_color(normal_color(element.attrib['stroke']))
    if element.attrib.has_key('stroke-width'):
     element_dict['stroke-width'] = to_float(element.attrib['stroke-width'])
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
  elif element.tag == 'polyline' or element.tag[-9:] == '}polyline':
   element_dict = {'svg_key':'polyline', 'points':parse_polyline_points(element.attrib['points'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
  elif element.tag == 'polygon' or element.tag[-8:] == '}polygon':
   # Polygons are stored as closed polylines (parse_polygon_points closes the ring).
   element_dict = {'svg_key':'polyline', 'points':parse_polygon_points(element.attrib['points'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
  elif element.tag == 'circle' or element.tag[-7:] == '}circle':
   element_dict = {'svg_key':'circle', 'cx':to_float(element.attrib['cx']), 'cy':to_float(element.attrib['cy']), 'r':to_float(element.attrib['r'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
  elif element.tag == 'ellipse' or element.tag[-8:] == '}ellipse':
   element_dict = {'svg_key':'ellipse', 'cx':to_float(element.attrib['cx']), 'cy':to_float(element.attrib['cy']), 'rx':to_float(element.attrib['rx']), 'ry':to_float(element.attrib['ry'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
  elif element.tag == 'rect' or element.tag[-5:] == '}rect':
   element_dict = {'svg_key':'rect', 'x':to_float(element.attrib['x']), 'y':to_float(element.attrib['y']), 'width':to_float(element.attrib['width']), 'height':to_float(element.attrib['height'])}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   elif element.attrib.has_key('stroke'):
    element_dict['stroke'] = wx_color(normal_color(element.attrib['stroke']))
    if element.attrib.has_key('stroke-width'):
     element_dict['stroke-width'] = to_float(element.attrib['stroke-width'])
    if element.attrib.has_key('fill'):
     element_dict['fill'] = wx_color(normal_color(element.attrib['fill']))
   if element.attrib.has_key('rx'):
    element_dict['rx'] = to_float(element.attrib['rx'])
  elif element.tag == 'path' or element.tag[-5:] == '}path':
   try:
    d = svg_path_parser.parse(element.attrib['d'])
   except:
    # Unparseable path data: report it and skip the element.
    print_error()
    print element.attrib['d']
    continue
   element_dict = {'svg_key':'path', 'd':d}
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
  elif element.tag == 'image' or element.tag[-6:] == '}image':
   element_dict = {'svg_key':'image',
       'x':to_float(element.attrib.get('x', 0)),
       'y':to_float(element.attrib.get('y', 0)),
       'width':to_float(element.attrib.get('width', 0)),
       'height':to_float(element.attrib.get('height', 0))}
   for key, value in element.attrib.iteritems():
    if key == 'href' or key[-5:] == '}href':
     element_dict['href'] = value
  elif element.tag == 'g' or element.tag[-2:] == '}g':
   element_dict = {'svg_key':'g', 'children':[]}
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   fill_svg_container(element, element_dict)
  elif element.tag == 'a' or element.tag[-2:] == '}a':
   element_dict = {'svg_key':'a', 'children':[]}
   if element.attrib.has_key('transform'):
    element_dict['transform'] = element.attrib['transform']
   if element.attrib.has_key('style'):
    element_dict['style'] = parse_style(element.attrib['style'])
   for key, value in element.attrib.iteritems():
    if key == 'href' or key[-5:] == '}href':
     element_dict['href'] = value
   fill_svg_container(element, element_dict)
  else:
   # Unknown tag: keep the raw element so nothing is silently lost.
   comment = 'Sorry, unimplemented svg tag'
   element_dict = {'svg_key':element.tag, 'value':element, 'comment':comment}
   print comment, ': ', element.tag
  # Every child gets an id (falling back to the object's id()) for href lookups.
  element_dict['id'] = element.attrib.get('id', str(id(element)))
  result['children'].append(element_dict)
def parse_xml_data(xml_data = ''):
 """Parse an SVG document string into a nested dict of drawing primitives.

 Returns a dict with the root width/height, viewBox, derived origin/scale
 values, and the element tree under 'children' (see fill_svg_container).
 (dict.has_key was replaced by the 'in' operator, valid on Python 2 and 3.)
 """
 result = {'width':100.0, 'height':100.0,
   'viewBox':[0, 0, 1000, 1000],
   'origin_x':0, 'origin_y':0,
   'scale_x':0, 'scale_y':0,
   'children':[]}
 dom = cElementTree.parse(StringIO(xml_data))
 if isinstance(dom, cElementTree.ElementTree):
  svg_root = dom.getroot()
  width, height = '0', '0'
  if 'width' in svg_root.attrib:
   width = svg_root.attrib['width']
   result['width'] = to_float(width)
  else:
   result['width'] = 0
  if 'height' in svg_root.attrib:
   height = svg_root.attrib['height']
   result['height'] = to_float(height)
  else:
   result['height'] = 0
  if 'viewBox' in svg_root.attrib:
   result['viewBox'] = []
   for item in svg_root.attrib['viewBox'].split():
    result['viewBox'].append(float(item))
   # Percentage sizes are fractions of the viewBox dimensions
   # (to_float already turned 'NN%' into a 0-1 fraction).
   if width[-1] == '%':
    result['width'] = result['viewBox'][2]*result['width']
   if height[-1] == '%':
    result['height'] = result['viewBox'][3]*result['height']
  else:
   # No viewBox: make it match the document size.
   result['viewBox'][2] = result['width']
   result['viewBox'][3] = result['height']
  result['origin_x'], result['origin_y'] = result['viewBox'][0], result['viewBox'][1]
  if result['origin_x'] < 0:
   result['origin_x'] *= -1
  if result['origin_y'] < 0:
   result['origin_y'] *= -1
  # Non-zero scale only when the rendered size differs from the viewBox;
  # the sign flags shrinking (preserved from the original logic).
  if result['width'] != result['viewBox'][2]:
   result['scale_x'] = result['width'] / result['viewBox'][2]
   if result['width'] < result['viewBox'][2]:
    result['scale_x'] *= -1
  if result['height'] != result['viewBox'][3]:
   result['scale_y'] = result['height'] / result['viewBox'][3]
   if result['height'] < result['viewBox'][3]:
    result['scale_y'] *= -1
  fill_svg_container(svg_root, result)
 return result
|
from multiprocessing import TimeoutError
from .pool import Pool
__all__ = ["Pool", "TimeoutError"]
|
def pick2(arr):
 """Return every unordered pair of elements of *arr* (nC2 tuples).

 Pairs keep the original element order: (arr[i], arr[j]) with i < j.
 The original version prepended four hard-coded pairs (one of them twice),
 which duplicated tuples the loop already produces; for a 7-element list
 it returned 25 pairs instead of the expected 7C2 = 21.
 """
 return [(arr[i], arr[j])
         for i in range(len(arr) - 1)
         for j in range(i + 1, len(arr))]
# Demo: build [1..7] and print all unordered pairs.
# arr = [1, 2, 3, 4, 5, 6, 7]
# arr = [i for i in range(0, 8)]
arr = [i+1 for i in range(0, 7)]
print(arr)
print(pick2(arr))
# 7C2 = 21
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import os
import time
from datetime import datetime
import tweepy
import requests
# Twitter credentials come from the environment; fails fast with KeyError if unset.
auth = tweepy.OAuthHandler(os.environ['CONSUMER_KEY'], os.environ['CONSUMER_SECRET'])
auth.set_access_token(os.environ['ACCESS_KEY'], os.environ['ACCESS_SECRET'])
api = tweepy.API(auth)
# would want a db in the future if I do more complex things
processed_files = []
fec_params = {
 'api_key': os.environ['FEC_API_KEY'],
 # don't want to flood the feed with repeats
 'min_receipt_date': datetime.now(),
}
logging.info('Running...')
# Poll the FEC e-file feed every 10 seconds and tweet each new filing once.
while True:
 filings = requests.get('https://api.open.fec.gov/v1/efile/filings/?sort=-receipt_date&per_page=70', params=fec_params).json()
 if 'results' in filings:
  for record in filings['results']:
   if record['file_number'] not in processed_files:
    # Truncate names so the message fits in a tweet.
    committee_name = str(record['committee_name'] or '')[:116]
    candidate_name = str(record['candidate_name'] or '')[:116]
    link = 'http://docquery.fec.gov/cgi-bin/forms/{0}/{1}'.format(record['committee_id'], record['file_number'])
    # NOTE(review): no space between committee and candidate names — confirm intended.
    message = committee_name + candidate_name + ' ' + link
    if record['amends_file'] is not None:
     message = committee_name[:106] + ' ' + link +' amendment'
    api.update_status(message)
    processed_files.append(record['file_number'])
 # Bound memory: drop the oldest 50 entries once the seen-list passes 500.
 if len(processed_files) > 500:
  processed_files = processed_files[50:]
 time.sleep(10)
|
import SocketServer
import socket
import math
host = 'localhost'
# Port this dispatcher listens on.
port_client = 8557
# Ports of the individual calculator servers, kept as strings because they
# are sent raw over the socket to the client.
port_server_sum = '8560'
port_server_sub = '8561'
port_server_mul = '8562'
port_server_div = '8563'
port_server_pow = '8564'
port_server_sqr = '8565'
port_server_log = '8566'
class myHandler(SocketServer.BaseRequestHandler):
def handle(self):
self.number1 = self.request.recv(1024)
exit = False
while not exit:
if int(self.number1) >= 1 & int(self.number1) <= 8:
if int(self.number1) == 1:
self.request.send(port_server_sum)
print "Port number of Server sum has been sent!"
exit = True
elif int(self.number1) == 2:
self.request.send(port_server_sub)
print "Port number of Server sub has been sent!"
exit = True
elif int(self.number1) == 3:
self.request.send(port_server_mul)
print "Port number of Server Mul has been sent!"
exit = True
elif int(self.number1) == 4:
self.request.send(port_server_div)
print "Port number of Server div has been sent!"
exit = True
elif int(self.number1) == 5:
self.request.send(port_server_pow)
print "Port number of Server pow has been sent!"
exit = True
elif int(self.number1) == 6:
self.request.send(port_server_sqr)
print "Port number of Server sqr has been sent!"
exit = True
elif int(self.number1) == 7:
self.request.send(port_server_log)
print "Port number of Server log has been sent!"
exit = True
else:
print "Server with that refence does not exist!"
exit = True
def main():
 """Bind the dispatcher TCP server with address reuse enabled, then serve forever."""
 # bind_and_activate=False so allow_reuse_address takes effect before binding.
 server = SocketServer.TCPServer((host, port_client), myHandler, bind_and_activate = False)
 server.allow_reuse_address = True
 server.server_bind()
 server.server_activate()
 if server:
  print "Server On"
  server.serve_forever()
main()
from django.conf.urls import url
from . import views
# Todo-item API endpoints.
# NOTE(review): patterns are unanchored at the start (no '^'), so any URL
# ending in these suffixes matches — confirm that is intended.
urlpatterns=[
 url(r'post_todo_item$', views.add_todo_item,),
 url(r'add_todo_items$', views.add_todo_items,),
 url(r'get_todo_items$', views.get_todo_items),
 url(r'put_todo_item$',views.put_todo_item),
 url(r'delete_todo_item$',views.delete_todo_item),
]
#!/usr/bin/env python
"""Script used for performing a forward pass on a previously trained model and
visualizing the predicted primitives.
"""
import argparse
import os
import sys
import numpy as np
import torch
import trimesh
from simple_3dviz import Mesh, Spherecloud
from simple_3dviz.behaviours import SceneInit
from simple_3dviz.behaviours.misc import LightToCamera
from simple_3dviz.behaviours.keyboard import SnapshotOnKey
from simple_3dviz.behaviours.movements import CameraTrajectory
from simple_3dviz.behaviours.trajectory import Circle
from simple_3dviz.behaviours.io import SaveFrames, SaveGif
from simple_3dviz.utils import render
from simple_3dviz.window import show
from arguments import add_dataset_parameters
from utils import load_config, points_on_sphere
from visualization_utils import scene_init, load_ground_truth, \
get_colors, jet, colormap
from neural_parts.datasets import build_dataset
from neural_parts.models import build_network
from neural_parts.utils import sphere_mesh
from neural_parts.metrics import iou_metric
def main(argv):
 """Run a forward pass of a trained neural-parts model (or load saved
 predictions) and visualize / export the recovered primitive meshes.

 Fixes over the original: the GIF output path is now built with
 os.path.join (it previously had a hard-coded leading '/', producing a
 wrong absolute path), and --mesh no longer crashes with a NameError when
 combined with --prediction_file (dataset is only built in the other branch).
 """
 parser = argparse.ArgumentParser(
  description="Do the forward pass and visualize the recovered parts"
 )
 parser.add_argument(
  "config_file",
  help="Path to the file that contains the experiment configuration"
 )
 parser.add_argument(
  "output_directory",
  help="Save the output files in that directory"
 )
 parser.add_argument(
  "--weight_file",
  default=None,
  help=("The path to a previously trained model to continue"
     " the training from")
 )
 parser.add_argument(
  "--prediction_file",
  default=None,
  help="The path to the predicted primitives"
 )
 parser.add_argument(
  "--save_frames",
  help="Path to save the visualization frames to"
 )
 parser.add_argument(
  "--without_screen",
  action="store_true",
  help="Perform no screen rendering"
 )
 parser.add_argument(
  "--n_frames",
  type=int,
  default=200,
  help="Number of frames to be rendered"
 )
 parser.add_argument(
  "--background",
  type=lambda x: list(map(float, x.split(","))),
  default="1,1,1,1",
  help="Set the background of the scene"
 )
 parser.add_argument(
  "--up_vector",
  type=lambda x: tuple(map(float, x.split(","))),
  default="0,0,1",
  help="Up vector of the scene"
 )
 parser.add_argument(
  "--camera_target",
  type=lambda x: tuple(map(float, x.split(","))),
  default="0,0,0",
  help="Set the target for the camera"
 )
 parser.add_argument(
  "--camera_position",
  type=lambda x: tuple(map(float, x.split(","))),
  default="-2.0,-2.0,-2.0",
  help="Camera position in the scene"
 )
 parser.add_argument(
  "--window_size",
  type=lambda x: tuple(map(int, x.split(","))),
  default="512,512",
  help="Define the size of the scene and the window"
 )
 parser.add_argument(
  "--with_rotating_camera",
  action="store_true",
  help="Use a camera rotating around the object"
 )
 parser.add_argument(
  "--mesh",
  action="store_true",
  help="Visualize the target mesh"
 )
 parser.add_argument(
  "--no_color",
  action="store_true",
  help="Use the same color for all the primitives"
 )
 parser.add_argument(
  "--show_vertices",
  action="store_true",
  help="Show the vertices as a sphere cloud"
 )
 parser.add_argument(
  "--n_vertices",
  type=int,
  default=10000,
  help="How many vertices to use per part"
 )
 parser.add_argument(
  "--only_part",
  type=int,
  default=None,
  help="Show only a specific part if given"
 )
 add_dataset_parameters(parser)
 args = parser.parse_args(argv)
 if torch.cuda.is_available():
  device = torch.device("cuda:0")
 else:
  device = torch.device("cpu")
 print("Running code on", device)
 # Check if output directory exists and if it doesn't create it
 if not os.path.exists(args.output_directory):
  os.makedirs(args.output_directory)
 config = load_config(args.config_file)
 # Extract the number of primitives
 n_primitives = config["network"]["n_primitives"]
 # Dictionary to keep the predictions used for the evaluation
 predictions = {}
 # Only populated when we run the network ourselves (no --prediction_file).
 dataset = None
 if args.prediction_file is None:
  # Instantiate a dataset to generate the samples for evaluation
  dataset = build_dataset(
   config,
   args.model_tags,
   args.category_tags,
   ["train", "val", "test"],
   random_subset=args.random_subset
  )
  assert len(dataset) == 1
  # Build the network architecture to be used for training
  network, _, _ = build_network(config, args.weight_file, device=device)
  network.eval()
  # Create the prediction input
  with torch.no_grad():
   for sample in dataset:
    sample = [s[None] for s in sample]  # make a batch dimension
    X = sample[0].to(device)
    targets = [yi.to(device) for yi in sample[1:]]
    F = network.compute_features(X)
    phi_volume, _ = network.implicit_surface(F, targets[0])
    y_pred, faces = network.points_on_primitives(
     F, args.n_vertices, random=False, mesh=True,
     union_surface=False
    )
    predictions["phi_volume"] = phi_volume
    predictions["y_prim"] = y_pred
 else:
  # Load a previously saved prediction tuple instead of running the model.
  preds = torch.load(args.prediction_file, map_location="cpu")
  y_pred = preds[4]
  faces = preds[5]
  targets = preds[0]
  predictions["phi_volume"] = preds[1]
  predictions["y_prim"] = y_pred
 print("IOU:", iou_metric(predictions, targets))
 # Get the renderables from the deformed vertices and faces
 vertices = y_pred.detach()
 parts = range(n_primitives)
 if args.only_part is not None:
  parts = [args.only_part]
 renderables = [
  Mesh.from_faces(
   vertices[0, :, i],
   faces,
   colors=get_colors(0 if args.no_color else i)
  )
  for i in parts
 ]
 if args.show_vertices:
  # Color vertices along a colormap instead of one color per part.
  renderables = [
   Mesh.from_faces(
    vertices[0, :, i],
    faces,
    colors=colormap(np.linspace(0, 1, vertices.shape[1]))
   )
   for i in parts
  ]
 behaviours = [
  SceneInit(
   scene_init(
    # Ground truth is only available when we built the dataset above.
    load_ground_truth(dataset) if args.mesh and dataset is not None else None,
    args.up_vector,
    args.camera_position,
    args.camera_target,
    args.background
   )
  ),
  LightToCamera(),
 ]
 if args.with_rotating_camera:
  behaviours += [
   CameraTrajectory(
    Circle(
     args.camera_target,
     args.camera_position,
     args.up_vector
    ),
    speed=1/180
   )
  ]
 if args.without_screen:
  # Build the GIF path inside the output directory (the original prefixed
  # a '/', which produced a broken absolute path).
  path_to_gif = os.path.join(
   args.output_directory, "{}.gif".format(args.model_tags[0])
  )
  behaviours += [
   SaveFrames(args.save_frames, 1),
   SaveGif(path_to_gif, 1)
  ]
  render(renderables, size=args.window_size, behaviours=behaviours,
     n_frames=args.n_frames)
 else:
  show(renderables, size=args.window_size,
     behaviours=behaviours + [SnapshotOnKey()])
 print("Saving renderables to file")
 for i in range(n_primitives):
  m = trimesh.Trimesh(vertices[0, :, i].detach(), faces)
  m.export(
   os.path.join(args.output_directory, "part_{:03d}.obj".format(i)),
   file_type="obj"
  )
if __name__ == "__main__":
 main(sys.argv[1:])
|
def gcd(a,b):
 """Greatest common divisor of two integers via Euclid's algorithm."""
 while b:
  a, b = b, a % b
 return a
def coprime(a,b):
 """Return 1.0 if a and b are coprime (gcd == 1), else 0.0.

 Floats are returned (not ints/bools) so callers can use the value
 directly as a weight in arithmetic.
 """
 # Euclid's algorithm, inlined.
 x, y = a, b
 while y:
  x, y = y, x % y
 return 1. if x == 1 else 0.
# Python 2 script: for each N, accumulate a coprime-weighted sum t and print
# it with its residue mod 8.  Under Python 2, N/2 and y/2 are integer division.
for N in range(4,51):
 t=4*(N-2)*(N-1)+4*(N-4)*(N-2)
 for y in range(3,N/2+1):
  for x in range(1,y/2+1):
   t=t+8*coprime(x,y)*(N-2*y)*(N-y)
 print N, t, t%8.
import sys
# Stop here; the second experiment below is intentionally disabled.
sys.exit()
for N in range(4,51):
 t=4*(N-2)*(N-1)
 for y in range(1,N/2+1):
  for x in range(1,N/2+1):
   if x!=y:
    t=t+2*coprime(x,y)*(N-2*y)*(N-2*x)
 print N, t, t%8.
|
# coding: utf-8
from django.db import models
# Create your models here.
class User(models.Model):
 """User identified by an NFC card."""
 # NFC card identifier; doubles as the primary key.
 nfc_id = models.CharField('NFCID', max_length=64,primary_key=True)
 # Employee number.
 employee_no = models.CharField('社員番号', max_length=64)
 # Full name.
 name = models.CharField('氏名', max_length=256)
 def __unicode__(self):
  return self.name
class Equipment(models.Model):
 """A piece of equipment that can be lent out."""
 # Barcode on the item; doubles as the primary key.
 barcode = models.CharField('バーコード', max_length=64, primary_key=True)
 # Item name.
 name = models.CharField('品名', max_length=256)
 # Asset management number.
 manage_no = models.CharField('資産管理番号', max_length=128)
 # Name of the person responsible for the item.
 manage_user = models.CharField('管理者', max_length=256)
 # Free-form remarks.
 comment = models.TextField('備考', blank=True)
 def __unicode__(self):
  return self.name
class Rental(models.Model):
 """A lending/return event linking a user to a piece of equipment."""
 equipment = models.ForeignKey(Equipment)
 user = models.ForeignKey(User)
 # Action recorded for this event; "rent" marks an active loan.
 processing = models.CharField('処理', max_length=64)
 created_at = models.DateTimeField('更新時間', auto_now_add=True)
 def is_rentaled(self):
  # True while the item is out on loan.
  return self.processing == "rent"
 def __unicode__(self):
  return self.equipment.name
from django.http import HttpResponse
from django.shortcuts import render_to_response
from subprocess import check_call, CalledProcessError
from django.template import RequestContext
import json, httplib
def contact(req):
 """Placeholder contact page; always answers 501 Not Implemented."""
 return HttpResponse('Not Implemented Yet', status=501)
def about(req):
 """Render the static 'about' page."""
 context = RequestContext(req)
 return render_to_response('repository/about.jade', context_instance=context)
def terms(req):
 """Render the static terms-of-service page."""
 context = RequestContext(req)
 return render_to_response('repository/terms.jade', context_instance=context)
def rant(req):
 """Placeholder page; always answers 501 Not Implemented."""
 return HttpResponse('Not Implemented Yet', status=501)
def invalid_urls(req):
 """Render the CouchDB 'invalidUrls' view; '?refresh=1' re-runs the flagging job instead.

 With the refresh parameter, returns the helper's exit code as JSON rather
 than the rendered page.
 """
 if req.GET.get('refresh', False):
  code = refresh_invalid_urls()
  return HttpResponse(json.dumps({'returncode':code}), mimetype='application/json')
 # Query the local CouchDB directly (assumes a server on localhost:5984 — TODO confirm).
 url = '/records/_design/manage/_view/invalidUrls'
 conn = httplib.HTTPConnection('localhost:5984')
 conn.request(method='GET', url=url)
 response = conn.getresponse()
 content = response.read()
 return render_to_response('repository/invalidUrls.jade', json.loads(content))
def refresh_invalid_urls():
 """Run the node helper that flags invalid URLs; return its exit code (0 on success)."""
 # NOTE(review): absolute, machine-specific paths — consider moving to settings.
 try:
  return check_call(['/usr/local/bin/node', 'flagInvalidUrls.js'], cwd='/Users/ryan/dev/metadata-server/build/helpers')
 except CalledProcessError as err:
  return err.returncode
from plotly.offline import plot, iplot
import plotly.graph_objs as go
import numpy as np
import matplotlib as mpl
import plotly.plotly as py
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
# Horizontal bar chart of per-variable correlation with employee churn.
x_negative = ["X8","X7","X6","X5"]
x_positive = ["X4","X3","X2","X1"]
# NOTE(review): the 'Negative' trace carries positive values and vice versa —
# confirm the sign/label pairing is intended.
y_negative = [16,45,16,20]
y_positive = [-16,-45,-5,-38]
trace_1 = go.Bar(y=x_negative, x=y_negative, name="<b>Negative</b>",
    orientation="h",
    marker=dict(color="#F6E3CE", line=dict(color='rgb(107,131,166)', width=1)))
trace_2 = go.Bar(y=x_positive, x=y_positive, name="<b>Positive</b>",
    orientation="h",
    marker=dict(color="#A9D0F5", line=dict(color='rgb(166,135,107)', width=1)))
layout_churn = dict(title="<b>Correlation with employees probability of churn</b>",
    yaxis=dict(title="<b>Variable</b>"))
data_churn = [trace_1,trace_2]
# Figure is built but never plotted here; pass it to plot()/iplot() to render.
figure_churn = dict(data=data_churn, layout=layout_churn)
|
#RPG Dice system
#by Fábio Pinto
import random
def rollDice():
 """Prompt for a dice expression and dispatch to the single- or multi-die roller."""
 dice = input(f'\nChoose a dice to roll.(d2, d4, d6, d8, d10, d12, d20, d100)\n')
 first_char = dice.lower()[0]
 if first_char.isalpha():
  # 'dN' form: a single die.
  rollSingleDie(dice)
 elif first_char.isnumeric():
  # 'XdY' form: several dice.
  rollMultiDice(dice)
 else:
  print('To roll a single die use the prefix "d"')
 playAgain()
def rollSingleDie(dice):
    """Roll a single die given as 'dN' or 'dN+M' and print the result.

    Prints usage help when the expression does not start with 'd'.
    """
    splitText = dice.split('+', 1)
    base = splitText[0].lower()
    if base[0] == 'd':
        # BUG FIX: the original took int(dice[1:4]) from the FULL input, so
        # 'd20+3' tried int('20+') and crashed; it also silently truncated the
        # side count to 3 digits. Parse the part before '+' instead.
        diceNumber = int(base[1:])
        result = random.randint(1, diceNumber)
        if len(splitText) > 1:
            diceModifier = int(splitText[1])
            print(f'\nYou rolled a {dice.lower()}')
            print(f'Your result is: {result} + {diceModifier} = {result+diceModifier}')
        else:
            print(f'\nYou rolled a {dice.lower()}')
            print(f'Your result is: {result}')
    else:
        print('To roll a single die use the prefix d')
def rollMultiDice(dice):
    """Roll an 'XdY' or 'XdY+Z' expression, printing each roll and the total.

    The original duplicated the whole rolling loop in the with-modifier and
    without-modifier branches; this keeps one loop and handles the optional
    modifier at the end. Output text and random-call order are unchanged.
    """
    split_DiceModifier = dice.split('+')
    if len(split_DiceModifier) > 2:
        print('To roll multiple dice, use "XdY + Z"')
        return
    # 'XdY' -> X dice with Y sides each.
    split_IndexNumber = split_DiceModifier[0].split('d', 1)
    diceIndex = int(split_IndexNumber[0])
    diceNumber = int(split_IndexNumber[1])
    totalRoll = 0
    for rollNumber in range(1, diceIndex + 1):
        currentRoll = random.randint(1, diceNumber)
        totalRoll += currentRoll
        print(f'Roll #{rollNumber}: {currentRoll}')
    if len(split_DiceModifier) == 2:
        diceModifier = int(split_DiceModifier[1])
        print(f'And your total roll is: {totalRoll} + {diceModifier} = {totalRoll+diceModifier}')
    else:
        print(f'And your total roll is: {totalRoll}')
def playAgain():
    """Ask whether to roll again; 'y' restarts the game loop via rollDice().

    NOTE(review): any answer other than 'y' or 'n' falls through silently, and
    recursion (rollDice -> playAgain -> rollDice) grows the stack on long runs.
    """
    playAgain = input(f'\nDo you wish to roll again? (y/n)\n')  # local shadows the function name
    if playAgain == 'y':
        rollDice()
    elif playAgain == 'n':
        input(f'Press any key to exit\n')

# Script entry point: start the first roll.
rollDice()
# sqlalchemy-migrate schema migration: creates the `options` and `question` tables.
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema

# pre_meta reflects the schema before this migration; post_meta describes the
# schema after it (only post_meta is populated here).
pre_meta = MetaData()
post_meta = MetaData()

# Answer options for quiz questions.
options = Table('options', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('opt', String(length=140)),
)

# Quiz questions. NOTE(review): `cora` and `author` are plain Integers —
# presumably application-level references to an option id and a user id;
# confirm whether real foreign keys were intended.
question = Table('question', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('qname', String(length=140)),
    Column('quest', String(length=140)),
    Column('cora', Integer),
    Column('author', Integer),
)
def upgrade(migrate_engine):
    """Create the `options` and `question` tables (post-migration schema)."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['options'].create()
    post_meta.tables['question'].create()
def downgrade(migrate_engine):
    """Drop the tables created by upgrade(), reversing this migration."""
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['options'].drop()
    post_meta.tables['question'].drop()
|
# -*- coding: utf-8 -*-
from ._TIC_Tools import *
from datetime import datetime
from datetime import timedelta
from io import BytesIO, StringIO
def _read_stream2(stream, length):
# if not isinstance(length, int):
# raise TypeError("expected length to be int")
if length < 0:
raise ValueError("length must be >= 0", length)
data = stream.read(length)
if len(data) != length:
raise FieldError("could not read enough bytes, expected %d, found %d" % (length, len(data)))
return data
def _write_stream2(stream, length, data):
# if not isinstance(data, bytes):
# raise TypeError("expected data to be a bytes")
if length < 0:
raise ValueError("length must be >= 0", length)
if len(data) != length:
raise FieldError("could not write bytes, expected %d, found %d" % (length, len(data)))
written = stream.write(data)
if written is not None and written != length:
raise FieldError("could not write bytes, written %d, should %d" % (written, length))
class TICEnum(Construct):
    r"""
    Parses the length (bit 8 = 1) / or Enum field (bit 8 = 0).
    If Enum field Then set the subcon according to Enum Array passed in parameters
    If Length field, parse a string of (length field)&0x7F size
    .. seealso:: The prefixed class from whom this is inspired
    :param enum_strings: an array containing enum strings
    Example::
        >>> TICEnum(_E_CONTRAT_STRINGS).parse(b"\x85AZERT?????")
        b'AZERT'
        >>> TICEnum(_E_CONTRAT_STRINGS).parse(b"\x05??????????")
        b'HTA_5'
    """
    __slots__ = ["name", "enum_strings"]

    def __init__(self, enum_strings):
        super(TICEnum, self).__init__()
        # Lookup table: enum index -> display string.
        self.enum_strings = enum_strings

    def _parse(self, stream, context, path):
        # First byte is either a length (high bit set) or an enum index.
        length = _read_stream2(stream, 1)[0]
        # Sensor firmware revision from the context; '' means unknown -> 0.
        revision = GetValueFromKeyLookUP(context, 'rev')
        revision = int(revision) if (revision != '') else 0
        if (length & 0x80):
            # High bit set: remaining 7 bits give the length of an inline string.
            length &= 0x7F
            return (_read_stream2(stream, length).decode())
        else:
            # NOTE(review): revisions 4852..5339 apparently emit indices shifted
            # down by one; compensated here and mirrored in _build — confirm bounds.
            if (revision >= 4852) and (revision <= 5339):
                length = length + 1
            if (length < len(self.enum_strings)):
                return (self.enum_strings[length])
            else:
                raise ExplicitError("Not a valid Enum : %d (0x%02x)" % (length, length))

    def _build(self, obj, stream, context, path):
        # obj should be a string
        b = bytearray()
        revision = GetValueFromKeyLookUP(context, 'rev')
        revision = int(revision) if (revision != '') else 0
        try:
            # Known enum value: emit its (possibly revision-adjusted) index byte.
            index = self.enum_strings.index(obj)
            if (revision >= 4852) and (revision <= 5339):
                index = index - 1
            b.append(index)
            _write_stream2(stream, 1, b)
        except ValueError:
            # Not in the enum table: emit as a length-prefixed string (high bit set).
            length = len(obj)
            b.append(length | 0x80)
            b.extend(obj.encode())
            _write_stream2(stream, length + 1, b)
        return obj

    def _sizeof(self, context, path):
        # Variable-length encoding: size cannot be known statically.
        raise SizeofError("TICEnum: cannot calculate size")
##############################################################################
# BEWARE : Following enums MUST match those from TIC sensor embedded parser #
##############################################################################
_E_CONTRAT_STRINGS = [
"_Err", "_AnyChange", "_Empty",
"BT 4 SUP36", "BT 5 SUP36", "HTA 5 ", "HTA 8 ",
"TJ EJP ", "TJ EJP-HH ", "TJ EJP-PM ", "TJ EJP-SD ", "TJ LU ",
"TJ LU-CH ", "TJ LU-P ", "TJ LU-PH ", "TJ LU-SD ", "TJ MU ",
"TV A5 BASE", "TV A8 BASE"]
_E_PT_STRINGS = [
"_Err","_AnyChange","_Empty",
" ? ",
"000", "HC", "HCD", "HCE", "HCH", "HH", "HH ", "HP", "HP ",
"HPD", "HPE","HPH", "JA", "JA ", "P","P ", "PM", "PM ", "XXX"]
_E_DIV_STRINGS = [
"_Err","_AnyChange","_Empty",
" ACTIF","ACTIF","CONSO","CONTROLE","DEP","INACTIF","PROD","TEST","kVA","kW"]
_E_STD_PT_STRINGS = [
"_Err","_AnyChange","_Empty",
" ? ",
"000", "HC", "HCD", "HCE", "HCH", "HH", "HH ", "HP", "HP ",
"HPD", "HPE","HPH", "JA", "JA ", "P","P ", "PM", "PM ", "XXX",
"INDEX NON CONSO","BASE","HEURE CREUSE","HEURE PLEINE","HEURE NORMALE","HEURE POINTE",
"HC BLEU","BUHC","HP BLEU","BUHP","HC BLANC","BCHC","HP BLANC","BCHP", "HC ROUGE","RHC","HP ROUGE","RHP",
"HEURE WEEK-END"]
_E_STD_CONTRAT_STRINGS = [
"_Err","_AnyChange","_Empty",
"BT 4 SUP36", "BT 5 SUP36", "HTA 5 ", "HTA 8 ",
"TJ EJP ", "TJ EJP-HH ", "TJ EJP-PM ", "TJ EJP-SD ", "TJ LU ",
"TJ LU-CH ", "TJ LU-P ", "TJ LU-PH ", "TJ LU-SD ", "TJ MU ",
"TV A5 BASE", "TV A8 BASE",
"BASE","H PLEINE-CREUSE","HPHC","HC","HC et Week-End","EJP","PRODUCTEUR"]
##########################################################################
# BEWARE : Above enums MUST match those from TIC sensor embedded parser #
##########################################################################
TYPE_E_CONTRAT = TICEnum(_E_CONTRAT_STRINGS)
TYPE_E_PT = TICEnum(_E_PT_STRINGS)
TYPE_E_DIV = TICEnum(_E_DIV_STRINGS)
TYPE_STD_E_CONTRAT = TICEnum(_E_STD_CONTRAT_STRINGS)
TYPE_STD_E_PT = TICEnum(_E_STD_PT_STRINGS)
# ADAPTER: "SYYMMDDHHMMSS" ==> 7 bytes
class SDMYhmsToUTF8Class(Adapter):
    """Adapter between a 7-byte packed timestamp (1 season/status char followed
    by 6 numeric bytes) and its 13-character text form "SYYMMDDHHMMSS"."""
    def _encode(self, obj, context):
        # First char kept verbatim; each following 2-digit pair becomes one byte.
        res = obj[0] + ''.join([chr(int(obj[i:i+2])) for i in range(1, len(obj), 2)])
        return res.encode()
    def _decode(self, obj, context):
        # Byte 0 is the season/status character; bytes 1..6 are YY MM DD HH MM SS.
        res = "%c%02d%02d%02d%02d%02d%02d" % (obj[0], obj[1], obj[2], obj[3], obj[4], obj[5], obj[6])
        return res

TYPE_SDMYhms = SDMYhmsToUTF8Class(Bytes(7))
# ADAPTER: "JJ/MM/AA HH:MM:SS" ==> 6 bytes
class DMYhmsToUTF8Class(Adapter):
    """Adapter between 6 packed date bytes and the text "JJ/MM/AA HH:MM:SS"."""
    def _encode(self, obj, context):
        # Step 3 picks positions 0,3,6,9,12,15 — the 2-digit fields between the
        # '/', ' ' and ':' separators.
        res = ''.join([chr(int(obj[i:i+2])) for i in range(0, len(obj), 3)])
        return res.encode()
    def _decode(self, obj, context):
        res = "%02d/%02d/%02d %02d:%02d:%02d" % (obj[0], obj[1], obj[2], obj[3], obj[4], obj[5])
        return res

TYPE_DMYhms = DMYhmsToUTF8Class(Bytes(6))
# Timestamped values: a packed date followed by an unsigned integer of
# decreasing width (24, 16, 8 bits).
TYPE_SDMYhmsU24 = Struct(
    "Date" / TYPE_SDMYhms,
    "Value" / Int24ub
)
TYPE_SDMYhmsU16 = Struct(
    "Date" / TYPE_SDMYhms,
    "Value" / Int16ub
)
TYPE_SDMYhmsU8 = Struct(
    "Date" / TYPE_SDMYhms,
    "Value" / Int8ub
)
# Timestamp date conversions:
# "JJ/MM/AA hh:mm:ss" ==> nb seconds since 01/01/2000
def _StrDateToTimestamp(strDate):
myDateRef = datetime.strptime('01/01/00 00:00:00', '%d/%m/%y %H:%M:%S')
myDate = datetime.strptime(strDate, '%d/%m/%y %H:%M:%S')
return int((myDate - myDateRef).total_seconds())
# nb seconds since 01/01/2000 ==> "JJ/MM/AA hh:mm:ss"
def _TimestampToStrDate(u32Seconds):
myDate = datetime.strptime('01/01/00 00:00:00', '%d/%m/%y %H:%M:%S')
myDate += timedelta(seconds=u32Seconds)
return myDate.strftime('%d/%m/%y %H:%M:%S')
# ADAPTER: "JJ/MM/AA HH:MM:SS" <==> Timestamp (U32)
class DMYhmsToTimeStampClass(Adapter):
    """Adapter between the textual date form and a U32 count of seconds
    since 2000-01-01 00:00:00."""
    def _encode(self, obj, context):
        return _StrDateToTimestamp(obj)
    def _decode(self, obj, context):
        return _TimestampToStrDate(obj)

TYPE_tsDMYhms = DMYhmsToTimeStampClass(Int32ub)

# Timestamp followed by a tariff-period enum.
TYPE_tsDMYhms_E_PT = Struct(
    "Date" / TYPE_tsDMYhms,
    "PT" / TYPE_E_PT,
)

TYPE_U32xbe = BytesTostrHexClass(Bytes(4))  # 4 raw bytes rendered as a hex string
TYPE_bf8d = Int8ub  # 8-bit bitfield kept as a plain byte
class hhmmSSSSClass(Adapter):
    # hhmmSSSS <=> b'xxxx'
    """Adapter for a start-time slot: text "hhmmSSSS" <=> 4 raw bytes, or the
    literal "NONUTILE" (slot unused) <=> a single 0xFF byte."""
    def _encode(self, obj, context):
        res = bytearray(b'')
        if (obj[0:1] == 'N'):
            # "NONUTILE" marker encodes as a single 0xFF byte.
            res.append(255)
        else:
            res.append(int(obj[0:2]))                 # hours
            res.append(int(obj[2:4]))                 # minutes
            res = res + bytearray.fromhex(obj[4:8])   # two trailing bytes, hex text
        return res
    def _decode(self, obj, context):
        res = ""
        if (obj[0] == 0xFF):
            res += "NONUTILE"
        else:
            # Hours/minutes as decimal, trailing two bytes as hex.
            res += "%02d%02d%02x%02x" % (obj[0], obj[1], obj[2], obj[3])
        return res

# Peek the first byte to decide between the 1-byte "unused" form (0xFF) and
# the full 4-byte form before handing the bytes to the adapter.
TYPE_hhmmSSSS = Struct(
    "FirstByte" / Peek(Int8ub),
    "Value" / IfThenElse(this.FirstByte == 255,
                         hhmmSSSSClass(Bytes(1)),
                         hhmmSSSSClass(Bytes(4))
                         )
)
# A schedule field is 11 consecutive hhmmSSSS slots.
TYPE_11hhmmSSSS = TYPE_hhmmSSSS[11]
'''
# Below solution DOES NOT WORK TWICE. ONLY ONCE IF end of stream !!! (cause of GreedyByte)
class _11hhmmSSSSClass_(Adapter):
    # 11 fois
    # hhmmSSSS <=> b'xxxx'
    # ou
    # NONUTILE <=> b'\xFF'
    def _encode(self, obj, context):
        i = 0
        res = bytearray(b'')
        for j in range(0,11):
            if (obj[i:i+1] == 'N'):
                res.append(255)
            else:
                res.append(int(obj[i:i+2]))
                res.append(int(obj[i+2:i+4]))
                res = res + bytearray.fromhex(obj[i+4:i+8])
            i = i + 9
        return res
    def _decode(self, obj, context):
        i = 0
        res = ""
        for j in range(0,11):
            if (obj[i] == 0xFF):
                res += "NONUTILE"
                i = i + 1
            else:
                res += "%02d%02d%02x%02x" % (obj[i],obj[i+1],obj[i+2],obj[i+3])
                i = i + 4
            if (j < 10):
                res += ' '
        return res
TYPE_11hhmmSSSS = _11hhmmSSSSClass(GreedyBytes)
'''
# 24-bit value followed by a status/unit enum.
TYPE_U24_E_DIV = Struct(
    "Value" / Int24ub,
    "DIV" / TYPE_E_DIV,
)
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views

# Account routes: registration/login/logout plus Django's built-in
# password-reset flow rendered with app-specific templates.
urlpatterns = [
    path('register/', views.registerPage, name='register_page'),
    path('login/', views.logInPage, name='log_in_page'),
    path('logout/', views.logOutPage, name='log_out_page'),
    # NOTE(review): template names mix snake_case and hyphenated styles
    # (password_reset_form.html vs password-reset-done.html) — confirm both exist.
    path('password-reset/',
         auth_views.PasswordResetView.as_view(template_name='accounts/password_reset_form.html'),
         name='password_reset'),
    path('password-reset/done/',
         auth_views.PasswordResetDoneView.as_view(template_name='accounts/password-reset-done.html'),
         name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>/',
         auth_views.PasswordResetConfirmView.as_view(template_name='accounts/password-reset-confirm.html'),
         name='password_reset_confirm'),
    path('updateprofile/<id>/', views.updateProfilePage, name='update_profile_page')
]
class User():
    """Lightweight value object pairing a user's id and name with its settings."""

    def __init__(self, id_, name, setting):
        # `id_` avoids shadowing the builtin at the call site; stored as `.id`.
        self.id = id_
        self.name = name
        self.setting = setting

    def __str__(self):
        # "id::<id>, name::<name>" followed by the settings' string form.
        return f'id::{self.id}, name::{self.name}' + str(self.setting)  # noqa
|
import numpy as np
import scipy.sparse as sparse
import time
class richards:
    """Lateral subsurface-flow solver over a set of hydrologic response units
    (HRUs), using a Brooks–Corey retention model and a transmissivity that
    decays exponentially with depth.

    NOTE(review): `self.dx` and `self.nhru` are read by the methods below but
    never set in __init__ — presumably assigned by the caller after
    construction; confirm.
    """

    def __init__(self, nhru, nsoil):
        # Per-HRU soil state/parameters (2-D arrays carry one column per layer).
        self.theta = np.zeros((nhru, nsoil))   # volumetric soil moisture
        self.thetar = np.zeros(nhru)           # residual moisture
        self.thetas = np.zeros(nhru)           # saturated moisture
        self.b = np.zeros(nhru)                # Brooks–Corey exponent
        self.satpsi = np.zeros(nhru)           # air-entry potential [m]
        self.ksat = np.zeros(nhru)             # saturated hydraulic conductivity [m/s]
        self.dem = np.zeros(nhru)              # surface elevation [m]
        self.slope = np.zeros(nhru)
        self.area = np.zeros(nhru)             # HRU area [m2]
        self.dz = np.zeros((nhru, nsoil))      # layer thicknesses [m]
        self.hdiv = np.zeros((nhru, nsoil))    # computed lateral divergence [mm/s]
        self.m = np.zeros(nhru)                # transmissivity decay length [m]
        # Initialize the width array (connection widths) and topology matrix;
        # both are filled in externally (sparse matrices once populated).
        self.width = []
        self.I = []
        return

    def calculate_soil_moisture_potential(self, il):
        """Brooks–Corey matric potential psi for soil layer `il`."""
        eps = 0.01
        theta = self.theta[:, il]
        thetar = self.thetar
        thetas = self.thetas
        b = self.b
        satpsi = self.satpsi  # meters
        # Clamp moisture slightly above residual so the power law stays finite.
        # NOTE(review): this writes through the view, mutating self.theta.
        m = (theta <= (1 + eps) * thetar)
        theta[m] = (1 + eps) * thetar[m]
        '''
        print thetas, thetar, theta
        print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
        for i in range(len(theta)):
            print i, (theta[i]-thetar[i])/(thetas[i]-thetar[i])
            print i, (theta-thetar)[i]/(thetas-thetar)[i]
            print "@@@@@@@@@@"
        #print np.true_divide((theta-thetar),(thetas-thetar)), satpsi, b
        if len(thetas[~np.isfinite(thetas)]) > 0 :
            print "INVALID:", thetas[~np.isfinite(thetas)]
            thetas[~np.isfinite(thetas)] = np.nan
        bad = np.invert(np.isfinite(theta-thetar))
        if np.sum(bad):
            print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$"
            print (thetas-thetar)[bad]
        '''
        with np.errstate(invalid='ignore'):
            psi = satpsi * ((theta - thetar) / (thetas - thetar)) ** -b
        return psi

    def calculate_hydraulic_conductivity(self, psi, il):
        """Lateral unsaturated hydraulic conductivity K_x [m/s] (Brooks–Corey)."""
        af = 1.0  # anisotropy factor ("safe" default)
        Ksat_x = af * self.ksat[:]  # lateral saturated hydraulic conductivity (times anisotropy factor) [m/s]
        with np.errstate(invalid='ignore'):
            K_x = Ksat_x * (psi / self.satpsi) ** (-2 - 3 / self.b)
        return K_x

    def calculate_transmissivity(self, psi, ztop, zbot):
        """Layer transmissivity between depths ztop and zbot, assuming
        conductivity decays exponentially with depth (decay length self.m)."""
        af = 2.0  # anisotropy factor
        m = np.copy(self.m)
        Ksat_x = af * self.ksat[:]  # lateral saturated hydraulic conductivity (times anisotropy factor) [m/s]
        with np.errstate(invalid='ignore', divide='ignore'):
            K_x = Ksat_x * np.true_divide(psi, self.satpsi) ** (-2 - np.true_divide(3., self.b))
        K_x[~np.isfinite(K_x)] = np.nan
        # Calculate transmissivity at top of layer (exponential decay)
        Ttop = m * K_x * np.exp(-ztop / m)
        # Calculate transmissivity at bottom of layer (exponential decay)
        Tbot = m * K_x * np.exp(-zbot / m)
        T = Ttop - Tbot
        return T

    def calculate_hydraulic_head(self, psi):
        """Hydraulic head h [m]: surface elevation minus matric potential."""
        h = self.dem - psi
        return h

    def calculate_divergence_dense(self, h, T):
        """All-pairs (dense) lateral flux divergence between HRUs [mm/s]."""
        dh = h[:, np.newaxis] - h[np.newaxis, :]
        # Separation distance: horizontal spacing combined with elevation difference.
        dx = (np.abs(self.dem[:, np.newaxis] - self.dem[np.newaxis, :]) ** 2 + self.dx ** 2) ** 0.5
        w = np.array(self.width.todense())
        area = self.area
        with np.errstate(invalid='ignore', divide='ignore'):
            # Harmonic mean of the two cells' transmissivities.
            That = np.true_divide((2 * T[:, np.newaxis] * T[np.newaxis, :]), (T[:, np.newaxis] + T[np.newaxis, :]))
            That[~np.isfinite(That)] = np.nan
            # [mm/s] = [mm/m]*[m/s]*[m]/[m]*[m]*[m]/[m2]
            calc_div = -1000.0 * That * np.true_divide(dh, dx) * np.true_divide(w, area)  # mm/s
            calc_div[~np.isfinite(calc_div)] = np.nan
        return calc_div

    def calculate_divergence_sparse(self, h, T):
        """Sparse-matrix version of calculate_divergence_dense for large HRU counts."""
        # Define the boolean matrix (connections or not?)
        I = self.I
        # Head differences over connected pairs only.
        h1 = (I != 0).multiply(sparse.csr_matrix(h))
        dh = h1.T - h1
        # Separation distance assembled sparsely: sqrt(d_elev^2 + dx^2).
        d1 = (I != 0).multiply(sparse.csr_matrix(self.dem))
        dx = d1.T - d1
        dx = dx.power(2)
        dx.data += self.dx ** 2
        dx = dx.power(0.5)
        # Harmonic-mean transmissivity over connected pairs.
        t1 = (I != 0).multiply(sparse.csr_matrix(T))
        n = 2 * t1.T.multiply(t1)
        d = t1.T + t1
        That = n.multiply(d.power(-1)).tocsr()
        # NOTE(review): leftover debug prints below (count_nonzero is printed
        # unbound, without calling it) — remove once validated.
        print(That.count_nonzero, self.width.count_nonzero)
        # Flux: [m/s] = [m/s]*[m]/[m]*[m]*[m]/[m2]
        print(That.multiply(dh).shape)
        print(That.multiply(dh).multiply(self.width).count_nonzero)
        print(1.0 / self.area)
        print(That.multiply(dh).multiply(self.width).multiply(1.0 / self.area).count_nonzero)
        print(That.multiply(dh).multiply(self.width).multiply(1.0 / self.area).multiply(dx.power(-1)).count_nonzero)
        return -That.multiply(dh).multiply(self.width).multiply(1.0 / self.area).multiply(dx.power(-1)).multiply(1000)  # mm/s

    def update(self, type='sparse'):
        """Recompute the lateral divergence self.hdiv for every soil layer."""
        # Determine if sparse or not: dense path is used for small problems.
        if self.nhru <= 1000: type = 'dense'
        # Iterate per layer
        for il in range(self.theta.shape[1]):
            # Calculate soil moisture potential
            psi = self.calculate_soil_moisture_potential(il)
            # Depth bounds of this layer (cumulative thicknesses).
            zbot = np.sum(self.dz[:, 0:il + 1], axis=1)
            ztop = zbot - self.dz[:, il]
            T = self.calculate_transmissivity(psi, ztop, zbot)
            # Calculate hydraulic head
            h = self.calculate_hydraulic_head(psi)
            # Calculate the divergence
            if type == 'dense':
                q = self.calculate_divergence_dense(h, T)
                self.hdiv[:, il] = np.sum(q, axis=0)  # mm/s
            elif type == 'sparse':
                q = self.calculate_divergence_sparse(h, T)
                self.hdiv[:, il] = q.sum(axis=0)  # mm/s
        return
|
from tastypie import fields
class UserResource(ModelResource):
    """Tastypie REST resource exposing all User objects at /user/.

    NOTE(review): ModelResource, Authorization and the model classes are not
    imported in this file's visible imports — confirm they resolve.
    """
    class Meta:
        queryset = User.objects.all()
        resource_name = 'user'
        authorization = Authorization()  # no-op authorization: allows everything
class ClientResource(ModelResource):
    """Tastypie REST resource exposing all Client objects at /client/."""
    class Meta:
        queryset = Client.objects.all()
        resource_name = "client"
        authorization = Authorization()
class OrderResource(ModelResource):
    """Tastypie REST resource exposing all Order objects at /orders/,
    with the owning client embedded as a foreign-key field."""
    client = fields.ForeignKey(ClientResource, 'client')
    class Meta:
        queryset = Order.objects.all()
        resource_name = 'orders'
        authorization = Authorization()
|
from __future__ import print_function
import ROOT
import pytest
import re
# PDG codes exercised by the parametrized test (quarks, mesons, gluon, K0s...).
PDG_PARTICLES = [
    1, 2, 3, 4, -313, -213,
    221, 323, 21, 310, 313, 223, -323, 213
]
# Particle-name stems that have a ROOT/LaTeX symbol form (prefixed with '#').
KNOWN_PARTICLES = {
    "Xi", "Sigma", "Lambda", "Delta",
    "rho", "omega", "eta", "phi", "pi"
}
def pdg2name(x):
    """Map a PDG code to its particle name via ROOT's TParticle
    (the 13 zeroed args are the unused kinematic constructor parameters)."""
    return ROOT.TParticle(x, *[0] * 13).GetName()
@pytest.mark.parametrize("pdg", PDG_PARTICLES)
def test_converts_names(pdg):
    """Smoke test: the pdg -> name -> ROOT-label chain runs for each code
    (output is printed for inspection; no assertion is made)."""
    pname = pdg2name(pdg)
    print(pdg, pname, name2rootname(pname))
def name2rootname(name):
    """Convert a TParticle name into a ROOT TLatex-style label.

    Non-letter characters become superscripts (^{+}), '_bar' becomes a #bar
    prefix, known particle stems get the '#' symbol prefix, and a trailing
    '_suffix' becomes a subscript (_{suffix}).
    """
    # BUG FIX: iterate DISTINCT special characters. The original looped over
    # every occurrence from findall, so a repeated character (e.g. 'a++') was
    # wrapped twice, producing '^{^{+}}'. The replacements for distinct
    # characters are independent, so set order does not affect the result.
    for s in set(re.findall(r"[^a-zA-Z_]", name)):
        name = name.replace(s, "^{" + s + "}")
    if "_bar" in name:
        name = name.replace("_bar", "")
        name = "#bar" + name
    for k in KNOWN_PARTICLES:
        if k in name:
            name = name.replace(k, "#" + k)
    if "_" in name:
        # Everything after the first underscore becomes one subscript group.
        idx = name.find("_")
        name = name.replace(name[idx:], "_{" + name[idx + 1:] + "}")
    return name
|
import os
import csv
import statistics

# Input dataset: Date, Profit/Losses per month.
csvpath = os.path.join("..", "Resources", "budget_data.csv")

with open(csvpath, newline='', encoding='utf-8') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csv_header = next(csvreader)  # skip the header row
    Month = []
    Total = []
    for row in csvreader:
        Month.append(row[0])
        Total.append(int(row[1]))

# Month-over-month changes. The original loop carried a dead `else` branch
# (its guard `(n-1) < (len(Total)-1)` was always true for the range iterated);
# a direct pairwise difference produces the identical list.
Profit = [Total[n + 1] - Total[n] for n in range(len(Total) - 1)]

# index()+1 maps a change back to the month it ended on.
Greatest_Increase_Month = Month[Profit.index(max(Profit)) + 1]
Greatest_Decrease_Month = Month[Profit.index(min(Profit)) + 1]
Average_Change = round(statistics.mean(Profit), 2)
Total_Months = len(Month)
Total_Amount = sum(Total)

def Results():
    """Return the formatted financial-analysis report as one string."""
    return ("Financial Analysis\n" +
            "----------------------------\n" +
            f"Total Months: {Total_Months} \n" +
            f"Total: ${Total_Amount}\n" +
            f"Average Change: ${Average_Change}\n" +
            f"Greatest Increase in Profits: {Greatest_Increase_Month} (${max(Profit)})\n" +
            f"Greatest Decrease in Profits: {Greatest_Decrease_Month} (${min(Profit)})\n")

print(Results())

"""
EXPORT OUTPUT TO TEXT FILE
"""
output_path = os.path.join("..", "Resources", "PyBankOutput.txt")
with open(output_path, 'w', newline='') as text:
    text.write(Results())
|
import numpy as np
STUDENT = {'name': 'sam mordoch ,dvir ben abu',
'ID': '313295396 204675235'}
from loglinear import softmax
def classifier_output(x, params):
    """Forward pass of a tanh MLP.

    params: flat list [W1, b1, W2, b2, ...].
    Returns (probs, z_layers, h_layers): probs is the softmax of the final
    pre-activation; z_layers/h_layers hold the HIDDEN layers' pre- and
    post-activation values (the output layer's entries are popped off).
    """
    # z is the layer before activate the activation function.
    z_layers = []
    # h is the layer after activation function.
    h_layers = []
    h = x
    for index in range(0, len(params), 2):
        w = params[index]
        b = params[index + 1]
        z = np.dot(h, w)
        z = np.add(z, b)
        # NOTE: tanh of the FINAL z is computed but unused — softmax uses z below.
        h = np.tanh(z)
        z_layers.append(z)
        h_layers.append(h)
    # Drop the output layer's entries; callers only need the hidden activations.
    h_layers.pop()
    z_layers.pop()
    probs = softmax(z)
    return probs, z_layers, h_layers
def predict(x, params):
    """Return the index of the highest-probability class for input x."""
    probs = classifier_output(x, params)[0]
    return np.argmax(probs)
def loss_and_gradients(x, y, params):
    """
    params: a list as created by create_classifier(...)

    returns:
        loss,[gW1, gb1, gW2, gb2, ...]

    loss: scalar
    gW1: matrix, gradients of W1
    gb1: vector, gradients of b1
    gW2: matrix, gradients of W2
    gb2: vector, gradients of b2
    ...

    (of course, if we request a linear classifier (ie, params is of length 2),
    you should not have gW2 and gb2.)
    """
    pred_vec, z_layers, h_layers = classifier_output(x, params)
    # Cross-entropy loss on the true class y.
    y_hat = pred_vec[y]
    loss = - np.log(y_hat)
    y_hot_vector = np.zeros(pred_vec.shape)
    y_hot_vector[y] = 1
    # Gradient w.r.t. the output pre-activation: softmax - one-hot.
    gb = pred_vec - y_hot_vector
    gWs_gbs = []
    gWs_gbs.insert(0, gb)
    # Every other param is a weight matrix.
    Ws = params[0::2]
    # Backpropagate through the hidden layers, last to first; each iteration
    # prepends that layer's (gW, gb) so the list ends up in forward order.
    for index in range(len(Ws) - 1):
        # dL/dW = (previous hidden activation)^T outer (upstream gradient)
        dz_dW = h_layers[-(index + 1)].T
        gW = np.outer(dz_dW, gb)
        gWs_gbs.insert(0, gW)
        U = Ws[-(index + 1)]
        # tanh'(z) = 1 - tanh(z)^2, applied to this layer's pre-activation.
        dz_dh = 1 - (np.tanh(z_layers[-(index + 1)]) ** 2)
        dz_dh = U.T * dz_dh
        gb = np.dot(gb, dz_dh)
        gWs_gbs.insert(0, gb)
    # First layer's weight gradient uses the raw input x.
    gFirst_w = np.outer(x, gb)
    gWs_gbs.insert(0, gFirst_w)
    return loss, gWs_gbs
def create_classifier(dims):
    """
    returns the parameters for a multi-layer perceptron with an arbitrary number
    of hidden layers.

    dims is a list of length at least 2, where the first item is the input
    dimension, the last item is the output dimension, and the ones in between
    are the hidden layers. A tanh activation is assumed between all layers.

    return:
        a flat list [W1, b1, W2, b2, ...] where each (W, b) pair maps one
        layer to the next, initialized with Xavier/Glorot uniform sampling.
    """
    params = []
    for fan_in, fan_out in zip(dims, dims[1:]):
        # Glorot limit for the weights: sqrt(6) / sqrt(fan_in + fan_out).
        limit = np.sqrt(6) / (np.sqrt(fan_in + fan_out))
        params.append(np.random.uniform(-limit, limit, [fan_in, fan_out]))
        # Bias limit depends only on the layer's output width.
        limit = np.sqrt(6) / (np.sqrt(fan_out))
        params.append(np.random.uniform(-limit, limit, fan_out))
    return params
if __name__ == '__main__':
    # Sanity checks. If these fail, your gradient calculation is definitely wrong.
    # If they pass, it is likely, but not certainly, correct.
    from grad_check import gradient_check

    # Three-layer 2->2->2->2 network: params = [W, b, U, b_tag, V, b_tag_tag].
    W, b, U, b_tag, V, b_tag_tag = create_classifier([2, 2, 2, 2])

    # Each closure varies ONE parameter (the rest come from module globals) and
    # returns (loss, gradient-of-that-parameter) for the numeric checker.
    def _loss_and_W_grad(W):
        global b, U, b_tag, V, b_tag_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[0]

    def _loss_and_b_grad(b):
        global W, U, b_tag, V, b_tag_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[1]

    def _loss_and_U_grad(U):
        global W, b, b_tag, V, b_tag_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[2]

    def _loss_and_b_tag_grad(b_tag):
        global W, U, b, V, b_tag_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[3]

    def _loss_and_V_grad(V):
        global W, U, b, b_tag, b_tag_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[4]

    def _loss_and_b_tag_tag_grad(b_tag_tag):
        global W, U, b, V, b_tag
        loss, grads = loss_and_gradients([1, 2], 0, [W, b, U, b_tag, V, b_tag_tag])
        return loss, grads[5]

    # Re-randomize all parameters and numerically verify every gradient, 10x.
    for _ in range(10):
        W = np.random.randn(W.shape[0], W.shape[1])
        U = np.random.randn(U.shape[0], U.shape[1])
        V = np.random.randn(V.shape[0], V.shape[1])
        b = np.random.randn(b.shape[0])
        b_tag = np.random.randn(b_tag.shape[0])
        b_tag_tag = np.random.randn(b_tag_tag.shape[0])
        gradient_check(_loss_and_W_grad, W)
        gradient_check(_loss_and_b_grad, b)
        gradient_check(_loss_and_U_grad, U)
        gradient_check(_loss_and_b_tag_grad, b_tag)
        gradient_check(_loss_and_V_grad, V)
        gradient_check(_loss_and_b_tag_tag_grad, b_tag_tag)
import torch
def initialze_cuda(SEED):
    """Initialize the GPU if available.

    Arguments:
        SEED : seed applied to the torch RNGs so runs draw (almost) the same
               data distributions each time.

    Returns:
        cuda: True if a GPU is available else False
        device: torch.device('cuda') or torch.device('cpu')
    """
    use_cuda = torch.cuda.is_available()
    print('Is CUDA Available?', use_cuda)
    # For reproducibility of results, seed both the CPU and (if present) GPU RNGs.
    torch.manual_seed(SEED)
    if use_cuda:
        torch.cuda.manual_seed(SEED)
    return use_cuda, torch.device("cuda" if use_cuda else "cpu")
|
from .scanLog import scanLog
from .antfits import ANTFITS
from .dcrfits import DCRFITS
#import argparse
import numpy
import pylab as plt
from scipy import signal
from numpy import random
from scipy.optimize import curve_fit
#import pyfits
def gauss(x, height, width, center, offset):
    """Gaussian with a constant baseline:
    height * exp(-((x - center)^2) / width^2) + offset."""
    arg = -(x - center) ** 2 / width ** 2
    return height * numpy.exp(arg) + offset
class DCRPoint(object):
    """Load and fit one DCR pointing scan: reads the DCR power samples and the
    Antenna positions for a scan, interpolates the power onto the antenna time
    grid, and fits a Gaussian to a chosen channel versus az or el.

    NOTE: this module uses Python 2 print statements.
    """
    def __init__(self, projId="AGBT15B_338",
                 session=4, scan=9,
                 fileroot=None):
        self.projId = projId
        self.session = session
        self.scan = scan
        # e.g. "AGBT15B_338_04" — NOTE(review): "_0" assumes a single-digit session.
        self.projDir = self.projId + "_0" + str(self.session)
        self.fileroot = fileroot

    def gatherData(self):
        """Read the DCR and Antenna FITS files for this scan and align them in time."""
        self.sl = scanLog(self.projDir, fileroot=self.fileroot)
        antFile = self.sl.getAntenna(self.scan)
        dcrFile = self.sl.getDCR(self.scan)
        print antFile
        # get DCR data
        dcrHDU = DCRFITS(dcrFile)
        dcrHDU.obtain_time_samples()
        dcrHDU.obtain_data()
        self.dcrTime = dcrHDU.dcrTimes
        self.dcrData = dcrHDU.data
        dcrHDU.close()
        # One power series per channel (6 channels assumed).
        self.dataArr = []
        for i in range(6):
            self.dataArr.append(self.dcrData[:, i])
        # get Antenna header info, and data
        # This only works for J2000 / encoder combination? I will need
        # to check with Joe! Can get around this with a
        # GetCurrentLocation("J2000") call
        antHDU = ANTFITS(antFile)
        antHDU.obtain_positional_data()
        antHDU.obtain_time_samples()
        self.antTime = antHDU.antTimes
        self.source = antHDU.hdulist[0].header['OBJECT']
        self.az, self.el = antHDU.compute_tracking_cen_coords()
        antHDU.close()
        # interpolate over time samples: put DCR power on the antenna time grid
        self.data = []
        for i in range(6):
            self.data.append(numpy.interp(self.antTime, self.dcrTime, self.dataArr[i]))
        # and create a "time since start" array (days -> seconds)
        self.time = (self.antTime - self.antTime[0]) * 24.0 * 3600.0

    def fit_data(self, chan=0, xtype='el', reorder=False):
        """Fit a Gaussian to channel `chan` power versus `xtype` ('el' or 'az').

        reorder: flip both arrays (for scans driven in the reverse direction).
        Stores the model curve in self.fit and (height, width, center, offset)
        in self.fit_params.
        """
        y = self.data[chan].copy()
        x = getattr(self, xtype).copy()
        if reorder:
            x = numpy.flipud(x)
            y = numpy.flipud(y)
        # Initial guesses: offset = mean, height = peak above mean,
        # width ~ 1/20 of the x range, center at mid-range.
        p4 = numpy.mean(y)
        p1 = numpy.max(y) - p4
        p2 = x[-1]/20.0
        p3 = x[-1] / 2.0
        print p1,p2,p3,p4
        # do the fit
        popt, pcov = curve_fit(gauss, x, y,
                               p0 = [p1, p2, p3, p4] )
        self.fit = gauss(x, popt[0], popt[1], popt[2], popt[3])
        self.fit_params = popt
# # This should be linearly increasing, but for planets it is flat.
# plt.plot(time,el)
# plt.show()
# # Here is what the data looks like
# plt.plot(time,data)
# plt.show()
# # hard code here some initial guesses
# p4 = numpy.mean(data)
# p1 = numpy.max(data) - p4
# p2 = time[-1] /20.0
# p3 = time[-1] / 2.0
# print p1,p2,p3,p4
# # do the fit
# popt, pcov = curve_fit(gauss, time, data,
# p0 = [p1, p2, p3, p4] )
# fit = gauss(time, popt[0],popt[1],popt[2],popt[3])
# # plot the results
# plt.plot(time,data)
# plt.plot(time,fit,"r-")
# plt.show()
|
'''
给你一个大小为 m x n 的网格和一个球。球的起始坐标为 [startRow, startColumn] 。你可以将球移到在四个方向上相邻的单元格内(可以穿过网格边界到达网格之外)。你 最多 可以移动 maxMove 次球。
给你五个整数 m、n、maxMove、startRow 以及 startColumn ,找出并返回可以将球移出边界的路径数量。因为答案可能非常大,返回对 109 + 7 取余 后的结果。
示例 1:
输入:m = 2, n = 2, maxMove = 2, startRow = 0, startColumn = 0
输出:6
示例 2:
输入:m = 1, n = 3, maxMove = 3, startRow = 0, startColumn = 1
输出:12
提示:
1 <= m, n <= 50
0 <= maxMove <= 50
0 <= startRow < m
0 <= startColumn < n
'''
from leetcode.tools.time import printTime
class Solution:
    '''
    Dynamic programming over (row, column, moves used).
    dp[i][j][k] = number of ways the ball is at cell (i, j) after exactly k moves.
    '''
    @printTime()
    def findPaths(self, m: int, n: int, maxMove: int, startRow: int, startColumn: int) -> int:
        # Swap so that n = number of rows, m = number of columns (matches loops below).
        n, m = m, n
        mod = 10 ** 9 + 7
        dp = [[[0 for _ in range(maxMove)] for _ in range(m)] for _ in range(n)]
        if maxMove > 0:
            # Base case: the ball starts at (startRow, startColumn) with 0 moves used.
            dp[startRow][startColumn][0] = 1
        for k in range(1, maxMove):
            # After k moves the ball can be at most k cells from the start,
            # so only that window needs updating.
            for i in range(max(0, startRow - k), min(n, startRow + k + 1)):
                for j in range(max(0, startColumn - k), min(m, startColumn + k + 1)):
                    # Sum ways of arriving from each in-bounds neighbor.
                    if i > 0:
                        dp[i][j][k] += dp[i - 1][j][k - 1]
                    if i < n - 1:
                        dp[i][j][k] += dp[i + 1][j][k - 1]
                    if j > 0:
                        dp[i][j][k] += dp[i][j - 1][k - 1]
                    if j < m - 1:
                        dp[i][j][k] += dp[i][j + 1][k - 1]
                    dp[i][j][k] %= mod
        # Every time the ball sits on a boundary cell with at least one move
        # left (k <= maxMove - 1), one extra move carries it out of the grid.
        # Corner cells appear in both loops, counting their two exits.
        cnt = 0
        for i in range(0, n):
            for k in range(maxMove):
                cnt += dp[i][0][k] + dp[i][m - 1][k]
        for j in range(0, m):
            for k in range(maxMove):
                cnt += dp[0][j][k] + dp[n - 1][j][k]
        return cnt % mod
# Driver: with maxMove = 0 no move can leave the grid, so the result is 0
# (the problem statement's example with maxMove = 2 yields 6).
m = 2
n = 2
maxMove = 0
startRow = 0
startColumn = 0
Solution().findPaths(m, n, maxMove, startRow, startColumn)
""" HTML web scratching """
from lxml import html
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd
import xml.etree.ElementTree as ET
with open('GPL11154.txt') as f:
    ff = f.readlines()

# Collect every GSE accession mentioned in the platform file and build the
# corresponding GEO query URL for it.
GSE = []
GSE_number = []
for item in ff:
    if "GSE" in item:
        a = item.strip()
        b = a[a.find('GSE'):]
        link = "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + b
        GSE_number.append(b)
        GSE.append(link)

# Scrape experiment type / summary / title for entries 1000..1999 only —
# presumably other index ranges were scraped in separate runs; confirm.
experiment = []
summary = []
title = []
for i in range(1000, 2000):
    page = requests.get(GSE[i])
    soup = BeautifulSoup(page.content, 'lxml')
    a = soup.find(string='Experiment type').parent
    b = a.find_next_sibling('td').get_text(" ")
    b = b.encode("utf-8")  # NOTE(review): stores bytes objects in the DataFrame
    experiment.append(b)
    a = soup.find(string="Summary").parent
    b = a.find_next_sibling('td').get_text(" ")
    b = b.encode("utf-8")
    summary.append(b)
    a = soup.find(string="Title").parent
    b = a.find_next_sibling('td').get_text(" ")
    b = b.encode("utf-8")
    title.append(b)
    time.sleep(1)  # throttle requests to NCBI

data = pd.DataFrame({'GSE Number': GSE_number, 'Experiment': experiment, 'Title': title, 'Summary': summary})
data.to_csv("GPL11154.CSV")
###GSM breast cancer label scratch
from lxml import html
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd

with open('GSE65216_GSMids.txt') as f:
    ff = f.readlines()

GSM_links = []
# BUG FIX: the original iterated over the undefined name `gsm` (NameError at
# runtime); the sample ids were read into `ff` above.
for item in ff:
    a = item.strip()
    link = "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=" + a
    GSM_links.append(link)

# Fetch the 'Characteristics' cell for every sample page that has one.
GSM = []
infor = []
for item in GSM_links:
    page = requests.get(item)
    soup = BeautifulSoup(page.content, 'lxml')
    try:
        a = soup.find(string='Characteristics').parent
        b = a.find_next_sibling('td').get_text(" ")
        b = b.encode("utf-8")
        infor.append(b)
        GSM.append(item.split("acc=")[1])
    except AttributeError:
        # Pages without a 'Characteristics' row are skipped (soup.find returns
        # None, so .parent raises AttributeError). Narrowed from a bare except
        # so real network/parse failures are no longer swallowed.
        pass

data = pd.DataFrame({'GSM number': GSM, 'Information': infor})
data.to_csv("GSE65216_Breast_information.csv")
###KEGG
###db
# Fetch all genes linked to KEGG pathway sce01230 (amino-acid biosynthesis).
link = "http://www.genome.jp/dbget-bin/get_linkdb?-t+genes+path:sce01230"
page = requests.get(link)
soup = BeautifulSoup(page.content, 'lxml')
a = soup.find("pre")
b = a.find_all("a")
genes = []
for item in b:
    # Link text is "org:gene"; keep the gene part.
    # BUG FIX: the original encoded the text to bytes and then called
    # bytes.split(":") with a str separator, which raises TypeError on
    # Python 3 (and stored b'...' literals in the CSV). Split as str instead.
    genes.append(item.get_text().split(":")[1])
data = pd.DataFrame({'Amino Acids': genes})
data.to_csv("AAMarker.csv")
|
import enum
from typing import (
    List,
    Optional,
    Tuple,
)

from slacktools.block_kit.base import BaseBlock
from slacktools.block_kit.types import (
    ConfirmationDialogType,
    DispatchActionType,
    OptionGroupType,
    OptionType,
)
class DispatchActions(enum.Enum):
    """Input-element triggers for dispatching ``block_actions`` payloads.

    The member *names* (not values) are what ends up in the
    ``trigger_actions_on`` list of a dispatch-action configuration.
    """

    on_enter_pressed = 0
    on_character_entered = 1
class CompositionObjects(BaseBlock):
    """Builders for Slack Block Kit composition objects.

    https://api.slack.com/reference/block-kit/composition-objects

    Each builder runs ``perform_assertions`` to validate Slack's documented
    text-length limits, then assembles the plain dict Slack expects.
    """

    @classmethod
    def make_confirm_object(cls, title: str = 'Are you sure?', text: str = 'Are you sure you want to do this?',
                            confirm_text: str = 'Confirm', deny_text: str = 'Cancel', is_danger: bool = False) -> \
            ConfirmationDialogType:
        """Makes a confirmation dialog object.

        https://api.slack.com/reference/block-kit/composition-objects#confirm

        Args:
            title: dialog title (max 100 chars).
            text: explanatory body text, rendered as markdown (max 300 chars).
            confirm_text: label of the confirm button (max 30 chars).
            deny_text: label of the deny/cancel button (max 30 chars).
            is_danger: when True, style the confirm button as 'danger' (red).
        """
        cls.perform_assertions({
            'title': (title, 100),
            'text': (text, 300),
            'confirm': (confirm_text, 30),
            'deny': (deny_text, 30),
        })
        confirm_obj = {
            'title': cls.plaintext_section(text=title),
            'text': cls.markdown_section(text=text),
            'confirm': cls.plaintext_section(text=confirm_text),
            'deny': cls.plaintext_section(text=deny_text),
            'style': 'danger' if is_danger else 'primary'
        }
        return confirm_obj

    @classmethod
    def make_option_object(cls, text: str, value: str, description: Optional[str] = None,
                           url: Optional[str] = None) -> OptionType:
        """Makes an option object for selects, radio buttons, etc.

        Args:
            text: label shown to the user (max 75 chars).
            value: payload sent back to the app when chosen (max 75 chars).
            description: optional secondary line under the label (max 75 chars).
            url: optional URL to load when the option is clicked (max 3000 chars).
        """
        cls.perform_assertions({
            'text': (text, 75),
            'value': (value, 75),
            'description': (description, 75),
            'url': (url, 3000)
        })
        option_obj = {
            'text': cls.markdown_section(text=text),
            'value': value
        }
        # 'description' and 'url' are optional in the Slack schema — only
        # include the keys when a value was supplied.
        if description is not None:
            option_obj['description'] = cls.plaintext_section(text=description)
        if url is not None:
            option_obj['url'] = url
        return option_obj

    @classmethod
    def make_option_group(cls, label: str, options: List[Tuple]) -> OptionGroupType:
        """NB! Option groups are only used in select or multi-select menus.

        Args:
            label: group heading (max 75 chars).
            options: tuples of ``make_option_object`` positional args
                (text, value[, description[, url]]); max 100 options.
        """
        # Bug fix: the assertion key was 'options ' with a stray trailing
        # space, inconsistent with every other perform_assertions call.
        cls.perform_assertions({
            'label': (label, 75),
            'options': (options, 100),
        })
        return {
            'label': cls.plaintext_section(text=label),
            'options': [cls.make_option_object(*x) for x in options]
        }

    @classmethod
    def make_dispatch_action_configuration(cls, dispatch_actions_list: List[DispatchActions]) -> \
            DispatchActionType:
        """Makes a dispatch-action configuration from the given triggers.

        The enum member *names* become the 'trigger_actions_on' entries.
        """
        return {
            'trigger_actions_on': [x.name for x in dispatch_actions_list]
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.