blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3220c6fedfbdef66c2d1bc9c9c4a39bc047ce8ae | 40c2bce56832d97797c115f60d1e0459fd4ebf93 | /Eclipse_Project_2/Section_1_5/database.py | 3c56f2e590f4f519ae6e0c1a2f4d52010d0af71a | [] | no_license | amanoj319319319/Eclipse_Python_LastSeleniumTest | 0be2e7f615160248f329b4df0e9d109612b29560 | 4d0978e4c2dfe9c3a9d4b429f7ff6340278c0252 | refs/heads/master | 2023-04-27T09:14:38.726807 | 2021-05-19T08:18:40 | 2021-05-19T08:18:40 | 267,038,244 | 0 | 0 | null | 2021-05-19T08:17:45 | 2020-05-26T12:35:36 | Python | UTF-8 | Python | false | false | 6,224 | py | #connecting to the database using database credentials and finding version of the database
'''
import cx_Oracle
con=cx_Oracle.connect('system/Manoj319319319')
if con!=None:
print ("successfully connected")
print ("Version is:-",con.version)
else:
print ("connection failed")
'''
#creating a table name in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="create table employees(eno number,ename varchar2(10),esal number(10,2))"
cursor=con.cursor()
cursor.execute(query)
print ("Table created succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#deleted a particular table name in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="drop table employees"
cursor=con.cursor()
cursor.execute(query)
print ("Table dropped succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#creating a table in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="create table employees(eno number,ename varchar2(10),esal number(10,2))"
cursor=con.cursor()
cursor.execute(query)
print ("Table created succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Inserting multiple values to the required paramters in the employees table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query = "insert into employees values(:eno,:ename,:esal)"
records=[(101,"manoj",10000),(102,"anki",20000),(103,"jyothi",30000)]
cursor.executemany(query,records)
con.commit()
print ("Record Inserted succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Reading input from the console
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
while True:
eno=int(input("Enter employee number:-"))
ename =(input("Enter employee name:-"))
esal = float(input("Enter employee salary:-"))
query = "insert into employees values(%d,'%s',%f)"
cursor.execute(query %(eno,ename,esal))
con.commit()
print ("Records Inserted succesfully")
option=input("Do you want to insert one more record[yes/no]")
if option == "no":
break
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Updating records in the database using SQL query
#The employees whose salary was less than 5000,i i had to increment Rs 1000 to their existing salary
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
increment=float(input("Enter increment amount:-"))
salaryrange=float(input("Enter salary range:-"))
query="update employees set esal=esal+%f where esal<%f"
cursor.execute(query %(increment, salaryrange))
con.commit()
print ("Records are updated successfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Deleting records from the employees table based on their salary ranges
#in the temployees table whose salary was greater than 5000 they were deleted from the table by me
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
cutoff=float(input("Enter cutoff amount:-"))
query="delete from employees where esal>%f"
cursor.execute(query %(cutoff))
con.commit()
print ("Records are deleted successfully")
except Exception as e:
print (e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
'''
DDL coommands are ;;; table created , table dropped
DML Commnds are ;;;; insert operation , update operation , delete operation (for doing this ,
commit() method is must)
'''
#desc employees
#select * from employees;
#how to use fetchone() method to retrive data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
row=cursor.fetchone()
while row is not None:
print(row)
row = cursor.fetchone()
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
#how to use fetchall() method to retrive data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
rows=cursor.fetchall()
print (rows)
for row in rows:
print ("Employee number is:-",row[0])
print("Employee name is:-", row[1])
print("Employee salary is:-", row[2])
print ("***************")
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
#how to use fetchmany() method to retrive data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
n=int(input("How many rows do you want:-"))
data = cursor.fetchmany(n)
for row in data:
print ("Employee number is:-",row[0])
print("Employee name is:-", row[1])
print("Employee salary is:-", row[2])
print ("***************")
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
''' | [
"a.manoj16@gmail.com"
] | a.manoj16@gmail.com |
57c166495a5ba9c4d4d739bff152b1a67f6e3fea | 5ceea4106e0df754ae581c1f5e2d16082d7b6386 | /hackerRank/Algorithms/Implementation/bon-appetit.py | 0da733b5b6475b0511073b0a9b33e4e31f2c3664 | [] | no_license | vikramlance/Python-Programming | b0d4bd70145bfaa7a66434656c5970fbc57e8bd3 | 4094961e3c613e33f2d8a6d30281c60ed09d8c80 | refs/heads/master | 2022-06-17T00:58:50.646615 | 2022-06-03T03:39:35 | 2022-06-03T03:39:35 | 53,989,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | '''
https://www.hackerrank.com/challenges/bon-appetit
'''
n,k=raw_input().split()  # n = number of shared items, k = index of the item Anna skipped
n=int(n)
k=int(k)
a=map(int, raw_input().split())  # item prices (Python 2: map returns a list, so sum()/indexing work)
b=int(raw_input())  # amount Brian actually charged Anna
# Anna's fair share is half of the bill excluding item k, so the bill is fair
# exactly when 2*b equals the total minus a[k].
if (2*b== (sum(a) - a[k])):
    print "Bon Appetit"
else:
    print ( b - ((sum(a) - a[k])//2))  # refund Brian owes Anna
| [
"noreply@github.com"
] | noreply@github.com |
113f6318406ab2f1f780a9300b4dcace49083e25 | 86bcf49fc5918e697bff33839164fab02bc68c83 | /preprocessor.py | ca6d6d9cdf69f94ff28a4b99c42f04b9426bcf44 | [] | no_license | eyosyaswd/honors-thesis | 845be484e0fdcddf03c5cefb5931f963e3879811 | 4d70fa09527e40d24250bfa75cf55b2760e97a59 | refs/heads/master | 2020-04-27T14:01:41.437075 | 2019-04-25T00:10:53 | 2019-04-25T00:10:53 | 174,393,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py | """
Script for preprocessing labelled tweets.
Input: csv file of with labelled tweets
Output: preprocessed and labelled tweets
"""
# from nltk.stem.snowball import SnowballStemmer
from nltk.stem.porter import PorterStemmer
import pandas as pd
import re
USE_STEMMER = True
def is_valid_word(word):
    """Return True if *word* is a plain token: a letter followed by any mix
    of letters, digits, dots or underscores.

    (Removed a leftover commented-out ``return word`` line.)
    """
    # The ^...$ anchors make this an effective full-string match.
    return (re.search(r'^[a-zA-Z][a-z0-9A-Z\._]*$', word) is not None)
def preprocess_word(word):
    """Normalise a single token.

    Steps: strip surrounding punctuation, squeeze 3+ repeated characters to
    two (funnnny -> funny), drop hyphens/apostrophes, and map negative
    contractions (dont, cant, ...) to "not".
    """
    # Remove surrounding punctuation only (interior characters are kept).
    word = word.strip('\'"?!,.():;')
    # Convert more than 2 repetitions of any character to exactly 2.
    word = re.sub(r'(.)\1+', r'\1\1', word)
    # Remove hyphens and apostrophes (so "don't" becomes "dont").
    word = re.sub(r'(-|\')', '', word)
    # Replace negative constructs with "not". Bug fix: word boundaries (\b)
    # keep words such as "significant" from being mangled into "signifinot".
    word = re.sub(r'\b(cant|dont|isnt|wont|hasnt|arent|aint|never)\b', 'not', word)
    return word
def handle_emojis(tweet):
    """Replace common text emoticons in *tweet* with EMO_POS / EMO_NEG tokens."""
    replacements = (
        # Smile -- :), : ), :-), (:, ( :, (-:, :')
        (r'(:\s?\)|:-\)|\(\s?:|\(-:|:\'\))', ' EMO_POS '),
        # Laugh -- :D, : D, :-D, xD, x-D, XD, X-D
        (r'(:\s?D|:-D|x-?D|X-?D)', ' EMO_POS '),
        # Love -- <3, :*
        (r'(<3|:\*)', ' EMO_POS '),
        # Wink -- ;-), ;), ;-D, ;D, (;, (-;
        (r'(;-?\)|;-?D|\(-?;)', ' EMO_POS '),
        # Sad -- :-(, : (, :(, ):, )-:
        (r'(:\s?\(|:-\(|\)\s?:|\)-:)', ' EMO_NEG '),
        # Cry -- :,(, :'(, :"(
        (r'(:,\(|:\'\(|:"\()', ' EMO_NEG '),
    )
    # Apply the substitutions in the same order as the original cascade.
    for pattern, token in replacements:
        tweet = re.sub(pattern, token, tweet)
    return tweet
def preprocess_tweet(tweet):
    """Clean one raw tweet into a normalised, space-joined token string."""
    tweet = tweet.lower()
    # Replace URLs and @handles with placeholder tokens; keep hashtag text.
    tweet = re.sub(r'((www\.[\S]+)|(https?://[\S]+))', ' URL ', tweet)
    tweet = re.sub(r'#(\S+)', r'\1', tweet)
    tweet = re.sub(r'@[\S]+', 'USER_HANDLE', tweet)
    tweet = tweet.strip(' \'"')
    # Drop the retweet marker, map emoticons, then collapse whitespace runs.
    tweet = re.sub(r'\brt\b', '', tweet)
    tweet = handle_emojis(tweet)
    tweet = re.sub(r'\s+', ' ', tweet)
    tokens = []
    for raw_token in tweet.split():
        token = preprocess_word(raw_token)
        if not is_valid_word(token):
            continue
        if USE_STEMMER:
            token = str(PorterStemmer().stem(token))
        tokens.append(token)
    return " ".join(tokens)
def preprocess_df(tweets_df):
    """Run preprocess_tweet over the "text" column of *tweets_df*, in place."""
    for row_label, row in tweets_df.iterrows():
        cleaned = preprocess_tweet(row["text"])
        tweets_df.at[row_label, "text"] = cleaned
if __name__ == "__main__":
    # Load the labelled tweets and clean the "text" column in place.
    labelled_tweets = pd.read_csv("testing_preprocessing.csv")
    preprocess_df(labelled_tweets)
| [
"eyosyaswd@outlook.com"
] | eyosyaswd@outlook.com |
2adea95d40fe265f37c11ef9b551f4dc668ac56c | ff5b7bae4cb9719463d532d00871bf30f55eb9e7 | /tests/tests.py | 915b61ec45956c69edb31f57f3478ba7a4b97cd6 | [] | no_license | alexanderbai1/Vida-Project | 76a514aa234c8b73c25d8b0e4c7b045831cbf10e | a509951abff6b7c211f4fc2a47a7e2f7b57dc1d5 | refs/heads/master | 2020-04-05T05:57:46.930921 | 2018-11-07T00:23:33 | 2018-11-07T00:23:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | import unittest
import sys
sys.path.append('../src') # gets into proper file path
from testthis import multiply,add
from condition import Condition
from symptom import Symptom
from users import User #users.py is pluralized because just user is another existing module.
# This is just test case, remove later
class TestUM(unittest.TestCase):
    """Sanity checks for multiply() and add() imported from testthis."""
    def setUp(self):
        pass
    def test_numbers_3_4(self):
        # Numeric product: 3 * 4 -> 12.
        self.assertEqual( multiply(3,4), 12)
    def test_strings_a_3(self):
        # The expected value shows multiply also repeats strings: 'a',3 -> 'aaa'.
        self.assertEqual( multiply('a',3), 'aaa')
    def test_add_3_4(self):
        self.assertEqual(add(3,4), 7)
    def test_add_a_b(self):
        # add() concatenates strings.
        self.assertEqual(add("a","b"), "ab")
class TestConditionClass(unittest.TestCase):
    """Condition should hand back the symptom list it was constructed with."""
    def setUp(self):
        # Condition(description, symptoms, name, <num>, <num>) -- the meaning
        # of the two numeric arguments is defined in src/condition.py.
        self.condition = Condition("Description", [], "name", -1, 0)
    def test_get_symptoms(self):
        self.assertEqual(self.condition.getSymptoms(), [])
class TestUserClass(unittest.TestCase):
    """Exercises User construction and its getter/setter pairs."""
    def setUp(self):
        # User(username, password, <num>, <num>) -- see src/users.py for the
        # meaning of the two numeric arguments.
        self.user = User("username", "password", 1, 18)
    def test_startAssessment(self):
        # Both lifecycle calls are asserted to return 0.
        self.assertEqual(self.user.startAssessment(), 0)
    def test_logout(self):
        self.assertEqual(self.user.logout(), 0)
    def test_set_name(self):
        self.user.setName("Patient1")
        self.assertEqual("Patient1", self.user.getName())
    def test_set_id(self):
        self.user.setId(9999)
        self.assertEqual(9999, self.user.getId())
    def test_set_date_of_birth(self):
        self.user.setDateOfBirth("06-13-1956")
        self.assertEqual("06-13-1956", self.user.getDateOfBirth())
    def test_set_height(self):
        self.user.setHeight(180)
        self.assertEqual(180, self.user.getHeight())
    def test_set_weight(self):
        self.user.setWeight(200)
        self.assertEqual(200, self.user.getWeight())
    def test_add_preexisting_condition(self):
        # Starts empty; after adding, the condition is the first list entry.
        cond = Condition("Runny nose and sneezing", [], "flu", 19283, 0)
        self.assertEqual([], self.user.getPreExistingConditions())
        self.user.addPreExistingCondition(cond)
        self.assertEqual(cond, self.user.getPreExistingConditions()[0])
class TestSymptomClass(unittest.TestCase):
    """Symptom should echo the conditions, related symptoms and description
    passed to its constructor."""
    def setUp(self):
        self.condition = Condition("Condition1", [], "name", -1, 0)
        # Symptom(name, <num>, <num>, conditions, related_symptoms, description)
        # -- numeric argument semantics live in src/symptom.py.
        self.symptom = Symptom("name", 0, 0, [self.condition], [], "description")
    def test_get_related_symptoms(self):
        self.assertEqual(self.symptom.getRelatedSymptoms(), [])
    def test_get_conditions(self):
        self.assertEqual(self.symptom.getConditions(), [self.condition])
    def test_get_desc(self):
        self.assertEqual(self.symptom.getDesc(), "description")
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
"aham@cs.uchicago.edu"
] | aham@cs.uchicago.edu |
087cd3f2444c706d1d88702090f0b0625e335387 | 114ee9aa6b93e8b2a88ac0760dddfb931b4ed97e | /tests/adder_test.py | 0c652414de6396ef8660eebd12f7aba7506f6747 | [] | no_license | KoduIsGreat/vscode-python-unittest | 423cd874c8c5eba468207f671aeacd060c51ad56 | c63d688e94ea6144d72cf9f6bcbdaf9227fdd26b | refs/heads/master | 2020-04-17T12:40:25.512351 | 2019-01-19T20:57:29 | 2019-01-19T20:57:29 | 166,587,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from unittest import TestCase
from adder import simple_add
class AdderTestCase(TestCase):
    """simple_add should sum two ints, including cancelling operands."""
    def test_add(self):
        assert 11 == simple_add(5,6)
        assert 0 == simple_add(5,-5)
| [
"aashelt90@gmail.com"
] | aashelt90@gmail.com |
6c0e8ee0f458aebd6ee79924bb05b5a84b07865c | 5772286410ee9bff17f4b0303afc63bd2f464bbc | /Proyecto Codigo/Web Application/WebApp/WebMultimedia/views.py | 163b237d63203cc8c01187cdd04cb103996939d2 | [] | no_license | RoboticsLabURJC/2015-TFG-Walter-Cuenca | d39097c213d631ff195874459f97c27e93f09107 | 5d4e15bc28f6dea05850c3abae2bf07639c1906f | refs/heads/master | 2021-06-18T12:24:35.583862 | 2017-07-17T17:50:57 | 2017-07-17T17:50:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,632 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
#se puede quitar no se utiliza
from django.utils import timezone
from models import *
from forms import *
#importamos para trbjar con fechas
import datetime
## importamos los formularios #
from forms import *
## importamos la libreria de Python peticiones HTTP ##
import requests
#impottamos libreria para trabajar con formato JSON
import json
#importamos el ficheri setting
from django.conf import settings
from django.http import JsonResponse
from urllib3.contrib.appengine import AppEngineManager # ????????????
#comprobacion de la conectividad con attr de Django
from django.contrib.auth import login,logout,authenticate
#### Pagina Inicio APP ####
def PageInit(request):
    """Home page: shows events and singers created/updated in the last 15 days.

    Cleanup: removed leftover debug prints (session dump, date traces) and the
    unused ``date``/``dateTimerPass`` intermediates.
    """
    # "New items" window: [today - 15 days, today].
    dateTimerHoy = datetime.datetime.now()
    datePass = dateTimerHoy - datetime.timedelta(days=15)
    list_NewEvento = Evento.objects.filter(fechaModif__range=[datePass, dateTimerHoy])
    list_NewCantante = Cantante.objects.filter(fechaModif__range=[datePass, dateTimerHoy])
    context = {
        'eventos': list_NewEvento,
        'cantantes': list_NewCantante
    }
    return render(request, 'esqueletoWeb.html', context)
#### Individual Items Selection APP ####
def PageCantante(request,idCantante):
print '>>> Peticion Cliente Evento'
print idCantante
cantanteSelec = Cantante.objects.get(id=idCantante)
context = {
'cantanteSelec':cantanteSelec
}
return render(request, 'Artista.html', context)
def PageEvent(request,idEvento):
print '>>> Peticion Cliente Evento'
print idEvento
eventSelec = Evento.objects.get(id=idEvento)
formCantidad = itemsCountsForm()
context = {
'evento':eventSelec,
'form':formCantidad
}
return render(request, 'Evento.html', context)
#### Peticiones Asincronas ####
def SearchItem(request):
print '>>> Peticion buscador de la web'
text= request.POST['textRequest']
print text
infoCantante=Cantante.objects.filter(nombre__startswith=text)
infoEvento=Evento.objects.filter(nombre__startswith=text)
print infoEvento
contexto={
'ListEvento':infoEvento,
'ListCantante':infoCantante
}
return render(request,'desplegable.html',contexto)
def WSRequest(request):
    """Proxy a Google Geocoding lookup for the site name posted as 'site'.

    Cleanup: removed debug prints that dumped the full response body and
    headers to stdout.
    """
    # SECURITY: the API key is hard-coded in source; move it to settings or
    # an environment variable.
    nameSite = request.POST['site']
    r = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address='+nameSite+'&key=AIzaSyBliq3S6sQ0pJsT1xWJDiMtPuM1sn9xzaM')
    return HttpResponse(r.text)
def WSRequestSite(request):
    """Proxy a Google Places nearby-search; 'infoSite' carries the raw query
    parameters (e.g. location/radius/types).

    Cleanup: removed debug prints of the response body and headers.
    """
    # SECURITY: hard-coded API key; move to settings/environment.
    infoSite = request.POST['infoSite']
    r = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?'+infoSite+'&key=AIzaSyBliq3S6sQ0pJsT1xWJDiMtPuM1sn9xzaM')
    return HttpResponse(r.text)
#### Formularios APP ####
def PageContact(request):
    """Contact form: persist a Contacta record on valid POST, else show form.

    Bug fix: an invalid POST previously fell through and returned None (a
    server error in Django); it now re-renders the form with its errors.
    Also removed debug prints and a dead commented-out user-creation block.
    """
    if request.method == 'POST':
        form = ContactaForm(request.POST)
        if form.is_valid():
            nuevo = Contacta(
                nombre=form.cleaned_data['nombre'],
                email=form.cleaned_data['correo'],
                telefono=form.cleaned_data['numeroTlf'],
                motivo=form.cleaned_data['area'],
                texto=form.cleaned_data['motivo'],
            )
            nuevo.save()
            return HttpResponseRedirect('/WebMutimedia/')
    else:
        form = ContactaForm()
    # Reached on GET and on invalid POST (form then carries the errors).
    return render(request, 'contacta.html', {'form': form})
def PageRegister(request):
    """Registration: create the auth User and its PerfilUser on valid POST.

    Bug fix: an invalid POST previously fell through and returned None; it
    now re-renders the form with its errors. Debug prints removed.
    """
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # NOTE(review): the original read the password from 'password2'
            # (the confirmation field); confirm the form's field names.
            newUser = User.objects.create_user(
                username=data['nick'], email=data['correo'],
                password=data['password2'], first_name=data['nombre'],
                last_name=data['apellido'])
            newUser.save()
            # Extended profile data lives in PerfilUser.
            PerfilUser(usuario=newUser, telefono=data['telefono'],
                       direccion=data['direccion'], sexo=data['sexo'],
                       pais=data['pais'], provincia=data['provincia'],
                       edad=data['edad']).save()
            return HttpResponseRedirect('/WebMutimedia/')
    else:
        form = RegisterForm()
    # Reached on GET and on invalid POST (form then carries the errors).
    return render(request, 'register.html', {'form': form})
def PageLogin(request):
    """AJAX login endpoint: responds "True" on success, "False" otherwise.

    Bug fix: GET requests and invalid forms previously returned None (a
    server error in Django); they now answer HttpResponse(False). Debug
    prints of the username/password were removed (they leaked credentials
    to the logs).
    """
    if request.method == 'POST':
        data = {'nick': request.POST['usuario'], 'password': request.POST['pass']}
        Formlogin = LoginForm(data)
        if Formlogin.is_valid():
            usuario = authenticate(username=Formlogin.cleaned_data['nick'],
                                   password=Formlogin.cleaned_data['password'])
            if usuario is None:
                # Unknown user or wrong password.
                return HttpResponse(False)
            login(request, usuario)
            return HttpResponse(True)
    # GET request or invalid form data.
    return HttpResponse(False)
def PagePerfil(request):
    """Profile page: renders the logged-in user's PerfilUser record."""
    nameUser = request.user
    print nameUser.email
    infoUser = User.objects.get(username=nameUser)
    perfilUser= PerfilUser.objects.get(usuario=infoUser)
    # Debug traces of the profile being shown.
    print perfilUser.telefono
    print perfilUser.usuario.last_name
    print perfilUser.listCompra
    if request.method == 'POST':
        # NOTE(review): POST handling is not implemented -- this branch only
        # logs and still renders the same page.
        print 'envio form + datos'
    context = {
        'infoUser': perfilUser
    }
    return render(request,'perfil.html',context)
def ModifPerfil(request):
    """Edit-profile page: builds model forms pre-filled with the user's data."""
    nameUser = request.user
    print nameUser.email
    infoUser = User.objects.get(username=nameUser)
    perfilUser= PerfilUser.objects.get(usuario=infoUser)
    print perfilUser
    print perfilUser.telefono
    print PersonaForm
    ''' Generamos un formulario del tipo del modelo '''
    # Model-bound forms pre-filled from the DB instances.
    formUser = UserForm(instance=infoUser)
    formPerfil = PersonaForm(instance=perfilUser)
    print formPerfil
    # NOTE(review): formUser is built but never passed to the template, and
    # the context key here ('infoPerfil') differs from PagePerfil's
    # ('infoUser') for the same perfil.html template -- confirm intent.
    context = {
        'infoPerfil':formPerfil
    }
    return render(request,'perfil.html',context)
def PageLogout(request):
    # End the current session and send the user back to the home page.
    logout(request)
    return HttpResponseRedirect('/WebMutimedia/')
'''
VISTAS RELACIONADAS CON EL CARRITO DE LA COMPRA
BASADA EN LA SESION DE LOS USUARIOS
'''
def AddCarShop(request, idEvento, idTicket):
    """Add the POSTed quantity of tickets (type *idTicket*, event *idEvento*)
    to the session cart, then go back to the event page.

    Cleanup: removed the many debug prints; logic is unchanged.
    """
    numEntradas = request.POST['quantity']
    v_Evento = Evento.objects.get(id=idEvento)
    ticket = Entradas.objects.get(id=idTicket)
    # Line total for this cart entry.
    Total = int(numEntradas) * ticket.precio
    cart = CartShop(request)
    cart.add(v_Evento.nombre, str(v_Evento.imgCartel), idTicket,
             ticket.tipoEntrada, numEntradas, Total)
    return HttpResponseRedirect('/WebMutimedia/Eventos/' + idEvento + '/')
def DetailCarShop(request):
    """Show the session cart with a quantity-update form per entry.

    Cleanup: removed a debug print and a dead commented-out iteration block.
    """
    objCart = CartShop(request)
    # 'update': True tells the form it edits an existing cart line.
    formCantidad = itemsCountsForm(initial={'update': True})
    return render(request, 'CartDetail.html',
                  {'objCart': objCart, 'formCantidad': formCantidad})
def UpdateCart(request, idEvento, idTicket):
    """Update the quantity (and recomputed line total) of one cart entry.

    Cleanup: removed debug prints; logic is unchanged.
    """
    newNumEntradas = request.POST['quantity']
    ticket = Entradas.objects.get(id=idTicket)
    v_Evento = Evento.objects.get(id=idEvento)
    Total = int(newNumEntradas) * ticket.precio
    instCart = CartShop(request)
    # Cart entries are keyed by (event name, ticket id).
    nameKey = instCart.getNameKey(v_Evento.nombre, idTicket)
    instCart.update(nameKey, newNumEntradas, Total)
    return HttpResponseRedirect('/WebMutimedia/DetailCar/')
def deleteItemCar(request,idEvento,idTicket):
instCart = CartShop(request)
v_Cart = instCart.cart
v_Evento = Evento.objects.get(id=idEvento)
nameKey = instCart.getNameKey(v_Evento.nombre,idTicket)
print nameKey
instCart.remove(nameKey)
return HttpResponseRedirect('/WebMutimedia/DetailCar/')
def Pagecheckout(request):
    """Checkout: persist an Order plus its cart items on valid POST.

    Bug fix: an invalid POST previously fell through and returned None; it
    now re-renders the order form with its errors. Debug prints removed;
    the redundant per-iteration itemOrden.save() was hoisted after the loop
    (ManyToMany .add() writes immediately).
    """
    objCart = CartShop(request)
    if request.method == 'POST':
        data = {
            'nombre': request.POST['nombre'],
            'apellido': request.POST['apellido'],
            'email': request.POST['email'],
            'telefono': request.POST['telefono'],
            'direccion': request.POST['direccion'],
            'codigo_postal': request.POST['codigo_postal'],
            'pais': request.POST['pais'],
        }
        formOrder = OrdenForm(data)
        if formOrder.is_valid():
            orden = Order(nombre=data['nombre'], apellido=data['apellido'],
                          email=data['email'], telefono=data['telefono'],
                          direccion=data['direccion'],
                          codigo_postal=data['codigo_postal'],
                          pais=data['pais'])
            orden.save()
            itemOrden = orderItems(order=orden)
            itemOrden.save()
            # Attach every event currently in the cart to the order.
            for item in objCart.cart.values():
                event = Evento.objects.get(nombre=item['name'])
                itemOrden.producto.add(event)
            itemOrden.save()
            return HttpResponseRedirect('/WebMutimedia/')
    else:
        formOrder = OrdenForm()
    # Reached on GET and on invalid POST (form then carries the errors).
    return render(request, 'Orden.html', {'form': formOrder, 'objCart': objCart})
| [
"cuenca306@hotmail.com"
] | cuenca306@hotmail.com |
cb3ae9ee7f231abcc094e426310c92e1f9b96e04 | 70d5ac89833250f642737cfd52c2e5977ada94f7 | /RabbitMQ/Older/Cola/serverEmitter.py | 77b2b85070b521fa36711c324e8c697855f34624 | [] | no_license | Plozano94/CupulaCiclope | 7b41d3fd0fe3e938a6aba104dcb9b64475d39229 | 4e22ee70210916881950bc7010e75819a21a31f8 | refs/heads/master | 2020-04-15T15:21:39.600058 | 2017-03-28T09:20:14 | 2017-03-28T09:20:14 | 46,656,430 | 0 | 1 | null | 2017-03-28T08:26:34 | 2015-11-22T10:53:50 | Python | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python
import pika
import sys
import config as c
#!/usr/bin/env python
import pika
import sys
# Broker credentials and blocking connection to the 'Cupula' virtual host.
credentials = pika.PlainCredentials('cupula', '1234')
connection = pika.BlockingConnection(pika.ConnectionParameters(
    c.dictIP['servidorIP'],
    5672,
    'Cupula',
    credentials))
channel = connection.channel()
# CLI: argv[1] = target node/group name, argv[2] = message text, which may
# contain the "-direct" flag to address a single queue.
targ=sys.argv[1]
rest=sys.argv[2]
if rest.find("-direct")!=-1:
    # Direct mode: strip the flag and publish only to the target's queue.
    channel.queue_declare(queue=targ+"_queue",durable=True)
    rest=rest.split()
    rest.remove("-direct")
    rest=' '.join(rest)
    channel.basic_publish(exchange='',routing_key=targ+"_queue",body=rest)
else:
    # Group mode: map each node group name to its config list variable name.
    my_dict={}
    d={}  # NOTE(review): unused variable
    for x in c.listaNodos:
        n='c.lista'+x
        my_dict[x]=n
    # NOTE(review)/SECURITY: eval() of a constructed attribute name --
    # getattr(c, 'lista' + targ) would be safer; confirm before changing.
    for x in eval(my_dict[targ]):
        channel.queue_declare(queue=x+"_queue",durable=True)
        message = ' '.join(sys.argv[2:])
        channel.basic_publish(exchange='',routing_key=x+"_queue",body=message)
print " [x] Sent "+ rest
connection.close()
| [
"plozano94@gmail.com"
] | plozano94@gmail.com |
3c8d65c57a7bdbf95d8cdf533395ad17aa5f6a99 | 03e115c1937ec7bd1e249f82db0225828eaaa186 | /2-GUI (tkinter)/3imagenes.py | 5d2a2f4a214587d17e084764b7496fb9400deb31 | [] | no_license | mivargas/Master-python | 236c04205637ddd44d1cc879de2b7c48418153f9 | 9d1c04a8d658aa0dd8620ed792fa2133adfa57e7 | refs/heads/master | 2023-03-06T13:35:58.177058 | 2021-02-16T00:06:00 | 2021-02-16T00:06:00 | 321,731,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from tkinter import *
from PIL import Image, ImageTk
# Main window, fixed at 700x500 pixels.
ventana = Tk()
ventana.geometry("700x500")
Label(ventana, text="HOLA SOY MIGUEL").pack(anchor=W)
# Load the image with PIL and wrap it in a tkinter-compatible PhotoImage;
# 'render' must stay referenced or tkinter would drop the image.
imagen = Image.open("./imagenes/labsxd.png")
render = ImageTk.PhotoImage(imagen)
Label(ventana, image=render).pack(anchor=E)
ventana.mainloop()
"miguelvargas619@gmail.com"
] | miguelvargas619@gmail.com |
678de7947999a1010a6b2498d3aff6924a93146e | ec0276755c59c915596554c557cf9e89c80b52d8 | /ami/blueprints/contact/__init__.py | c576479e28ceba91b50d49fe1c8f9d89840d2819 | [] | no_license | ahmedozmaan/SamplePythonProject | 9cb1d756d252ce6477a442487c939b282ed92e34 | 101c530c824f14befcca742c17dc9ce501c65a3d | refs/heads/master | 2020-03-24T21:57:21.904341 | 2018-08-20T13:50:11 | 2018-08-20T13:50:11 | 143,059,280 | 0 | 1 | null | 2018-08-20T13:50:12 | 2018-07-31T19:39:25 | Python | UTF-8 | Python | false | false | 49 | py | from ami.blueprints.contact.views import contact
| [
"ahmedozman@gmail.com"
] | ahmedozman@gmail.com |
598c8ca277952e17b7940afec16c265bda1a481b | e53c3b0d99a09bef379569f85c3ca80ce05d17ee | /Algorithm/recursive/recursive.py | 5eeb807d0cb9a09fc6cd117ea3f966d7008dcd9c | [] | no_license | biolchen/biologistLearningPython | c0d27246abc0795346a0dc61792b1c38748faa4d | 267eb455e096e01f851a99e0afe7057a0cf7f53e | refs/heads/master | 2022-06-28T21:03:57.608054 | 2020-05-10T06:19:11 | 2020-05-10T06:19:11 | 259,196,134 | 2 | 1 | null | 2020-05-10T02:53:44 | 2020-04-27T03:35:04 | HTML | UTF-8 | Python | false | false | 772 | py | ##
'''
1137. N-th Tribonacci Number
The Tribonacci sequence Tn is defined as follows:
T0 = 0, T1 = 1, T2 = 1, and Tn+3 = Tn + Tn+1 + Tn+2 for n >= 0.
Given n, return the value of Tn.
'''
class Solution:
    """LeetCode 1137: N-th Tribonacci number."""

    def tribonacci(self, n):
        """Return T(n) where T0 = 0, T1 = T2 = 1 and
        T(n) = T(n-1) + T(n-2) + T(n-3).

        Rewritten iteratively: the naive triple recursion is exponential in
        n; this runs in O(n) time and O(1) space with the same results.
        """
        if n == 0:
            return 0
        if n < 3:
            return 1
        a, b, c = 0, 1, 1
        # Slide the window (T(i-3), T(i-2), T(i-1)) up to i = n.
        for _ in range(n - 2):
            a, b, c = b, c, a + b + c
        return c
Solution().tribonacci(27)
##
class Solution:
    """LeetCode 1137: N-th Tribonacci number, iterative list-building variant."""

    def tribonacci(self, n):
        """Return T(n) with T0 = 0, T1 = T2 = 1."""
        seq = [0, 1, 1]
        if n < 3:
            return seq[n]
        # Extend the sequence up to index n, one term at a time.
        for _ in range(3, n + 1):
            seq.append(seq[-1] + seq[-2] + seq[-3])
        return seq.pop()
Solution().tribonacci(25)
##
##
# NOTE(review): Solution defines no findContestMatch attribute; evaluating
# the next line raises AttributeError -- leftover from another exercise.
Solution().findContestMatch
| [
"18598014+biolchen@users.noreply.github.com"
] | 18598014+biolchen@users.noreply.github.com |
e1f72a82a0cf3e435e936c1e2a6bf0e91dc7d85a | 031a5e28f91c9b357a068ad102dfe93f4cbf0c57 | /DecisionTree/test.py | a9c3f5eaca8921993de3868a87179a918f19d3a9 | [] | no_license | GitOfCharlie/ML-experiment | f28a6932e3952fa4eeff4c2692fa1e1d36dbc9ec | 5c7c966b1c7e66ee0b3104d7200facb36e78867f | refs/heads/master | 2022-04-16T17:24:19.585575 | 2020-04-20T06:02:25 | 2020-04-20T06:02:25 | 243,219,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import numpy as np
# print(np.log(2))
# print(np.log2(2))
# print(np.log10(2))
# Entropy / information-gain arithmetic for a decision-tree exercise.
# NOTE(review): under Python 2, 8/3, 5/6, 3/8 and 1/3 are integer divisions,
# which would zero out several terms -- these lines assume Python 3
# semantics; confirm the intended interpreter.
print(float(8/3) - float(5/6)*np.log2(5) + float(3/8)*np.log2(3))
print(float(1/3) * (6.5 - np.log2(5) - 0.75*np.log2(3)))
"1057206466@qq.com"
] | 1057206466@qq.com |
5c02e94311a37dbaf15d56d180884328cdaf081d | 761a20a79420bc6da491c5a487f6cf218f598b66 | /DemoTest/graphicsTkinter003.py | d8eb0e19c8eb84a4b963313d1955a8126b63903c | [] | no_license | namexiaohuihui/linuxlogin | 50b21e247d2e06c479907aa2f94f4b5979b4025d | ad8ffce5e87624f40f89eedc0229ba70cd66699b | refs/heads/master | 2020-03-24T16:56:06.313441 | 2018-08-03T14:06:48 | 2018-08-03T14:06:48 | 142,843,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
"""
@__author__ :DingDong
@file: graphicsTkinter001.py
@time: 2018/5/17 21:37
@Entry Name:operating
"""
from tkinter import *
from tkinter.messagebox import showinfo
def reply(name):
    # Pop up a modal info dialog echoing the name typed into the entry box.
    # (Dialog title/body strings are intentionally left in Chinese — they are
    # user-facing runtime text.)
    showinfo(title='弹窗',message='你的名字: %s !' % name)
top = Tk()
top.title('账号登陆')
# top.iconbitma('1178420.gif')
Label(top,text='请输入你的名字 :').pack(side=TOP)
ent = Entry(top)
ent.pack(side=TOP)
btn = Button(top,text='登陆',command=lambda :reply(ent.get()))
btn.pack(side=TOP)
top.mainloop()
| [
"704866169@qq.com"
] | 704866169@qq.com |
cdd694f1c152d8a67387552fbee357926d127b69 | bb073c4ac2e28bda1d95dee5e0af5181b7f3f564 | /blog/schema/blog_schema.py | 6d66ce4e07155645fce5fa2ee029b8bac26607ce | [] | no_license | Navyashree-h-c/Training | 982b4ca78b0cac2f962691423415dcb092dc7075 | d7d550037d9c2cc1d73c8ad93cca0f450e626e13 | refs/heads/master | 2023-01-11T11:01:45.036811 | 2020-11-05T10:26:02 | 2020-11-05T10:26:02 | 309,942,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | import graphene
from graphene_django.types import DjangoObjectType
from blog.models import Blog as BlogModel
class Blog(DjangoObjectType):
    # GraphQL object type auto-derived from the Blog Django model; graphene
    # maps every model field to a schema field by default.
    class Meta:
        model = BlogModel
| [
"navyashree@redintegro.com"
] | navyashree@redintegro.com |
415189fbe03085b52ba901c3699c66ba2f13e2d1 | 9021ed14cd07ddfa16f339de91bead3ea335b0d1 | /web/Logic/logic.py | 7d9df59f985ea9f66a5924574b615d9af4bbfe0c | [] | no_license | mali86007/xuemc | 0d443c5effd8b8c453bb5a22154dd5132a3c69c3 | 244d379f07c8b6973377bec0afe7b3d4fdfbe226 | refs/heads/master | 2020-04-08T15:54:06.493310 | 2018-12-03T08:30:56 | 2018-12-03T08:30:56 | 159,496,588 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,627 | py | import sys,os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'../..'))
# from imp import reload
# reload(sys)
# sys.setdefaultencoding('utf-8')
from DB import orm
from web.forms import SchoolForm, InstitutionForm, BulletinForm, AccountForm
from web.Logic import restful
g_choices_area = [(g.id, g.name) for g in orm.Area.query.order_by('name')]
g_choices_schooltype = [(g.id, g.name) for g in orm.Schooltype.query.order_by('name')]
g_choices_feature = [(g.id, g.name) for g in orm.Feature.query.order_by('name')]
g_choices_agespan = [(g.id, g.name) for g in orm.Agespan.query.order_by('name')]
g_choices_feetype = [(g.id, g.name) for g in orm.Feetype.query.order_by('name')]
def GetSchoolFormById(school_id):
    """Load a School by primary key and copy it field-by-field into a SchoolForm.

    Returns the populated form, or None when no school with that id exists.
    """
    school = orm.School.query.get(int(school_id))
    if school is None: return None
    schoolform = SchoolForm()
    schoolform.id.data = school.id
    schoolform.name.data = school.name
    schoolform.area_id.data = school.area_id
    schoolform.area_name = school.area.name
    schoolform.teachdesc.data = school.teachdesc
    schoolform.address.data = school.address
    schoolform.schooltype_id.data = school.schooltype_id
    schoolform.schooltype_name = school.schooltype.name
    schoolform.website.data = school.website
    schoolform.distinguish.data = school.distinguish
    schoolform.leisure.data = school.leisure
    schoolform.threashold.data =school.threashold
    schoolform.partner.data = school.partner
    schoolform.artsource.data = school.artsource
    schoolform.feedesc.data = school.feedesc
    schoolform.longitude.data =school.longitude
    schoolform.latitude.data = school.latitude
    schoolform.schoolimages = school.schoolimages
    # Pre-select the features currently linked to this school.
    schoolform.feature_ids.data = [x.feature_id for x in school.schoolfeatures]
    # Choice lists are the module-level caches built at import time.
    schoolform.area_id.choices = g_choices_area
    schoolform.schooltype_id.choices = g_choices_schooltype
    schoolform.feature_ids.choices = g_choices_feature
    return schoolform
def SetSchoolFeatures(school_id, feature_ids):
    """Replace the school's feature links so they match `feature_ids` exactly."""
    session = orm.db.session
    # Drop every existing link row for this school ...
    existing_links = orm.SchoolFeature.query.filter_by(school_id=school_id).all()
    for link in existing_links:
        session.delete(link)
    # ... then recreate one link per requested feature id and commit once.
    for feature_id in feature_ids:
        session.add(orm.SchoolFeature(school_id, feature_id))
    session.commit()
def SetInstitutionFeatures(institution_id, feature_ids):
    """Replace the institution's feature links so they match `feature_ids` exactly."""
    session = orm.db.session
    # Remove all current links for this institution ...
    existing_links = orm.InstitutionFeature.query.filter_by(institution_id=institution_id).all()
    for link in existing_links:
        session.delete(link)
    # ... then add one fresh link per requested feature id and commit once.
    for feature_id in feature_ids:
        session.add(orm.InstitutionFeature(institution_id, feature_id))
    session.commit()
def GetInstitutionFormById(institution_id):
    """Load an Institution by primary key and copy it into an InstitutionForm.

    Returns the populated form, or None when no institution with that id exists.
    """
    institution = orm.Institution.query.get(int(institution_id))
    if institution is None: return None
    institutionform = InstitutionForm()
    institutionform.id.data = institution.id
    institutionform.name.data = institution.name
    institutionform.agespan_id.data = institution.agespan_id
    institutionform.agespan_name = institution.agespan.name
    institutionform.area_id.data = institution.area_id
    institutionform.area_name = institution.area.name
    institutionform.address.data = institution.address
    institutionform.location.data = institution.location
    institutionform.website.data = institution.website
    institutionform.telephone.data = institution.telephone
    institutionform.feedesc.data = institution.feedesc
    institutionform.timeopen.data =institution.timeopen
    institutionform.timeclose.data = institution.timeclose
    institutionform.feetype_id.data = institution.feetype_id
    institutionform.feetype_name = institution.feetype.name
    institutionform.longitude.data =institution.longitude
    institutionform.latitude.data = institution.latitude
    institutionform.institutionimages = institution.institutionimages
    # Pre-select the features currently linked to this institution.
    institutionform.feature_ids.data = [x.feature_id for x in institution.institutionfeatures]
    # Choice lists come from the module-level caches built at import time.
    institutionform.area_id.choices = g_choices_area
    institutionform.feature_ids.choices = g_choices_feature
    institutionform.agespan_id.choices =g_choices_agespan
    institutionform.feetype_id.choices = g_choices_feetype
    return institutionform
def GetBulletinFormById(bulletin_id):
    """Load a Bulletin by primary key and copy it into a BulletinForm.

    Returns the populated form, or None when no bulletin with that id exists.
    """
    bulletin = orm.Bulletin.query.get(int(bulletin_id))
    if bulletin is None: return None
    bulletinform = BulletinForm()
    bulletinform.id.data = bulletin.id
    bulletinform.title.data = bulletin.title
    bulletinform.content.data = bulletin.content
    bulletinform.dt.data = bulletin.dt
    bulletinform.valid.data = bulletin.valid
    bulletinform.source.data = bulletin.source
    bulletinform.author.data = bulletin.author
    return bulletinform
def GetAccountFormById(account_id):
    """Load an Account by primary key and copy it into an AccountForm.

    Returns the populated form, or None when no account with that id exists.
    """
    account = orm.Account.query.get(int(account_id))
    if account is None: return None
    accountform = AccountForm()
    accountform.id.data = account.id
    accountform.username.data = account.username
    accountform.password.data = account.password
    accountform.name.data = account.name
    accountform.telephone.data = account.telephone
    # flag_telephone is stored numerically; expose it to the form as a bool.
    accountform.flag_telephone.data = True if account.flag_telephone >0 else False
    accountform.checkcode.data = account.checkcode
    accountform.source.data = account.source
    accountform.dtcreate.data = account.dtcreate
    return accountform
def LoadBasePageInfo(pagename, pagetask, form):
    """Attach page title/task labels and entity counts to `form`.

    Mutates `form` in place with the totals shown in the admin sidebar.
    """
    form.pagename = pagename;
    form.pagetask = pagetask;
    form.school_count = orm.School.query.count()
    form.institution_count = orm.Institution.query.count()
    form.bulletin_count = orm.Bulletin.query.count()
    form.account_count = orm.Account.query.count()
def SetDefaultImage(obj):
    """Inject a default placeholder image into a serialized resource.

    Mutates `obj` in place: for bulletin / institution / school resources
    whose image list is empty, appends a default image entry with id 0.
    """
    # Bug fix: dict.has_key() was removed in Python 3 and raised
    # AttributeError here; the `in` operator is the equivalent (and also
    # valid Python 2) membership test.
    if restful.ITEM_BULLETINIMAGES in obj:  # bulletin resource
        listimage = obj.get(restful.ITEM_BULLETINIMAGES,[])
        if len(listimage)<=0:
            listimage.append({restful.ITEM_ID:0,restful.ITEM_FILE:'default_bulletinimage.jpg'})
    if restful.ITEM_INSTITUTIONIMAGES in obj:  # training-institution resource
        listimage = obj.get(restful.ITEM_INSTITUTIONIMAGES,[])
        if len(listimage)<=0:
            listimage.append({restful.ITEM_ID:0,restful.ITEM_FILE:'default_institutionimage.jpg'})
    if restful.ITEM_SCHOOLIMAGES in obj:  # school resource
        listimage = obj.get(restful.ITEM_SCHOOLIMAGES,[])
        if len(listimage)<=0:
            listimage.append({restful.ITEM_ID:0,restful.ITEM_FILE:'default_schoolimage.jpg'})
if __name__ == '__main__':
print (orm.Area.query.get(1))
| [
"malj007@tom.com"
] | malj007@tom.com |
cc2c7be217f85d5e43d8832d6140484fb1882134 | 0d56b9ee2bb43a9ee4d8a71eeebd9e648438c807 | /agent.py | 56d5979029d43959e31ade1a8fb1f5cbef965c2a | [] | no_license | melmarsezio/Nine-Board-Tic-Tac-Toe | 8726fc4958d12c5cda9bbc53f0537914349444bd | aa9594aba9e24219d6a3e0e55fa78ab7b6c2af17 | refs/heads/master | 2021-04-21T01:33:59.532104 | 2020-03-27T17:34:37 | 2020-03-27T17:34:37 | 249,736,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,137 | py | #!/usr/bin/python3
# Sample starter bot by Zac Partridge
# Contact me at z.partridge@unsw.edu.au
# 06/04/19
# Feel free to use this and modify it however you wish
import socket
import sys
import numpy as np
import math
import collections
# a board cell can hold:
# 0 - Empty
# 1 - I played here
# 2 - They played here
#class Heuristic:
# def __init__(self):
# self.a = None
# self.b = None
# self.c = None
# self.d = None
# self.e = None
# self.f = None
# self.g = None
# self.h = None
# self.i = None
# the boards are of size 10 because index 0 isn't used
boards = np.zeros((10, 10), dtype="int8")
s = [".","X","O"]
S = [None,'a','b','c','d','e','f','g','h','i']
curr = 0 # this is the current board to play in
search_depth = 4
#scale = [0,2,1,2,1,3,1,2,1,2]
#heuristic = Heuristic()
# print a row
# This is just ported from game.c
def print_board_row(board, a, b, c, i, j, k):
print(" "+s[board[a][i]]+" "+s[board[a][j]]+" "+s[board[a][k]]+" | " \
+s[board[b][i]]+" "+s[board[b][j]]+" "+s[board[b][k]]+" | " \
+s[board[c][i]]+" "+s[board[c][j]]+" "+s[board[c][k]])
# Print the entire board
# This is just ported from game.c
def print_board(board):
print_board_row(board, 1,2,3,1,2,3)
print_board_row(board, 1,2,3,4,5,6)
print_board_row(board, 1,2,3,7,8,9)
print(" ------+-------+------")
print_board_row(board, 4,5,6,1,2,3)
print_board_row(board, 4,5,6,4,5,6)
print_board_row(board, 4,5,6,7,8,9)
print(" ------+-------+------")
print_board_row(board, 7,8,9,1,2,3)
print_board_row(board, 7,8,9,4,5,6)
print_board_row(board, 7,8,9,7,8,9)
print(" ------+-------+------")
print(" ------+-------+------")
print()
# choose a move to play
def play():
print_board(boards)
n = alpha_beta(curr, search_depth, 1) # 1 means its us to play, in alpha_beta recursively times -1 to indicate whos turn
place(curr, n, 1)
return n
def killer_move(cell, target):
global killermoves
killermoves = []
if (boards[cell][1:4] == [0,target,target]).all() or\
(boards[cell][1::3] == [0,target,target]).all() or\
(boards[cell][1::4] == [0,target,target]).all():
killermoves.append(1)
elif (boards[cell][1:4] == [target,0,target]).all() or\
(boards[cell][2::3] == [0,target,target]).all():
killermoves.append(2)
elif (boards[cell][1:4] == [target,target,0]).all() or\
(boards[cell][3::3] == [0,target,target]).all() or\
(boards[cell][3:8:2] == [0,target,target]).all():
killermoves.append(3)
elif (boards[cell][4:7] == [0,target,target]).all() or\
(boards[cell][1::3] == [target,0,target]).all():
killermoves.append(4)
elif (boards[cell][4:7] == [target,0,target]).all() or\
(boards[cell][2::3] == [target,0,target]).all() or\
(boards[cell][3:8:2] == [target,0,target]).all() or\
(boards[cell][1::4] == [target,0,target]).all():
killermoves.append(5)
elif (boards[cell][4:7] == [target,target,0]).all() or\
(boards[cell][3::3] == [target,0,target]).all():
killermoves.append(6)
elif (boards[cell][7:] == [0,target,target]).all() or\
(boards[cell][1::3] == [target,target,0]).all() or\
(boards[cell][3:8:2] == [target,target,0]).all():
killermoves.append(7)
elif (boards[cell][7:] == [target,0,target]).all() or\
(boards[cell][2::3] == [target,target,0]).all():
killermoves.append(8)
elif (boards[cell][7:] == [target,target,0]).all() or\
(boards[cell][3::3] == [target,target,0]).all() or\
(boards[cell][1::4] == [target,target,0]).all():
killermoves.append(9)
def alpha_beta(cell, depth, player, alpha = -math.inf, beta = math.inf):
killer_move(cell,1)
#print(f'My potential killer moves: {killermoves}') if depth == search_depth else 0
if depth == search_depth and killermoves:
return killermoves[0]
child_nodes = killermoves
length = len(child_nodes)
child_nodes.extend([i for i in range(1,10) if boards[cell][i]== 0 and not (i in child_nodes)])
#print(f'Current Available moves: {child_nodes}') if depth == search_depth else 0
remove_list = []
for i in child_nodes:
killer_move(i,-1)
if killermoves and child_nodes.index(i)>=length:
remove_list.append(i)
#print(f'Current opponent killer moves: {remove_list}') if depth == search_depth else 0
for i in remove_list:
child_nodes.remove(i)
#print(f'After elimination, my available moves left: {child_nodes}') if depth == search_depth else 0
#if depth == search_depth:
# killer_move(cell,-1)
# for i in killermoves:
# if i in child_nodes:
# return i
if depth == 0 or not child_nodes or winning(cell, -player):
evaluate(cell)
#for index in range(1,10):
# total = 0
# if index != cell:
# total += heuristic.__dict__[S[index]]
# else:
# total += evaluate(cell)
#return total if depth != search_depth else remove_list[0]
return value if depth != search_depth else remove_list[0]
elif player > 0:
for i in child_nodes:
fake_place(cell, i, player)
#if depth == 1:
# for index in range(1,10):
# heuristic.__dict__[S[index]] = evaluate(index)
if depth != search_depth:
alpha = max(alpha, alpha_beta(i, depth-1, -player, alpha, beta))
else:
new_alpha = alpha_beta(i, depth-1, -player, alpha, beta)
if new_alpha > alpha:
alpha, move = new_alpha, i
unplace(cell, i)
if alpha >= beta:
return alpha
return alpha if depth != search_depth else move
else:
for i in child_nodes:
fake_place(cell, i, player)
#if depth == 1:
# for index in range(1,10):
# heuristic.__dict__[S[index]] = evaluate(index)
beta = min(beta, alpha_beta(i, depth-1, -player, alpha, beta))
unplace(cell, i)
if beta <= alpha:
return beta
return beta
return 'Something went wrong!!!'
def evaluate(i):
global x, value, X2, X1, O2, O1
X2 = 0; X1 = 0; O2 = 0; O1 = 0;
value = 0
x = collections.Counter(boards[i][1:4])
sub_evaluate()
x = collections.Counter(boards[i][4:7])
sub_evaluate()
x = collections.Counter(boards[i][7:])
sub_evaluate()
x = collections.Counter(boards[i][1::3])
sub_evaluate()
x = collections.Counter(boards[i][2::3])
sub_evaluate()
x = collections.Counter(boards[i][3::3])
sub_evaluate()
x = collections.Counter(boards[i][1::4])
sub_evaluate()
x = collections.Counter(boards[i][3:8:2])
sub_evaluate()
value += 3*X2+X1-(20*O2+5*O1)
return value
def sub_evaluate():
global value, X2, X1, O2, O1
if x[1] == 2 and x[0] == 1:
X2 += 1
elif x[-1] == 2 and x[0] == 1:
O2 += 1
elif x[0] == 2 and x[1] == 1:
X1 += 1
elif x[0] == 2 and x[-1] == 1:
O1 += 1
elif x[1] == 3:
value += 1000
elif x[-1] == 3:
value -= 10000
#def sub_evaluate():
# global value
# if x[1] == 2 and x[0] == 1:
# value += 300
# elif x[-1] == 2 and x[0] == 1:
# value -= 300
# elif x[0] == 2 and x[1] == 1:
# value += 10
# elif x[0] == 2 and x[-1] == 1:
# value -= 10
# elif x[1] == 3:
# value += 10000
# elif x[-1] == 3:
# value -= 10000
# elif x[0] == 3:
# value += 1
def winning(cell, player):
if (boards[cell][1:4] == [player,player,player]).all() or (boards[cell][4:7] == [player,player,player]).all() or\
(boards[cell][7:] == [player,player,player]).all() or (boards[cell][1::3] == [player,player,player]).all() or\
(boards[cell][2::3] == [player,player,player]).all() or (boards[cell][3::3] == [player,player,player]).all() or\
(boards[cell][1::4] == [player,player,player]).all() or (boards[cell][3:8:2] == [player,player,player]).all():
return True
return False
def fake_place(cell, num, player):
boards[cell][num] = player
def unplace(cell, i):
boards[cell][i] = 0
# place a move in the global boards
def place(board, num, player):
global curr
curr = num
boards[board][num] = player
# read what the server sent us and
# only parses the strings that are necessary
def parse(string):
if "(" in string:
command, args = string.split("(")
args = args.split(")")[0]
args = args.split(",")
else:
command, args = string, []
if command == "second_move":
place(int(args[0]), int(args[1]), -1)
return play()
elif command == "third_move":
# place the move that was generated for us
place(int(args[0]), int(args[1]), 1)
# place their last move
place(curr, int(args[2]), -1)
return play()
elif command == "next_move":
place(curr, int(args[0]), -1)
return play()
elif command == "win":
print_board(boards)
print("Yay!! We win!! :)")
return -1
elif command == "loss":
print_board(boards)
print("We lost :(")
return -1
elif command == "draw":
print_board(boards)
print("Draw game :|")
return -1
return 0
# connect to socket
def main():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = int(sys.argv[2]) # Usage: ./agent.py -p (port)
s.connect(('localhost', port))
while True:
text = s.recv(1024).decode()
if not text:
continue
for line in text.split("\n"):
response = parse(line)
if response == -1:
s.close()
return
elif response > 0:
s.sendall((str(response) + "\n").encode())
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
24facbeaca566bc44cf1b172a6621bba9a176dc4 | 7d111338d8d2aba0b6dba919a4d35cf8de6b452e | /top_getter.py | aab4bd0c9064c93bb5e874ad5bf043f35b852a66 | [] | no_license | d07s1d0s4d1/alphatron | e3cedd6bc3eebfa243a976b7157a0b49948b0c2f | 7f2e98c31703b33d06dad8e35ed87d1e8db62d8a | refs/heads/master | 2020-03-14T22:19:23.219803 | 2018-05-02T08:10:13 | 2018-05-02T08:10:13 | 131,818,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import top_configs
import db_handler
import sending
class Singleton(object):
    """Mixin caching one shared instance per concrete class.

    The cache is refreshed whenever the stored object is not an instance of
    the class being constructed (e.g. first call, or a subclass asking for
    its own instance after the parent created one).
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        cached = cls._instance
        if not isinstance(cached, cls):
            # No suitable cached object yet — build and remember one.
            cached = object.__new__(cls, *args, **kwargs)
            cls._instance = cached
        return cached
class TopGetter(Singleton, object):
    """Singleton that lazily builds and caches ranked lists of Sendings."""
    def __init__(self):
        # NOTE(review): __init__ runs on every TopGetter() call even though
        # __new__ hands back the cached instance, so self.tops is reset each
        # time the singleton is "constructed" — confirm that is intended.
        self.db = db_handler.DBHandler()
        self.tops = {}
    def get_top_list(self, top_name):
        # Build the list for this top once, then serve it from the cache.
        if top_name not in self.tops:
            top = []
            top_sendings = list(self.db.iterate_over_top_alpha(top_configs.top_configs[top_name]['top_table_name']))
            for sending_ in top_sendings:
                top.append(sending.Sending(sending_['sendingID']))
            self.tops[top_name] = top
        return self.tops[top_name]
| [
"dsd_kem@mail.ru"
] | dsd_kem@mail.ru |
f70b3475156dcaf6eded504c6366ffb479decd07 | 8e43b034459532934a04bd8f28468805191909b9 | /k_means.py | 621dc1dc34ed010a537e42b0d4e55010ccd74c8e | [] | no_license | Linar23/Machine_learning | be29a00a623214b89a9c9c9069f8f9bc3ec8087a | cf4cbc41205eaf2171c5c84f89ca2819bdb9c56c | refs/heads/master | 2020-12-30T16:16:03.380955 | 2017-09-12T12:42:56 | 2017-09-12T12:42:56 | 89,281,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | import numpy as np
from matplotlib import pyplot as plt
from numpy import linalg
x1 = 7.5 + 2 * np.random.randn(100)
y1 = 5 + 2 * np.random.randn(100)
x2 = 2.5 + 2 * np.random.randn(100)
y2 = 15 + 2 * np.random.randn(100)
x3 = 12.5 + 2 * np.random.randn(100)
y3 = 15 + 2 * np.random.randn(100)
x = np.concatenate((x1,x2,x3),axis=0)
y = np.concatenate((y1,y2,y3),axis=0)
plt.figure()
plt.title('Исходные данные')
plt.plot(x1,y1,'.r')
plt.plot(x2,y2,'.r')
plt.plot(x3,y3,'.r')
data_set = np.column_stack((x,y))
c1 = np.concatenate((10 + 2 * np.random.randn(1),5 + 2 * np.random.randn(1)))
c2 = np.concatenate((2.5 + 2 * np.random.randn(1),15 + 2 * np.random.randn(1)))
c3 = np.concatenate((12.5 + 2 * np.random.randn(1),15 + 2 * np.random.randn(1)))
result = np.zeros(data_set[:,0].size)
# Lloyd's algorithm: a fixed 100 assignment/update rounds (no convergence test).
for i in range(0,100):
    # Per-cluster counters and coordinate sums for the centroid update step.
    first = 0
    second = 0
    third = 0
    sum1_x = 0
    sum2_x = 0
    sum3_x = 0
    sum1_y = 0
    sum2_y = 0
    sum3_y = 0
    # NOTE(review): this inner loop reuses (shadows) the outer index `i`;
    # harmless here because the outer value is never read again, but fragile.
    for i in range(0,data_set[:,0].size):
        # Assignment step: label each point with its nearest centroid (1..3).
        min1 = linalg.norm(c1 - data_set[i])
        min2 = linalg.norm(c2 - data_set[i])
        min3 = linalg.norm(c3 - data_set[i])
        g = np.argmin([min1,min2,min3]) + 1
        result[i] = g
        if g == 1:
            first += 1
            sum1_x += data_set[i][0]
            sum1_y += data_set[i][1]
        elif g == 2:
            second += 1
            sum2_x += data_set[i][0]
            sum2_y += data_set[i][1]
        elif g == 3:
            third += 1
            sum3_x += data_set[i][0]
            sum3_y += data_set[i][1]
    # Update step: move each centroid to the mean of its assigned points.
    # NOTE(review): divides by the member count — raises ZeroDivisionError if
    # a cluster ends up empty; the random initialisation makes this unlikely
    # but not impossible.
    c1 = [sum1_x / first,sum1_y / first]
    c2 = [sum2_x / second,sum2_y / second]
    c3 = [sum3_x / third,sum3_y / third]
cluster1_x = []
cluster1_y = []
cluster2_x = []
cluster2_y = []
cluster3_x = []
cluster3_y = []
for i in range(0,data_set[:,0].size):
if result[i] == 1:
cluster1_x.append(data_set[i][0])
cluster1_y.append(data_set[i][1])
elif result[i] == 2:
cluster2_x.append(data_set[i][0])
cluster2_y.append(data_set[i][1])
elif result[i] == 3:
cluster3_x.append(data_set[i][0])
cluster3_y.append(data_set[i][1])
plt.figure()
plt.title('После кластеризации')
plt.plot(cluster1_x,cluster1_y,'.r')
plt.plot(cluster2_x,cluster2_y,'.g')
plt.plot(cluster3_x,cluster3_y,'.y')
# Центроиды
plt.plot(c1[0],c1[1],'x', markersize=10)
plt.plot(c2[0],c2[1],'x', markersize=10)
plt.plot(c3[0],c3[1],'x', markersize=10) | [
"noreply@github.com"
] | noreply@github.com |
31bdb1a9cc72b072e7e224ef83e66392692f2012 | 78e54d33dc3d4b0e82baf1303d8e8a4e6b7dd7cd | /Ex/settings.py | e133fac987ed90f2afb8fae65f4fe2610f076695 | [] | no_license | graduation3loo2/Exodus-1.1 | fa8ad1fe09b4850b747ecb002ebccee7d74d4028 | b14eb631e16768737470ff4f31ccfe290398e7a2 | refs/heads/master | 2020-06-16T04:25:59.263506 | 2019-07-06T00:24:12 | 2019-07-06T00:24:12 | 195,479,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | """
Django settings for Ex project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&6d990=w@sphpuge*e=*=u&i)*lb@9_dc*+(^ce&&%w%vwm^m6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Home.apps.HomeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ex.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ex.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'Exodus',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"Yousef5020@gmail.com"
] | Yousef5020@gmail.com |
f1033a3a96ab29a179996c21324e2e9a90a9b91e | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /ABC163/ABC163_A.py | bb4bb6c963e85231718a3704fce4761be0b06a79 | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | r=int(input())
#a,b=map(int,input().split())
#l=list(map(int,input().split()))
#l=[list(map(int,input().split())) for i in range(n)]
ans=2*r*3.14159
print('{:.5f}'.format(ans))
| [
"yinoue.1996787@gmail.com"
] | yinoue.1996787@gmail.com |
1a13773ff297844a15f7e945e42116f1f0e93e2b | fdc0c0f38d9a22a8c71e4b4c8f219a46dcd864c7 | /ex1.py | 6766d023af92159c724f4b20d4ee3042ec90ea02 | [] | no_license | Bonanashelby/PythonTheHardWay | 634bd3ff4b01964fc599be1defd6507e16cfaa2f | 75747f250beac0d4f4004e0fdebba666a5ab76d4 | refs/heads/master | 2021-01-20T08:53:41.060747 | 2017-05-03T21:30:43 | 2017-05-03T21:30:43 | 90,195,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rath you 'not'."
print 'I "said" do not touch this.'
| [
"bonanashelby@gmail.com"
] | bonanashelby@gmail.com |
0a9bcfc4715058ab6ac4036a2b44dcff54788dcf | 2992fcc732685b6841c2675aef63c6e04cd61259 | /open_data/xml/convert_okei_to_json.py | 1bfbbe6f4df19765f23c3d5e3a705a1c58fc1fc1 | [
"MIT"
] | permissive | sunveil/ZakupkiParser | b1628253d636db3c74039766d9d2225e53c13be1 | d7f7750e50db90a5bf0616664dbe2d2189df490f | refs/heads/master | 2023-01-11T05:01:17.554381 | 2020-06-30T17:01:22 | 2020-06-30T17:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import xmltodict
import json
# Parse the OKEI (units-of-measure classifier) XML export into a dict tree.
with open('xml/data/okei.xml') as fd:
    data = xmltodict.parse(fd.read())
data = data['ns2:nsiOkei']['ns2:body']
out = []
codes = {}
# Deduplicate by code: when the same code appears twice, keep the longer
# (presumably more descriptive) name.
for i in data['ns2:item']:
    try:
        temp = {}
        temp['name'] = i['ns2:nsiOkeiData']['ns2:name']
        temp['code'] = i['ns2:nsiOkeiData']['ns2:code']
        if temp['code'] not in codes:
            codes[temp['code']] = temp['name']
        else:
            if len(codes[temp['code']]) < len(temp['name']):
                codes[temp['code']] = temp['name']
                print(codes[temp['code']])
                print(temp['code'], temp['name'])
    except Exception as e:
        # Best-effort conversion: log malformed items and keep going.
        print(e)
        print(i)
# Flatten the {code: name} map back into a list of records for the JSON dump.
for i in codes:
    temp = {}
    temp['name'] = codes[i]
    temp['code'] = i
    out.append(temp)
# ensure_ascii=False keeps the Cyrillic unit names readable in the output.
with open('xml/data/okei.json', 'w') as f:
    json.dump(out, f, ensure_ascii=False)
"tox.home@ya.ru"
] | tox.home@ya.ru |
ef0f468693701496a02abb449e5a879e5efb4c2d | cf69e3d9e5cdabe1e25fadd15ea4493715cd8caf | /bookings/views.py | e6f9fb03d2a4f246e5a4ecd9b266de148b90c8a4 | [] | no_license | georgeanthony33/Quest-en-France | 85218cac6895a7df9594fa0b03088ccd587eb74c | 55931476cc3145bae732b91a100eee41444699a4 | refs/heads/master | 2023-03-15T17:50:39.477885 | 2022-01-27T22:25:29 | 2022-01-27T22:25:29 | 252,736,453 | 0 | 0 | null | 2023-03-04T16:11:04 | 2020-04-03T13:14:48 | JavaScript | UTF-8 | Python | false | false | 9,420 | py | # pylint: disable=no-member
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_404_NOT_FOUND, HTTP_201_CREATED, HTTP_422_UNPROCESSABLE_ENTITY, HTTP_204_NO_CONTENT, HTTP_202_ACCEPTED, HTTP_401_UNAUTHORIZED
from rest_framework.permissions import IsAuthenticated
from .models import Booking, Person, Ferry
from homes.models import Home
from .serializers import BookingSerializer, PopulatedBookingSerializer, PersonSerializer, FerrySerializer, EditBookingSerializer
import datetime
import stripe
class PaymentDetailsView(APIView):
    """Create (or reuse) a Stripe customer and charge them via PaymentIntent."""
    # SECURITY(review): a Stripe secret key is hardcoded in source control.
    # It is a test-mode key (sk_test_...), but it should live in an
    # environment variable / settings, not in the repository.
    # Note this assignment runs once at import time, as a class-body statement.
    stripe.api_key = 'sk_test_51HP8LdIjBNRGWKqww2iOkmadjmLZyBVFnZxmN87mkufttb8v6m98ehakWV67nVMyBCqLcqX8JsKCKrV8jIW5LlCh00LP3LNBEG'
    def post(self, request):
        data = request.data
        email = data['email']
        payment_method_id = data['payment_method_id']
        # Stripe amounts are in the smallest currency unit (pence for GBP).
        total_amount = int(data['total_amount'] * 100)
        description = data['description']
        extra_msg = ''
        # Reuse an existing Stripe customer for this e-mail if one exists.
        customer_data = stripe.Customer.list(email=email).data
        if len(customer_data) == 0:
            customer = stripe.Customer.create(
                email=email, payment_method=payment_method_id)
        else:
            customer = customer_data[0]
            extra_msg = "Customer already exists."
        # confirm=True attempts the charge immediately; the PaymentIntent
        # result is not inspected here — any Stripe error propagates as an
        # exception.
        stripe.PaymentIntent.create(
            customer=customer,
            payment_method=payment_method_id,
            currency='GBP',
            amount=total_amount,
            description=description,
            confirm=True
        )
        return Response(status=HTTP_200_OK,
            data={
                'message': 'Success',
                'data': {
                    'customer_id': customer.id,
                    'extra_msg': extra_msg
                }
            }
        )
class BookingAvailabilityView(APIView):
    """Check whether a home is free for a requested date range."""

    def post(self, request):
        """Return {'message': None} when free, an explanatory message when the
        dates clash with an existing booking, or the serializer errors (422)
        when the payload is invalid.
        """
        booking = BookingSerializer(data=request.data)
        if not booking.is_valid():
            # Bug fix: the original fell off the end of the method here and
            # returned None, making Django raise "The view didn't return an
            # HttpResponse" for any invalid payload.
            return Response(booking.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
        start_date = booking.validated_data.get('start_date')
        end_date = booking.validated_data.get('end_date')
        home = booking.validated_data.get('home')
        # Overlap test: an existing booking clashes when it ends after our
        # start and starts before our end.
        is_occupied = Home.objects.filter(
            bookings__end_date__gt=start_date,
            bookings__start_date__lt=end_date,
            id=home.id).exists()
        if is_occupied:
            return Response({'message': 'Selected dates unavailable'}, status=HTTP_200_OK)
        return Response({'message': None}, status=HTTP_200_OK)
class BookingListView(APIView):
    """List all bookings, or create a new one with the business-rule checks."""
    permission_classes = (IsAuthenticated, )
    def get(self, _request):
        bookings = Booking.objects.all()
        serialized_bookings = PopulatedBookingSerializer(bookings, many=True)
        return Response(serialized_bookings.data)
    def post(self, request):
        """Create a booking for the authenticated user.

        Enforces: no Tue/Thu arrivals or departures, minimum 6 nights, the
        home must be free for the dates, at most 6 people, at least 1 adult.
        Returns 201 with the booking on success, 422 with a message otherwise.
        """
        # Force the booking owner to the authenticated user.
        request.data['user'] = request.user.id
        booking = BookingSerializer(data=request.data)
        if booking.is_valid():
            start_date = booking.validated_data.get('start_date')
            end_date = booking.validated_data.get('end_date')
            home = booking.validated_data.get('home')
            adults = booking.validated_data.get('adults')
            kids = booking.validated_data.get('kids')
            # Changeover days are restricted: no arrivals/departures Tue/Thu.
            if start_date.strftime("%a") == 'Tue' or start_date.strftime("%a") == 'Thu' or end_date.strftime("%a") == 'Tue' or end_date.strftime("%a") == 'Thu':
                return Response({'message': 'no arrivals or departures on Tuesdays and Thursdays'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            if (end_date - start_date).days < 6:
                return Response({'message': 'minimum of 6 nights stay'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            # Date-overlap test against every existing booking for this home.
            is_occupied = Home.objects.filter(
                bookings__end_date__gt=start_date,
                bookings__start_date__lt=end_date,
                id=home.id).exists()
            if is_occupied:
                return Response({'message': 'home already booked for these dates'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            if (adults + kids > 6):
                return Response({'message': 'maximum of 6 people per home'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            if (adults == 0):
                return Response({'message': 'must be at least one adult'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            booking.save()
            return Response(booking.data, status=HTTP_201_CREATED)
        return Response(booking.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
class BookingDetailView(APIView):
    """Retrieve, update or delete a single booking by primary key."""
    permission_classes = (IsAuthenticated, )
    def get(self, _request, pk):
        # 200 with the populated booking, 404 when it does not exist.
        try:
            booking = Booking.objects.get(pk=pk)
            serialized_booking = PopulatedBookingSerializer(booking)
            return Response(serialized_booking.data)
        except Booking.DoesNotExist:
            return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
    def put(self, request, pk):
        # 202 on success, 422 on validation failure, 404 when missing.
        try:
            booking = Booking.objects.get(pk=pk)
            updated_booking = BookingSerializer(booking, data=request.data)
            if updated_booking.is_valid():
                updated_booking.save()
                return Response(updated_booking.data, status=HTTP_202_ACCEPTED)
            return Response(updated_booking.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
        except Booking.DoesNotExist:
            return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
    def delete(self, _request, pk):
        # 204 on success, 404 when the booking does not exist.
        try:
            booking = Booking.objects.get(pk=pk)
            booking.delete()
            return Response(status=HTTP_204_NO_CONTENT)
        except Booking.DoesNotExist:
            return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
class PersonListView(APIView):
    """Add a person to a booking (capped at six people per booking)."""
    permission_classes = (IsAuthenticated, )
    def post(self, request, pk):
        # Tie the new person to the booking from the URL, not the payload.
        request.data['booking'] = pk
        person = PersonSerializer(data=request.data)
        if person.is_valid():
            booking = Booking.objects.get(pk=pk)
            serialized_booking = PopulatedBookingSerializer(booking)
            # Enforce the six-guest cap before persisting.
            if len(serialized_booking.data.get('people')) == 6:
                return Response({'message': 'maximum of six people per booking'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
            person.save()
            # Re-fetch and re-serialize so the response includes the new person.
            updated_booking = Booking.objects.get(pk=pk)
            serialized_updated_booking = PopulatedBookingSerializer(updated_booking)
            return Response(serialized_updated_booking.data, status=HTTP_201_CREATED)
        return Response(person.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
class PersonDetailView(APIView):
permission_classes = (IsAuthenticated, )
def delete(self, request, **kwargs):
try:
person = Person.objects.get(pk=kwargs['person_pk'])
person.delete()
return Response(status=HTTP_204_NO_CONTENT)
except person.DoesNotExist:
return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
def put(self, request, **kwargs):
try:
person = Person.objects.get(pk=kwargs['person_pk'])
updated_person = PersonSerializer(person, data=request.data)
if updated_person.is_valid():
updated_person.save()
return Response(updated_person.data, status=HTTP_202_ACCEPTED)
return Response(updated_person.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
except Person.DoesNotExist:
return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
class FerryListView(APIView):
permission_classes = (IsAuthenticated, )
def post(self, request, pk):
request.data['booking'] = pk
ferry = FerrySerializer(data=request.data)
if ferry.is_valid():
booking = Booking.objects.get(pk=pk)
serialized_booking = PopulatedBookingSerializer(booking)
if len(serialized_booking.data.get('ferry_quote')) == 1:
return Response({'message': 'maximum of one ferry per booking'}, status=HTTP_422_UNPROCESSABLE_ENTITY)
ferry.save()
serialized_updated_booking = PopulatedBookingSerializer(booking)
return Response(serialized_updated_booking.data, status=HTTP_201_CREATED)
return Response(ferry.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
class FerryDetailView(APIView):
permission_classes = (IsAuthenticated, )
def delete(self, request, **kwargs):
try:
ferry = Ferry.objects.get(pk=kwargs['ferry_pk'])
ferry.delete()
return Response(status=HTTP_204_NO_CONTENT)
except ferry.DoesNotExist:
return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND)
def put(self, request, **kwargs):
try:
ferry = Ferry.objects.get(pk=kwargs['ferry_pk'])
updated_ferry = FerrySerializer(ferry, data=request.data)
if updated_ferry.is_valid():
updated_ferry.save()
return Response(updated_ferry.data, status=HTTP_202_ACCEPTED)
return Response(updated_ferry.errors, status=HTTP_422_UNPROCESSABLE_ENTITY)
except Ferry.DoesNotExist:
return Response({'message': 'Not Found'}, status=HTTP_404_NOT_FOUND) | [
"georgeanthony33@gmail.com"
] | georgeanthony33@gmail.com |
aef37705f286e46903ffcd71491000c635addd56 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/531LonelyPixelI.py | 85d2ad88ff367da5b050933632ef6d2bb1308b12 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 913 | py | # coding=utf-8
'''
Created on 2017�4�11�
@author: Administrator
'''
class Solution(object):
def findLonelyPixel(self, picture):
"""
:type picture: List[List[str]]
:rtype: int
"""
if picture:
rows = [0] * len(picture)
cols = [0] * len(picture[0])
for row in range(len(picture)):
for col in range(len(picture[row])):
if picture[row][col] == "B":
rows[row] += 1
cols[col] += 1
ans = 0
for row in range(len(picture)):
for col in range(len(picture[row])):
if rows[row] == 1 and cols[col] == 1 and picture[row][col] == "B":
ans += 1
return ans
else:
return 0
picture = ["BBB"]
print Solution().findLonelyPixel(picture)
| [
"yanhuang1293@gmail.com"
] | yanhuang1293@gmail.com |
daa9a4137885443683548c1077b46ef672bc3ad2 | adc6bfb528a410474aa5967032b572d173c15f6b | /test/__init__.py | ad45b2685cb2edeb1a4db958ab5fa8bf9999ba5b | [] | no_license | millerhooks/pydor | 13aa3bff99f88d3a8dd8543e8a3de4be81001791 | e8c7a9af42a626d3316079471434b6ce757bfb65 | refs/heads/master | 2023-01-27T19:50:00.929475 | 2018-10-02T21:18:26 | 2018-10-02T21:18:26 | 151,347,717 | 0 | 0 | null | 2023-01-11T11:21:39 | 2018-10-03T01:47:43 | Python | UTF-8 | Python | false | false | 17 | py | #import api_test
| [
"tomas.dohnalek@firma.seznam.cz"
] | tomas.dohnalek@firma.seznam.cz |
abc19a89f586d28d24cd2468c387a49113282b1c | 03520abb58a66aeed9a556d53e3a21006af02dde | /named_storms/migrations/0079_nsempsa_covered_data_snapshot.py | 73c0e3338456b81ae6bf6fe88566507a8f794e9c | [] | no_license | flackdl/cwwed | 23ce1d1a5e48a57ee2cb3229860f1b97ccc81636 | 0a1454897d397cd5e1652643616abe883ccc853b | refs/heads/master | 2023-07-21T20:05:35.093270 | 2023-06-29T15:29:26 | 2023-06-29T15:29:26 | 118,942,664 | 1 | 0 | null | 2023-09-12T21:56:59 | 2018-01-25T17:06:47 | Python | UTF-8 | Python | false | false | 530 | py | # Generated by Django 2.2.6 on 2019-10-17 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('named_storms', '0078_auto_20191017_1705'),
]
operations = [
migrations.AddField(
model_name='nsempsa',
name='covered_data_snapshot',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='named_storms.NamedStormCoveredDataSnapshot'),
),
]
| [
"flackattack@gmail.com"
] | flackattack@gmail.com |
1f52bb23b402ead22c0e51d79040f6549da12792 | ac1a38f3b28259ab11caae989a94614c250bbc78 | /remove_all_plugins.py | d89594161af9840c480f6e274f72bf4ff5721eec | [] | no_license | eonardol/cordova-phonegap-utils | 9f819bc5f0c8ae0a5f1510c569713e47a65e4ddb | b599405be2d00c48fed8f9316cbe62c7e7c7451e | refs/heads/master | 2020-07-04T05:47:59.813961 | 2015-04-17T15:35:51 | 2015-04-17T15:35:51 | 25,036,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python3
import sys, os.path, subprocess
stdoutdata = subprocess.getoutput("cordova plugin list")
if ("command not found" in stdoutdata):
print("cordova not installed :(");
sys.exit(1)
if ("Current working directory is not a Cordova-based project." in stdoutdata):
print("not a cordova project ;(");
sys.exit(1)
if ("No plugins added" in stdoutdata):
print("all plugins have been removed :)");
sys.exit(1)
for item in stdoutdata.split("\n"):
cmd = "cordova plugin remove " + item.split()[0]
print(cmd)
result = subprocess.getoutput(cmd)
print(result)
print("done! bye!! ;)")
| [
"eonardol@users.noreply.github.com"
] | eonardol@users.noreply.github.com |
bdd60ed37f1b70cbf11c34d6e1db1b4d9f5d6caa | ef0917d016a1a2b60b2ccbb18325eadab37b61a8 | /Mplayer_Reproductor.py | 8e3074068ccd5aa85d1863d99f949ea05e5a9ca3 | [] | no_license | sugar-activities/4355-activity | a8dcab94e01c5e45b78196a69df4391e48263e04 | acefbd1ea5eabaf2783326bb1c9e2bd8f4255007 | refs/heads/master | 2021-01-19T23:14:25.746896 | 2017-04-21T05:54:35 | 2017-04-21T05:54:35 | 88,936,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,529 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mplayer_Reproductor.py por:
# Flavio Danesse <fdanesse@gmail.com>
# CeibalJAM! - Uruguay
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gobject, time, os, subprocess, platform
from gettext import gettext as _
UPDATE_TIME = 30
STDOUT = "/tmp/jamediaout%d" % time.time()
STDERR = "/dev/null"
MPLAYER = "mplayer"
if "olpc" in platform.platform(): MPLAYER = "./mplayer"
class Mplayer_Reproductor(gobject.GObject):
__gsignals__ = {"cambio_estado":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
"update_progress":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),
"video":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_BOOLEAN,)),
"mplayer_info":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,))}
''' Posibles estados: "playing Audio_Video", "paused Audio_Video", "stoped Audio_Video", "playing Radio", "playing TV", None '''
def __init__(self, id_pantalla):
self.__gobject_init__()
self.ejecutable = MPLAYER
self.id_pantalla = id_pantalla
self.tipo_de_datos = None
self.mplayer = None
self.salida = None
self.entrada = None
self.Actualizador = False
self.pista_actual = None
self.estado= None
self.progress= 0.0
#self.name_origen= None
estado= property(self.get_estado, self.set_estado)
progress= property(self.get_progress, self.set_progress)
'''
estructura= "%s -wid %i -slave -idle -nolirc -quiet −volume 100" % (self.ejecutable, self.id_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDERR,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")'''
self.set_estado(None)
# ----------- Propiedades -----------
def get_estado(self):
return self.estado
def set_estado(self, valor= None):
self.estado = valor
self.emit("cambio_estado", self.get_estado())
def get_progress(self):
return self.progress
def set_progress(self, valor):
self.progress = valor
self.emit("update_progress", self.get_progress())
# ----------- Propiedades -----------
# ------------------------ ACTUALIZACIONES De REPRODUCCION ------------------------
def update_progress(self):
if not self.entrada.closed:
self.entrada.write("%s 0\n" % ("get_percent_pos"))
self.entrada.flush()
linea = self.salida.readline()
if linea:
if "ANS_PERCENT_POSITION" in linea:
self.get_progress_in_mplayer(linea)
elif "Video: no video" in linea or "Audio only file format detected" in linea:
self.emit("video", False)
elif "Cache" in linea:
self.get_progress_cache_in_mplayer(linea)
elif "Movie-Aspect" in linea:
self.emit("video", True)
elif "Starting playback" in linea:
#self.emit("cambio_estado", self.get_estado())
pass
elif "Position:" in linea or "VO:" in linea or "AO:" in linea:
# AO: [pulse] 22050Hz 2ch s16le (2 bytes per sample)
# VO: [xv] 540x360 => 540x360 Planar YV12
#self.emit("mplayer_info", linea)
pass
elif "Resolving" in linea or "Connecting" in linea:
#self.emit("mplayer_info", linea)
pass
elif "Name" in linea:
#self.name_origen= linea.split(": ")[-1]
pass
elif "Playing" in linea:
#self.name_origen= linea.split("Playing ")[-1]
pass
elif "Opening" in linea or "AUDIO" in linea or "Selected" in linea \
or "Genre" in linea or "Website" in linea or "Bitrate" in linea:
'''
Opening video decoder: [ffmpeg] FFmpeg's libavcodec codec family
Selected video codec: [ffh264] vfm: ffmpeg (FFmpeg H.264)
Opening audio decoder: [faad] AAC (MPEG2/4 Advanced Audio Coding)
AUDIO: 44100 Hz, 2 ch, s16le, 119.9 kbit/8.50% (ratio: 14989->176400)
Selected audio codec: [faad] afm: faad (FAAD AAC (MPEG-2/MPEG-4 Audio))'''
#self.emit("mplayer_info", linea)
pass
else:
'''
mplayer: Symbol `ff_codec_bmp_tags' has different size in shared object, consider re-linking
eos/Beautiful Liar - Beyonce ft Shakira.
stream 0: video (h264), -vid 0
[lavf] stream 1: audio (aac), -aid 0
VIDEOopen: No such file or directory
[MGA] Couldn't open: /dev/mga_vid
open: No such file or directory
[MGA] Couldn't open: /dev/mga_vid
[VO_TDFXFB] Can't open /dev/fb0: Permission denied.
[VO_3DFX] Unable to open /dev/3dfx.
Failed to open VDPAU backend libvdpau_nvidia.so: cannot open shared object file: No such file or directo==========================================================================
==========================================================================
==========================================================================
==========================================================================
AO: [pulse] 44100Hz 2ch s16le (2 bytes per sample)
Starting playback...
VO: [xv] 320x240 => 320x240 Planar YV12'''
pass
return True
def get_progress_in_mplayer(self, linea):
try:
if "Cache size" in linea:
return
try:
progress = float(linea[linea.index('=')+1:-1])
if self.get_progress()!= progress:
self.set_progress(progress)
if self.get_progress() >= 100.0:
self.set_estado("stoped Audio_Video")
except Exception, e:
print "Error en Progreso de Reproducción: %s" % (e)
#print linea
except Exception, e:
print "Error en Progreso de Reproducción: %s" % (e)
#print linea
def get_progress_cache_in_mplayer(self, linea):
if "Cache not responding" in linea: return
try:
if "Cache size" in linea:
return
try:
progress = float((linea.split(": ")[-1]).split("%")[0])/20*100
except:
return
if self.get_progress()!= progress:
self.set_progress(progress)
#self.emit("mplayer_info", "Cargando Caché")
except Exception, e:
print "Error en Carga de Caché: %s" % (e)
#print linea
# ------------------------ ACTUALIZACIONES De REPRODUCCION ------------------------
# ------------------------ REPRODUCCION -------------------------------------------
def seek(self, valor):
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.entrada.write('seek %s 1 0\n' % (float(valor)))
self.entrada.flush()
self.set_estado("playing Audio_Video")
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
def play(self, direccion, tipo_de_datos):
self.tipo_de_datos = tipo_de_datos
if tipo_de_datos == "Radio":
self.play_radio(direccion)
elif tipo_de_datos == "TV":
self.play_tv(direccion)
elif tipo_de_datos == "Audio_Video":
self.play_Audio_Video(direccion)
def play_Audio_Video(self, direccion):
self.pista_actual = "%s%s%s" % ("\"", direccion, "\"")
self.play_archivo(self.pista_actual)
def play_archivo(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i -wid %i" % (self.ejecutable, 1024, self.id_pantalla)
estructura= "%s -slave -idle -nolirc -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer = subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada = self.mplayer.stdin
self.salida = open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual = direccion
self.set_estado("playing Audio_Video")
self.emit("mplayer_info", self.pista_actual)
def play_radio(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i" % (self.ejecutable, 32)
estructura= "%s -slave -idle -nolirc -quiet -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
#self.Actualizador= gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual= direccion
self.set_estado("playing Radio")
self.emit("mplayer_info", self.pista_actual)
def play_tv(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i -wid %i" % (self.ejecutable, 1024, self.id_pantalla)
estructura= "%s -slave -idle -nolirc -quiet -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
#self.Actualizador= gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual= direccion
self.set_estado("playing TV")
self.emit("mplayer_info", self.pista_actual)
def pause_play(self):
try:
if self.entrada:
if self.get_estado() == "playing Audio_Video": # pausa
self.entrada.write('pause 0\n')
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.set_estado("paused Audio_Video")
self.emit("mplayer_info", _("Reproducción Pausada"))
elif self.get_estado() == "paused Audio_Video":
self.entrada.write('pause 0\n') # hace unpause
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.set_estado("playing Audio_Video")
self.emit("mplayer_info", "%s: %s" % (_("Reproduciendo"), self.pista_actual))
except Exception, e:
print "HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR", e
def quit(self, widget=None):
try:
if self.entrada:
self.entrada.write('%s 0\n' % "quit")
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
except Exception, e:
print "HA OCURRIDO UN ERROR EN QUIT DEL REPRODUCTOR", e
self.set_progress(0.0)
if os.path.exists(STDOUT): os.unlink(STDOUT)
self.pista_actual = None
self.set_estado(None)
self.emit("mplayer_info", _("Reproducción Detenida"))
# ------------------------ REPRODUCCION -------------------------------------------
| [
"ignacio@sugarlabs.org"
] | ignacio@sugarlabs.org |
a06781d0ec3b9a832a6808fad25b00373a0a8076 | 094fe32f3144ee03d53df10ae3e8518f372d6b62 | /plugin.audio.bbc-radio/addon.py | 1604dd4283dfee730d1e5112c7a443b40b068e2d | [] | no_license | PhantomRaspberryBlower/repository.prb-entertainment-pack | 0b72ad49b4183c8b45b704295beadf09831f23a3 | 02e103228ad86aee3d8cef6fac3806c1f3605f45 | refs/heads/master | 2022-06-19T04:14:07.841398 | 2022-05-17T09:53:21 | 2022-05-17T09:53:21 | 192,784,863 | 10 | 3 | null | null | null | null | UTF-8 | Python | false | false | 28,760 | py | import xbmcgui
import xbmcplugin
import xbmcaddon
import sys
import urllib
import datetime
import re
# Written by: Phantom Raspberry Blower
# Date: 21-02-2017
# Description: Addon for listening to BBC Radio live broadcasts
# Get addon details
__addon_id__ = 'plugin.audio.bbc-radio'
__addon__ = xbmcaddon.Addon(id=__addon_id__)
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
__fanart__ = __addon__.getAddonInfo('fanart')
__author__ = "Phantom Raspberry Blower"
__url__ = sys.argv[0]
__handle__ = int(sys.argv[1])
# Get localized language words
__language__ = __addon__.getLocalizedString
_national_radio = __language__(30001)
_nations_and_regions = __language__(30002)
_local_radio = __language__(30003)
_nationwide_radio_stations = __language__(30004)
_national_and_regional_radio_stations = __language__(30005)
_local_radio_stations = __language__(30006)
_bbc_radio_1_desc = __language__(30007)
_bbc_radio_1_extra_desc = __language__(30008)
_bbc_radio_2_desc = __language__(30009)
_bbc_radio_3_desc = __language__(30010)
_bbc_radio_4_desc = __language__(30011)
_bbc_radio_4_extra_desc = __language__(30012)
_bbc_radio_5_live_desc = __language__(30013)
_bbc_radio_5_live_extra_desc = __language__(30014)
_bbc_radio_6_music_desc = __language__(30015)
_bbc_asian_network_desc = __language__(30016)
_bbc_radio_cymru_desc = __language__(30017)
_bbc_radio_foyle_desc = __language__(30018)
_bbc_radio_nan_gaidheal_desc = __language__(30019)
_bbc_radio_scotland_desc = __language__(30020)
_bbc_radio_ulster_desc = __language__(30021)
_bbc_radio_wales_desc = __language__(30022)
_internet_radio = __language__(30023)
_something_wicked_happened = __language__(30721)
_error = __language__(30722)
image_path = 'special://home/addons/' + __addon_id__ + '/resources/media/'
category_list = [_national_radio,
_nations_and_regions,
_local_radio]
station_list_nr = ['BBC Radio 1',
'BBC Radio 1xtra',
'BBC Radio 2',
'BBC Radio 3',
'BBC Radio 4',
'BBC Radio 4 Extra',
'BBC Radio 5 Live',
'BBC Radio 5 Live Sports Extra',
'BBC Radio 6 Music',
'BBC Asian Network',
'BBC World Service UK',
'BBC World Service',
'BBC World News']
station_list_nar = ['Radio Cymru',
'BBC Radio Foyle',
'BBC Radio nan Gaidheal',
'BBC Radio Scotland',
'BBC Radio Ulster',
'BBC Radio Wales']
station_list_lr = ['BBC Radio Berkshire',
'BBC Radio Bristol',
'BBC Radio Cambridgeshire',
'BBC Radio Cornwall',
'BBC Coventry & Warwickshire',
'BBC Radio Cumbria',
'BBC Radio Derby',
'BBC Radio Devon',
'BBC Essex',
'BBC Radio Gloucestershire',
'BBC Radio Guernsey',
'BBC Hereford & Worcester',
'BBC Radio Humberside',
'BBC Radio Jersey',
'BBC Radio Kent',
'BBC Radio Lancashire',
'BBC Radio Leeds',
'BBC Radio Leicester',
'BBC Radio Lincolnshire',
'BBC Radio London',
'BBC Radio Manchester',
'BBC Radio Merseyside',
'BBC Newcastle',
'BBC Radio Norfolk',
'BBC Radio Northampton',
'BBC Radio Nottingham',
'BBC Radio Oxford',
'BBC Radio Sheffield',
'BBC Radio Shropshire',
'BBC Radio Solent',
'BBC Somerset',
'BBC Radio Stoke',
'BBC Radio Suffolk',
'BBC Surrey',
'BBC Sussex',
'BBC Tees',
'BBC Three Counties Radio',
'BBC Wiltshire',
'BBC WM 95.6',
'BBC Radio York']
categories = {_national_radio: {'thumb': image_path + 'bbc-national-radio-logo.png',
'fanart': image_path + 'bbc-national-radio.jpg',
'desc': _nationwide_radio_stations
},
_nations_and_regions: {'thumb': image_path + 'bbc-nations-radio-logo.png',
'fanart': image_path + 'bbc-nations-radio.jpg',
'desc': _national_and_regional_radio_stations
},
_local_radio: {'thumb': image_path + 'bbc-local-radio-logo.png',
'fanart': image_path + 'bbc-local-radio.jpg',
'desc': _local_radio_stations}
}
stations = {'BBC Radio 1': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio1_mf_p',
'thumb': image_path + 'bbc-radio-1-logo.png',
'fanart': image_path + 'bbc-radio-1.jpg',
'desc': _bbc_radio_1_desc},
'BBC Radio 1xtra': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio1xtra_mf_p',
'thumb': image_path + 'bbc-radio-1xtra-logo.png',
'fanart': image_path + 'bbc-radio-1xtra.jpg',
'desc': _bbc_radio_1_extra_desc},
'BBC Radio 2': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio2_mf_p',
'thumb': image_path + 'bbc-radio-2-logo.png',
'fanart': image_path + 'bbc-radio-2.jpg',
'desc': _bbc_radio_2_desc},
'BBC Radio 3': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio3_mf_p',
'thumb': image_path + 'bbc-radio-3-logo.png',
'fanart': image_path + 'bbc-radio-3.jpg',
'desc': _bbc_radio_3_desc},
'BBC Radio 4': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio4fm_mf_p',
'thumb': image_path + 'bbc-radio-4-logo.png',
'fanart': image_path + 'bbc-radio-4.jpg',
'desc': _bbc_radio_4_desc},
'BBC Radio 4 Extra': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio4extra_mf_p',
'thumb': image_path + 'bbc-radio-4-extra-logo.png',
'fanart': image_path + 'bbc-radio-4-extra.jpg',
'desc': _bbc_radio_4_extra_desc},
'BBC Radio 5 Live': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio5live_mf_p',
'thumb': image_path + 'bbc-radio-5-live-logo.png',
'fanart': image_path + 'bbc-radio-5-live.jpg',
'desc': _bbc_radio_5_live_desc},
'BBC Radio 5 Live Sports Extra': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_radio5extra_mf_p',
'thumb': image_path + 'bbc-radio-5-live-sports-extra-logo.png',
'fanart': image_path + 'bbc-radio-5-live-sports-extra.jpg',
'desc': _bbc_radio_5_live_extra_desc},
'BBC Radio 6 Music': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_6music_mf_p',
'thumb': image_path + 'bbc-radio-6-music-logo.png',
'fanart': image_path + 'bbc-radio-6-music.jpg',
'desc': _bbc_radio_6_music_desc},
'BBC Asian Network': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_asianet_mf_p',
'thumb': image_path + 'bbc-asian-network-logo.png',
'fanart': image_path + 'bbc-asian-network.jpg',
'desc': _bbc_asian_network_desc},
'BBC World Service UK': {'url': 'http://bbcwssc.ic.llnwd.net/stream/bbcwssc_mp1_ws-eieuk',
'thumb': image_path + 'bbc-world-service-uk-logo.png',
'fanart': image_path + 'bbc-world-service.jpg',
'desc': ''},
'BBC World News': {'url': 'http://bbcwssc.ic.llnwd.net/stream/bbcwssc_mp1_ws-einws',
'thumb': image_path + 'bbc-world-news-logo.png',
'fanart': image_path + 'bbc-world-news.jpg',
'desc': ''},
'BBC World Service': {'url': 'http://bbcwssc.ic.llnwd.net/stream/bbcwssc_mp1_ws-eie',
'thumb': image_path + 'bbc-world-service-logo.png',
'fanart': image_path + 'bbc-world-service.jpg',
'desc': ''},
'Radio Cymru': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_cymru_mf_p',
'thumb': image_path + 'radio-cymru-logo.png',
'fanart': image_path + 'radio-cymru.jpg',
'desc': _bbc_radio_cymru_desc},
'BBC Radio Foyle': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_foyle_mf_p',
'thumb': image_path + 'bbc-radio-foyle-logo.png',
'fanart': image_path + 'bbc-radio-foyle.jpg',
'desc': _bbc_radio_foyle_desc},
'BBC Radio nan Gaidheal': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_nangaidheal_mf_p',
'thumb': image_path + 'bbc-radio-nan-gaidheal-logo.png',
'fanart': image_path + 'bbc-radio-nan-gaidheal.jpg',
'desc': _bbc_radio_nan_gaidheal_desc},
'BBC Radio Scotland': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_scotlandfm_mf_p',
'thumb': image_path + 'bbc-radio-scotland-logo.png',
'fanart': image_path + 'bbc-radio-scotland.jpg',
'desc': _bbc_radio_scotland_desc},
'BBC Radio Ulster': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_ulster_mf_p',
'thumb': image_path + 'bbc-radio-ulster-logo.png',
'fanart': image_path + 'bbc-radio-ulster.jpg',
'desc': _bbc_radio_ulster_desc},
'BBC Radio Wales': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_walesmw_mf_p',
'thumb': image_path + 'bbc-radio-wales-logo.png',
'fanart': image_path + 'bbc-radio-wales.jpg',
'desc': _bbc_radio_wales_desc},
'BBC Radio Berkshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrberk_mf_p',
'thumb': image_path + 'bbc-radio-berkshire-logo.png',
'fanart': image_path + 'bbc-radio-berkshire.jpg',
'desc': ''},
'BBC Radio Bristol': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrbris_mf_p',
'thumb': image_path + 'bbc-radio-bristol-logo.png',
'fanart': image_path + 'bbc-radio-bristol.jpg',
'desc': ''},
'BBC Radio Cambridgeshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrcambs_mf_p',
'thumb': image_path + 'bbc-radio-cambridgeshire-logo.png',
'fanart': image_path + 'bbc-radio-cambridgeshire.jpg',
'desc': ''},
'BBC Radio Cornwall': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrcorn_mf_p',
'thumb': image_path + 'bbc-radio-cornwall-logo.png',
'fanart': image_path + 'bbc-radio-cornwall.jpg',
'desc': ''},
'BBC Coventry & Warwickshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrwmcandw_mf_p',
'thumb': image_path + 'bbc-coventry-warwickshire-logo.png',
'fanart': image_path + 'bbc-coventry-warwickshire.jpg',
'desc': ''},
'BBC Radio Cumbria': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrcumbria_mf_p',
'thumb': image_path + 'bbc-radio-cumbria-logo.png',
'fanart': image_path + 'bbc-radio-cumbria.jpg',
'desc': ''},
'BBC Radio Derby': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrderby_mf_p',
'thumb': image_path + 'bbc-radio-derby-logo.png',
'fanart': image_path + 'bbc-radio-derby.jpg',
'desc': ''},
'BBC Radio Devon': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrdevon_mf_p',
'thumb': image_path + 'bbc-radio-devon-logo.png',
'fanart': image_path + 'bbc-radio-devon.jpg',
'desc': ''},
'BBC Essex': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lressex_mf_p',
'thumb': image_path + 'bbc-essex-logo.png',
'fanart': image_path + 'bbc-essex.jpg',
'desc': ''},
'BBC Radio Gloucestershire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrgloucs_mf_p',
'thumb': image_path + 'bbc-radio-gloucestershire-logo.png',
'fanart': image_path + 'bbc-radio-gloucestershire.jpg',
'desc': ''},
'BBC Radio Guernsey': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrguern_mf_p',
'thumb': image_path + 'bbc-radio-guernsey-logo.png',
'fanart': image_path + 'bbc-radio-guernsey.jpg',
'desc': ''},
'BBC Hereford & Worcester': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrhandw_mf_p',
'thumb': image_path + 'bbc-hereford-worcester-logo.png',
'fanart': image_path + 'bbc-hereford-worcester.jpg',
'desc': ''},
'BBC Radio Humberside': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrhumber_mf_p',
'thumb': image_path + 'bbc-radio-humberside-logo.png',
'fanart': image_path + 'bbc-radio-humberside.jpg',
'desc': ''},
'BBC Radio Jersey': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrjersey_mf_p',
'thumb': image_path + 'bbc-radio-jersey-logo.png',
'fanart': image_path + 'bbc-radio-jersey.jpg',
'desc': ''},
'BBC Radio Kent': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrkent_mf_p',
'thumb': image_path + 'bbc-radio-kent-logo.png',
'fanart': image_path + 'bbc-radio-kent.jpg',
'desc': ''},
'BBC Radio Lancashire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrlancs_mf_p',
'thumb': image_path + 'bbc-radio-lancashire-logo.png',
'fanart': image_path + 'bbc-radio-lancashire.jpg',
'desc': ''},
'BBC Radio Leeds': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrleeds_mf_p',
'thumb': image_path + 'bbc-radio-leeds-logo.png',
'fanart': image_path + 'bbc-radio-leeds.jpg',
'desc': ''},
'BBC Radio Leicester': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrleics_mf_p',
'thumb': image_path + 'bbc-radio-leicester-logo.png',
'fanart': image_path + 'bbc-radio-leicester.jpg',
'desc': ''},
'BBC Radio Lincolnshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrlincs_mf_p',
'thumb': image_path + 'bbc-radio-lincolnshire-logo.png',
'fanart': image_path + 'bbc-radio-lincolnshire.jpg',
'desc': ''},
'BBC Radio London': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrldn_mf_p',
'thumb': image_path + 'bbc-radio-london-logo.png',
'fanart': image_path + 'bbc-radio-london.jpg',
'desc': ''},
'BBC Radio Manchester': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrmanc_mf_p',
'thumb': image_path + 'bbc-radio-manchester-logo.png',
'fanart': image_path + 'bbc-radio-manchester.jpg',
'desc': ''},
'BBC Radio Merseyside': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrmersey_mf_p',
'thumb': image_path + 'bbc-radio-merseyside-logo.png',
'fanart': image_path + 'bbc-radio-merseyside.jpg',
'desc': ''},
'BBC Newcastle': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrnewc_mf_p',
'thumb': image_path + 'bbc-newcastle-logo.png',
'fanart': image_path + 'bbc-newcastle.jpg',
'desc': ''},
'BBC Radio Norfolk': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrnorfolk_mf_p',
'thumb': image_path + 'bbc-radio-norfolk-logo.png',
'fanart': image_path + 'bbc-radio-norfolk.jpg',
'desc': ''},
'BBC Radio Northampton': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrnthhnts_mf_p',
'thumb': image_path + 'bbc-radio-northampton-logo.png',
'fanart': image_path + 'bbc-radio-northampton.jpg',
'desc': ''},
'BBC Radio Nottingham': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrnotts_mf_p',
'thumb': image_path + 'bbc-radio-nottingham-logo.png',
'fanart': image_path + 'bbc-radio-nottingham.jpg',
'desc': ''},
'BBC Radio Oxford': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lroxford_mf_p',
'thumb': image_path + 'bbc-radio-oxford-logo.png',
'fanart': image_path + 'bbc-radio-oxford.jpg',
'desc': ''},
'BBC Radio Sheffield': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsheff_mf_p',
'thumb': image_path + 'bbc-radio-sheffield-logo.png',
'fanart': image_path + 'bbc-radio-sheffield.jpg',
'desc': ''},
'BBC Radio Shropshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrshrops_mf_p',
'thumb': image_path + 'bbc-radio-shropshire-logo.png',
'fanart': image_path + 'bbc-radio-shropshire.jpg',
'desc': ''},
'BBC Radio Solent': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsolent_mf_p',
'thumb': image_path + 'bbc-radio-solent-logo.png',
'fanart': image_path + 'bbc-radio-solent.jpg',
'desc': ''},
'BBC Somerset': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsomer_mf_p',
'thumb': image_path + 'bbc-somerset-logo.png',
'fanart': image_path + 'bbc-somerset.jpg',
'desc': ''},
'BBC Radio Stoke': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsomer_mf_p',
'thumb': image_path + 'bbc-radio-stoke-logo.png',
'fanart': image_path + 'bbc-radio-stoke.jpg',
'desc': ''},
'BBC Radio Suffolk': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsuffolk_mf_p',
'thumb': image_path + 'bbc-radio-suffolk-logo.png',
'fanart': image_path + 'bbc-radio-suffolk.jpg',
'desc': ''},
'BBC Surrey': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsurrey_mf_p',
'thumb': image_path + 'bbc-surrey-logo.png',
'fanart': image_path + 'bbc-surrey.jpg',
'desc': ''},
'BBC Sussex': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrsussex_mf_p',
'thumb': image_path + 'bbc-sussex-logo.png',
'fanart': image_path + 'bbc-sussex.jpg',
'desc': ''},
'BBC Tees': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrtees_mf_p',
'thumb': image_path + 'bbc-tees-logo.png',
'fanart': image_path + 'bbc-tees.jpg',
'desc': ''},
'BBC Three Counties Radio': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lr3cr_mf_p',
'thumb': image_path + 'bbc-three-counties-radio-logo.png',
'fanart': image_path + 'bbc-three-counties-radio.jpg',
'desc': ''},
'BBC Wiltshire': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrwilts_mf_p',
'thumb': image_path + 'bbc-wiltshire-logo.png',
'fanart': image_path + 'bbc-wiltshire.jpg',
'desc': ''},
'BBC WM 95.6': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lrwm_mf_p',
'thumb': image_path + 'bbc-wm956-logo.png',
'fanart': image_path + 'bbc-wm956.jpg',
'desc': ''},
'BBC Radio York': {'url': 'http://bbcmedia.ic.llnwd.net/stream/bbcmedia_lryork_mf_p',
'thumb': image_path + 'bbc-radio-york-logo.png',
'fanart': image_path + 'bbc-radio-york.jpg',
'desc': ''}
}
def list_categories():
    """
    Build the top-level category listing in the Kodi interface.

    Iterates over ``category_list`` and adds one folder entry per
    category, using its artwork and description from ``categories``.
    :return: None
    """
    for item in category_list:
        # Fix: the original also built an xbmcgui.ListItem here that was
        # never used; addDir() creates the real list item itself.
        # Each category becomes a folder that routes back into this
        # plugin with mode 1 (station listing).
        addDir(item,
               __url__,
               1,
               categories[item]['thumb'],
               categories[item]['fanart'],
               categories[item]['desc'],
               isFolder=True)
def get_links(name, url, icon, fanart):
    """
    Build the list of playable station entries for one category.

    Parameters:
        name: category label the user selected
        url, icon, fanart: routing context from the category item
    """
    # Map the category name to its station list. Fix: default to an
    # empty list so an unrecognised name no longer raises NameError
    # (cur_list was previously unbound when no branch matched).
    if name == _national_radio:
        cur_list = station_list_nr
    elif name == _nations_and_regions:
        cur_list = station_list_nar
    elif name == _local_radio:
        cur_list = station_list_lr
    else:
        cur_list = []
    for item in cur_list:
        # Each station is a playable (non-folder) item handled by mode 2.
        # Fix: dropped the unused xbmcgui.ListItem the original created.
        addDir(item,
               stations[item]['url'],
               2,
               stations[item]['thumb'],
               stations[item]['fanart'],
               stations[item]['desc'],
               isFolder=False)
def play_audio(name, url, icon, fanart):
    """
    Start playback of the selected audio stream.

    Stops any stream that is already playing, builds a list item carrying
    the station artwork, and hands it to the Kodi player.
    """
    # Bug fix: isPlayingAudio is a method; the original referenced it
    # without calling it, so the bound method object was always truthy
    # and stop() ran unconditionally.
    if xbmc.Player().isPlayingAudio():
        xbmc.Player().stop()
    liz = xbmcgui.ListItem(str(name),
                           iconImage='Icon.png',
                           thumbnailImage=icon)
    # Set a fanart image for the list item.
    liz.setProperty('fanart_image', fanart)
    xbmc.Player().play(url, liz, False)
    xbmc.executebuiltin('Action(Fullscreen)')
def addDir(name, url, mode, icon, fanart, desc, isFolder=False):
    """
    Display a list of links
    """
    # Build the plugin callback URL that re-invokes this script with the
    # routing parameters encoded in the query string (parsed again by
    # get_params()).
    # NOTE: urllib.quote_plus is the Python 2 API; this add-on targets
    # the Kodi/XBMC Python 2 interpreter.
    u = (sys.argv[0] + '?url=' + urllib.quote_plus(url) +
         '&mode=' + str(mode) + '&name=' + urllib.quote_plus(name) +
         '&icon=' + str(icon) + '&fanart=' + str(fanart))
    # NOTE(review): this initial value is always overwritten below.
    ok = True
    liz = xbmcgui.ListItem(name,
                           iconImage="DefaultFolder.png",
                           thumbnailImage=icon)
    # Set a fanart image for the list item.
    liz.setProperty('fanart_image', fanart)
    # Set additional info for the list item.
    liz.setInfo(type='music',
                infoLabels={'title': name,
                            'album': __addonname__,
                            'artist': name,
                            'genre': _internet_radio,
                            'year': 2015
                            }
                )
    # A second info block of type 'video' supplies plot/status/mediatype
    # fields the music block above does not carry.
    liz.setInfo(type='video',
                infoLabels={'title': name,
                            'genre': _internet_radio,
                            'plot': desc,
                            'year': 2015,
                            'status': 'Live',
                            'mediatype': 'musicvideo'
                            }
                )
    ok = xbmcplugin.addDirectoryItem(handle=__handle__,
                                     url=u,
                                     listitem=liz,
                                     isFolder=isFolder)
    return ok
def get_params():
    """
    Parse the plugin invocation's query string (sys.argv[2]).

    :return: dict mapping parameter name -> raw value, or an empty list
        when no query string was supplied (legacy sentinel kept for
        backward compatibility with the routing code below)
    """
    param = []
    querystring = sys.argv[2]
    if len(querystring[1:]) >= 1:
        param = {}
        # Drop the leading '?' and split into name=value pairs; pairs
        # without exactly one '=' are silently ignored.
        for pair in querystring[1:].split('&'):
            pieces = pair.split('=')
            if len(pieces) == 2:
                param[pieces[0]] = pieces[1]
    return param
def msg_notification(heading, message, icon, duration):
    """Pop up a Kodi toast notification with the given text and icon."""
    xbmcgui.Dialog().notification(heading, message, icon, duration)
def message(message, title):
    """Show a blocking OK dialog with the given title and body text."""
    xbmcgui.Dialog().ok(title, message)
# Define local variables
params = get_params()
url = None
name = None
mode = None
icon = None
fanart = None
# Parse the url, name, mode, icon and fanart parameters.
# NOTE(review): get_params() returns an empty list (not a dict) when the
# plugin is invoked without a query string, so the lookups below can
# raise TypeError as well as KeyError -- the bare excepts are relied on
# to swallow both and leave each variable as None.
try:
    url = urllib.unquote_plus(params['url'])
except:
    pass
try:
    name = urllib.unquote_plus(params['name'])
except:
    pass
try:
    # mode arrives as a string in the query; int() selects the route below.
    mode = int(params['mode'])
except:
    pass
try:
    icon = urllib.unquote_plus(params['icon'])
except:
    pass
try:
    fanart = urllib.unquote_plus(params['fanart'])
except:
    pass
# Route the request based upon the mode number:
# missing/invalid -> category list, 1 -> station list, 2 -> playback.
if mode is None or url is None or len(url) < 1:
    list_categories()
elif mode == 1:
    _default_image = fanart
    get_links(name, url, icon, fanart)
elif mode == 2:
    play_audio(name, url, icon, fanart)
xbmcplugin.endOfDirectory(__handle__)
| [
"jasonbulis@hotmail.com"
] | jasonbulis@hotmail.com |
a5c3f6f93d1f5122a502c4bff3c84593cf568c96 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2402.py | f7e5ebea73232aadb5a3d4e17099e09452301ea6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | def increment_by_value(d, l, index, value):
if index in d:
d[index] += value
else:
d[index] = value
l.append(index)
l.sort()
def occupy(s):
    """
    Simulate the "bathroom stalls" seating process.

    ``s`` is a line of the form "<num_stalls> <num_people>". People are
    served in batches: every person in a batch takes the (current) largest
    empty gap and splits it into two smaller gaps. Returns the (max, min)
    neighbour-gap pair seen by the last person served.
    """
    stalls, people = (int(tok) for tok in s.split(" "))
    # gap_count maps gap length -> how many gaps of that length exist;
    # gap_sizes keeps the distinct lengths sorted ascending.
    gap_count = {stalls: 1}
    gap_sizes = [stalls]

    def add_gap(size, amount):
        # Record `amount` new gaps of `size`, keeping sizes sorted.
        if size in gap_count:
            gap_count[size] += amount
        else:
            gap_count[size] = amount
            gap_sizes.append(size)
            gap_sizes.sort()

    served = 0
    while True:
        # Serve everyone occupying the current largest gap at once.
        size = gap_sizes.pop()
        batch = gap_count[size]
        gap_count[size] = 0
        if size % 2 == 0:
            # Even gap splits into two unequal halves.
            add_gap(size // 2, batch)
            add_gap(size // 2 - 1, batch)
            hi, lo = size // 2, size // 2 - 1
        else:
            # Odd gap splits into two equal halves.
            add_gap(size // 2, 2 * batch)
            hi = lo = size // 2
        served += batch
        if served >= people:
            return hi, lo
def main():
    """Read T test cases from stdin and print each answer in Code Jam format."""
    l = int(input())
    for i in range(l):
        # Each case line is "<num_stalls> <num_people>".
        # NOTE(review): `max`/`min` shadow the builtins; harmless here.
        max, min= occupy(input())
        print("Case #{}: {} {}".format(i + 1, max, min))
main() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8bae06f64d5c41a7bd5029e336b494470d51175c | aa6f28c0448a0363e307d04646c159471463b355 | /counties.py | 77da9948d6d8accd259d1f055c0a2c6abe72b75b | [] | no_license | Annikaking00/MathematicalElectionAnalysis | d1e4ca56841b3c224c752692ca22c81cd72530c3 | 08b4c059deba137870fab2a4975a42d535f08f6b | refs/heads/master | 2022-11-28T21:30:39.090764 | 2020-07-24T18:51:19 | 2020-07-24T18:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,756 | py | # Import necessary packages
from gerrychain import (GeographicPartition, Partition, Graph)
import numpy as np
# 1-based lookup table of Utah's 29 counties, listed alphabetically.
utah_counties = {1: "Beaver",
                 2: "Box Elder",
                 3: "Cache",
                 4: "Carbon",
                 5: "Daggett",
                 6: "Davis",
                 7: "Duchesne",
                 8: "Emery",
                 9: "Garfield",
                 10: "Grand",
                 11: "Iron",
                 12: "Juab",
                 13: "Kane",
                 14: "Millard",
                 15: "Morgan",
                 16: "Piute",
                 17: "Rich",
                 18: "Salt Lake",
                 19: "San Juan",
                 20: "Sanpete",
                 21: "Sevier",
                 22: "Summit",
                 23: "Tooele",
                 24: "Uintah",
                 25: "Utah",
                 26: "Wasatch",
                 27: "Washington",
                 28: "Wayne",
                 29: "Weber"}
class SplitCountiesSimple:
    """
    Gerrychain updater that counts how many counties are divided into
    multiple districts.

    Use this class if you simply want the integer number of split
    counties; ``SplitCounties`` records the full per-county breakdown.
    """

    def __init__(self, alias="split_counties_simple", county_field="CountyID"):
        """
        Parameters:
            alias (str): key under which this updater is registered on the
                partition (used to read the parent's cached value)
            county_field (str): node attribute holding the county id

        Bug fix: the original ignored ``county_field`` and always used the
        literal "CountyID".
        """
        self.f = county_field
        self.alias = alias

    def __call__(self, partition):
        """
        Return the number of counties split across district lines.

        Runs the full computation on the initial partition and a cheap
        incremental update on single-flip children.
        """
        if partition.parent is None:
            return self.initialize(partition)
        return self.update(partition)

    def initialize(self, partition):
        """
        Full computation: for every cut edge whose endpoints share a
        county, tally that county. Any county with at least one internal
        cut edge is split.

        Returns:
            splits (int): the number of split counties
        """
        split_counties = {}
        for edge in partition["cut_edges"]:
            # Bug fix: the original referenced an undefined global
            # `initial_partition` here instead of `partition`.
            county1 = partition.graph.nodes[edge[0]][self.f]
            county2 = partition.graph.nodes[edge[1]][self.f]
            if county1 == county2:
                # There is a cut edge inside the county.
                split_counties[county1] = split_counties.get(county1, 0) + 1
        self.split_counties = split_counties
        return sum(1 for key in split_counties if split_counties[key] > 0)

    def update(self, partition):
        """
        Lower-cost path for a single flip: adjust the parent's count by
        inspecting only the flipped node's same-county neighbors.

        Returns:
            splits (int): the number of split counties
        """
        old = partition.parent[self.alias]
        node = list(partition.flips.keys())[0]
        new_assignment = partition.flips[node]
        old_assignment = partition.parent.assignment[node]
        flow = 0
        county = partition.graph.nodes[node][self.f]
        for neighbor in partition.graph[node]:
            neighbor_county = partition.graph.nodes[neighbor][self.f]
            # Only same-county neighbors can change this county's splits.
            if county == neighbor_county:
                neighbor_assignment = partition.assignment[neighbor]
                if neighbor_assignment == new_assignment:
                    # This county boundary was split but now it is not.
                    flow -= 1
                elif neighbor_assignment == old_assignment:
                    # This county boundary wasn't split but now it is.
                    flow += 1
        return old + flow
class SplitCounties:
    """
    Gerrychain updater that records, for every county, how many precincts
    of that county fall in each district.

    The output feeds ``county_splits_score`` (e.g. Mattingly's
    split-county score).
    """

    def __init__(self, alias="split_counties", county_field="CountyID",
                 district_field="US_Distric", start_at_one=False):
        """
        Parameters:
            alias (str): updater key on the partition
            county_field (str): node attribute with the county id
            district_field (str): node attribute with the district id
            start_at_one (bool): whether district numbering starts at one
        """
        self.c = county_field
        self.d = district_field
        self.alias = alias
        # Remember the flip we last accounted for so a proposed-but-
        # rejected flip can be rolled back in update().
        self.last_flip = None
        self.start_at_one = start_at_one

    def __call__(self, partition):
        """
        Return {county: {district: precinct count}} for ``partition``.

        Full computation on the initial partition; incremental update on
        single-flip children.
        """
        if partition.parent is None:
            return self.initialize(partition)
        return self.update(partition)

    def initialize(self, partition):
        """
        Full pass over every node, tallying precincts per
        (county, district) pair.
        """
        num_districts = len(partition.parts)
        county_content = {}
        for node in partition.graph.nodes:
            county = partition.graph.nodes[node][self.c]
            district = partition.assignment[node]
            # First time this county is seen: pre-fill every district's
            # counter with zero so scoring can iterate a dense dict.
            if county not in county_content:
                if not self.start_at_one:
                    county_content[county] = {i: 0 for i in range(num_districts)}
                else:
                    county_content[county] = {i: 0 for i in range(1, num_districts + 1)}
            county_content[county][district] += 1
        return county_content

    def update(self, partition):
        """
        Lower-cost path for a single flip. Also repairs the tally when the
        previously recorded flip was proposed but never actually applied.

        (Cleanup: removed the large commented-out self-check/debug block
        that used to live at the end of this method.)
        """
        county_content = partition.parent[self.alias]
        # If the last flip we accounted for was rejected (the node kept
        # its old district), undo that earlier adjustment first.
        if self.last_flip != partition.flips and self.last_flip is not None:
            flipped_node = list(self.last_flip.keys())[0]
            new_district = self.last_flip[flipped_node]
            if partition.assignment[flipped_node] != new_district:
                county = partition.graph.nodes[flipped_node][self.c]
                old_district = partition.parent.assignment[flipped_node]
                county_content[county][new_district] -= 1
                county_content[county][old_district] += 1
        if self.last_flip != partition.flips:
            # Apply the current flip: move one precinct between districts.
            flipped_node = list(partition.flips.keys())[0]
            county = partition.graph.nodes[flipped_node][self.c]
            new_district = partition.assignment[flipped_node]
            old_district = partition.parent.assignment[flipped_node]
            county_content[county][new_district] += 1
            county_content[county][old_district] -= 1
        self.last_flip = partition.flips
        return county_content
def county_splits_score(county_content, three_penalty=100, start_at_one=False, mode="mattingly"):
    """
    Calculate a county-splits fitness score (lower is better).

    Parameters:
        county_content (dict): output of SplitCounties,
            {county: {district: precinct count}}
        three_penalty (float): extra weight for counties split 3+ ways
            (used only by the "mattingly" mode)
        start_at_one (bool): whether district numbering starts at one
        mode (str): "simple" returns the number of split counties;
            "mattingly" returns Mattingly's weighted score

    Returns:
        int for "simple", float for "mattingly", None for other modes
    """
    two_split_counties = {}
    three_split_counties = {}
    # Robustness fix: derive the district count from any county's dict
    # (all are built with the same keys by SplitCounties) instead of
    # assuming a county keyed exactly 1 exists.
    num_districts = len(next(iter(county_content.values())))

    # Classify each county by how many districts it touches.
    for county, districts in county_content.items():
        zeros = 0
        nonzero_districts = []
        for i in range(start_at_one, start_at_one + num_districts):
            if districts[i] == 0:
                zeros += 1
            else:
                nonzero_districts.append(i)
        if zeros == num_districts - 2:
            # County is split two ways
            two_split_counties[county] = nonzero_districts
        elif zeros <= num_districts - 3:
            # County is split three ways. Counties split more than three
            # ways are rare and fall into this category as well.
            three_split_counties[county] = nonzero_districts

    num_two_splits = len(two_split_counties)
    num_three_splits = len(three_split_counties)
    if mode == "simple":
        return num_two_splits + num_three_splits

    # Twice-split counties: sum sqrt(share of the second-largest piece).
    two_proportion_score = 0
    for county, districts in two_split_counties.items():
        district1 = county_content[county][districts[0]]
        district2 = county_content[county][districts[1]]
        try:
            two_proportion_score += np.sqrt(min(district1, district2) / (district1 + district2))
        except FloatingPointError:
            # Only reachable when numpy error handling is set to raise.
            print("These are the district populations:")
            print(district1, district2)
            two_proportion_score = 0

    # 3x-split counties: sum sqrt(share of the smallest piece).
    three_proportion_score = 0
    for county, districts in three_split_counties.items():
        district1 = county_content[county][districts[0]]
        district2 = county_content[county][districts[1]]
        district3 = county_content[county][districts[2]]
        try:
            three_proportion_score += np.sqrt(min(district1, district2, district3) / (district1 + district2 + district3))
        except FloatingPointError:
            print("These are the district populations:")
            print(district1, district2, district3)
            three_proportion_score = 0

    if mode == "mattingly":
        # Mattingly's method: triply-split counties carry the extra
        # `three_penalty` weight.
        return num_two_splits * two_proportion_score + three_penalty * num_three_splits * three_proportion_score
| [
"jwmurri@byu.edu"
] | jwmurri@byu.edu |
b09d2c84b36ef30c97f1bc81ce017ce57b4ec3d9 | 7f863ca7ed47981f69e42fc1add75ba4acad921a | /code-lab/DSA - Long Hike(Fractional Knapsack).py | f4d12e06067fcbb5aa404a68e6bcd836d7d613b3 | [
"CC0-1.0"
] | permissive | Nahid-Hassan/fullstack-software-development | e9f920be9a999c78f156e6102683b93a50c4e597 | 892ffb33e46795061ea63378279a6469de317b1a | refs/heads/main | 2023-08-19T14:16:57.801056 | 2021-09-29T15:34:35 | 2021-09-29T15:34:35 | 376,595,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | weights = 1
weights_values = [(14, 2), (20, 4), (18, 3)]
def knapsack_slow(weights_values, weights):
    """
    Greedy fractional-knapsack fill.

    Parameters:
        weights_values: iterable of (value, weight) pairs
        weights: total carrying capacity

    Returns:
        list of weight amounts taken, in descending order of
        value-per-weight ratio; the final entry may be a fractional take
        of an item when capacity runs out.
    """
    values_by_weights = [(x / y, y) for x, y in weights_values]
    # Most valuable per unit of weight first.
    values_by_weights.sort(reverse=True)
    bags = []
    for i in range(len(values_by_weights)):
        remaining = weights - sum(bags)
        if remaining == 0:
            break
        if values_by_weights[i][1] <= remaining:
            # The whole item fits.
            bags.append(values_by_weights[i][1])
        else:
            # Bug fix: take only the remaining capacity. The original
            # appended the full capacity `weights` and then crashed on
            # `print(weights + '----------')` (int + str TypeError).
            bags.append(remaining)
    return bags
def knapsack_fast(weights_values, weights):
    # NOTE(review): this function is an unfinished stub -- `bags`,
    # `volume` and `temp_weights` are set up but never filled, the sort
    # is ascending (knapsack_slow sorts descending), and the loop body is
    # a bare `pass`, so the call falls through returning None unless
    # `weights` is already 0. TODO: complete or remove.
    bags = []
    volume = 0
    temp_weights = weights
    # (value/weight ratio, weight) pairs, sorted ascending by ratio.
    values_by_weights = [(x/y, y) for x, y in weights_values]
    values_by_weights.sort()
    for i in range(len(weights_values)):
        if weights == 0:
            # Capacity already exhausted before any work happened.
            return (bags, volume)
        if values_by_weights[i][1]:
            pass
| [
"nahid.cseru@gmail.com"
] | nahid.cseru@gmail.com |
8d38501c599a8b003dba0f29e201972499e35da8 | 1729d77c13453c91968c7a63892c6074566f9394 | /product.py | 4e85f6bb1419da155e527901785ee40af2d25015 | [] | no_license | Mickho/product | 8bc9355c77c227fa1f35afb513f26d9552105625 | 8c1807f9d803256311a4700694e4cce01dd73737 | refs/heads/master | 2020-04-18T02:58:01.569085 | 2019-01-23T14:17:02 | 2019-01-23T14:17:02 | 167,182,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py |
# Simple interactive product logger: read name/price pairs until the
# user types 'q', echo them, then dump everything to product.csv.
products = []
while True:
    name = input('請輸入商品名稱:')
    if name == 'q':
        break
    price = input('請輸入商品價格:')
    # One [name, price] record per product.
    p = [name, price]
    products.append(p)
# Bug fix: removed the original `products[0][0]` no-op expression and
# `print(products[0][1])` debug line, both of which raised IndexError
# when the user quit without entering any product.
print(products)
for p in products:
    print(p)
    print(p[0], '價格是:', p[1])
with open('product.csv', 'w') as f:
    for i in products:
        # Bug fix: the original wrote `p` (the loop variable left over
        # from the printing loop above), repeating the last product on
        # every CSV row and crashing with NameError on empty input.
        f.write(i[0] + ',' + i[1] + '\n')
"mick.my.ho@gmail.com"
] | mick.my.ho@gmail.com |
a48a82623d06441277cb384655f5746f481b1a6a | f9d17399ba4d366085c1eef078ce3b6a7e00322e | /posts/models.py | ec3252297513569538bea2b0ffc7437f187dc736 | [] | no_license | lechen614/test_taggit | 5a32089c34625aa73c554acb5d0cc795a1271b8b | c1c8725875b6455989d02cfec8e28697bb1e73f1 | refs/heads/master | 2021-09-25T04:50:09.028580 | 2020-04-02T22:38:59 | 2020-04-02T22:38:59 | 252,580,114 | 0 | 0 | null | 2021-09-22T18:53:19 | 2020-04-02T22:39:47 | JavaScript | UTF-8 | Python | false | false | 365 | py | from django.db import models
from taggit.managers import TaggableManager
class Post(models.Model):
    """A blog post tagged via django-taggit."""
    title = models.CharField(max_length=250)
    body = models.TextField()
    # Stamped once when the row is first created (auto_now_add).
    created_on = models.DateField(auto_now_add=True)
    slug = models.SlugField(unique=True, max_length=100)
    # Tagging support from django-taggit.
    tags = TaggableManager()
    def __str__(self):
        """Use the post title as the display name."""
        return self.title
"lechen614@gmail.com"
] | lechen614@gmail.com |
fae2b2d18a21dbc203ec4fb792faef89a06acefb | 71cd90793b3a409cea5de1abfcbe665cb1e7e754 | /Project8/app8/forms.py | d76e37a84a27367326e22eb389f0d488a98ef243 | [] | no_license | SyedShahzad1162/syed | 81b9717e98531d5a7b794bcc2409c3387633767b | a50d53c264309522b1063fe265f45c45808263b3 | refs/heads/master | 2020-09-20T02:09:03.044013 | 2019-11-27T05:45:14 | 2019-11-27T05:45:14 | 224,353,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py |
from django import forms
from .models import EmployeeModel
class EmployeeForm(forms.ModelForm):
    """ModelForm exposing every field of EmployeeModel."""
    class Meta:
        # Build the form from all fields declared on EmployeeModel.
        model = EmployeeModel
        fields = "__all__"
"57654741+SyedShahzad1162@users.noreply.github.com"
] | 57654741+SyedShahzad1162@users.noreply.github.com |
c82435c26871d260aac691e66a99d04e0c494cb1 | 95735e513af2d56fbc1d971b65f6181f43232b62 | /callmail_project/media/attachments/models.py | 9b5d4db8d618ca792883efcc0805159098bbb27a | [
"MIT"
] | permissive | q8groups/callnmail | c0d8afab18aec5c4a99eef9e09ec239d8d3dbaed | e3f4c01050bee1442545c8e82eba3fb1efb5b3ed | refs/heads/master | 2021-01-20T10:49:49.080812 | 2016-03-05T17:30:13 | 2016-03-05T17:30:13 | 29,821,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | import os
from django.db import models
from django.contrib.auth.models import User
class Mail(models.Model):
    """An email message stored for a user (legacy Python 2 / Django model)."""
    user = models.ForeignKey(User)
    from_user = models.EmailField()
    to_user = models.EmailField()
    subject = models.CharField(max_length=255, blank=True, null=True)
    body = models.TextField(blank=True, null=True)
    date_email = models.DateTimeField()
    def __unicode__(self):
        """Display as 'From: <sender> To: <recipient>' (Python 2 __unicode__)."""
        return 'From: %s To: %s' %(self.from_user, self.to_user)
class MailAttachment(models.Model):
    """A file attached to a Mail, stored under MEDIA_ROOT/attachments/."""
    mail = models.ForeignKey(Mail)
    attachment = models.FileField(upload_to='attachments/')
    def __unicode__(self):
        """Display as "<owner>'s mail attachment" (Python 2 __unicode__)."""
        return "%s's mail attachment" %(self.mail.user)
    def filename(self):
        """Return just the base file name of the stored attachment."""
        return os.path.basename(self.attachment.name)
"ubuntu@ip-172-31-1-171.eu-west-1.compute.internal"
] | ubuntu@ip-172-31-1-171.eu-west-1.compute.internal |
b959b2bb3ca5dd725b5a193fc0f21f9d5e7bd685 | 9d7d2c257d30b5dce45116f31bd803691a2a1a2d | /raidfinder/frames/xoro_shiro.py | a9c0f4e41f7c00eb08914287e4ded7fd759a6dfc | [
"MIT"
] | permissive | cottonmalone/raid-finder-bot | e413f00a59040e9509b9c1396cbcd25fc023105f | e39cd94bdd416ea9b4cd344ed4cebc9751b14149 | refs/heads/master | 2021-07-01T23:39:09.971736 | 2020-02-18T14:56:00 | 2020-02-18T14:56:00 | 240,857,317 | 0 | 0 | MIT | 2021-06-11T18:01:39 | 2020-02-16T08:32:08 | Python | UTF-8 | Python | false | false | 914 | py | import sys
S1 = 0x82A2B175229D6A5B
MAX_INT = 0xFFFFFFFFFFFFFFFF
def rotl(x, k):
    """Rotate the 64-bit value ``x`` left by ``k`` bits."""
    rotated = (x << k) | (x >> (64 - k))
    return rotated & MAX_INT
class XoroShiro:
    """
    Xoroshiro-style 64-bit pseudo-random generator.

    State is two 64-bit words: ``s0`` is the caller-supplied seed and
    ``s1`` always starts from the fixed module constant ``S1``.
    """

    def __init__(self, seed):
        """Initialise the generator state from a 64-bit seed."""
        self.s0 = seed
        self.s1 = S1

    def next(self):
        """Advance the state and return the next 64-bit output word."""
        output = (self.s0 + self.s1) & MAX_INT
        mixed = self.s1 ^ self.s0
        self.s0 = rotl(self.s0, 24) ^ mixed ^ ((mixed << 16) & MAX_INT)
        self.s1 = rotl(mixed, 37)
        return output

    def next_int(self, mask, max_num=sys.maxsize, offset=None):
        """
        Return the next output ANDed with ``mask``, redrawing while the
        masked value is >= ``max_num``. When ``offset`` is supplied it is
        incremented once per redraw and returned with the value.
        """
        value = self.next() & mask
        while value >= max_num:
            value = self.next() & mask
            if offset is not None:
                offset += 1
        return (value, offset) if offset is not None else value

    def next_frame(self):
        """Advance ``s0`` by one frame without producing an output."""
        self.s0 = (self.s0 + self.s1) & MAX_INT

    def clone(self):
        """Return a new generator seeded from the current ``s0`` word."""
        return XoroShiro(self.s0)
| [
"cotton.malone@mail.com"
] | cotton.malone@mail.com |
51461f3351458fdc755f94a8d07918c66f393506 | 4b258823918de3e580a1331ba92fc5468d9af21b | /fidu/fidu/middlewares.py | 0389210ea5e9243df12e764650a1ae28536f2e5f | [] | no_license | ivanieves/env | 519c0a67a145d1abd02858fd29f7a879b9f2f228 | 5def3155dd4652e3807327767c2dc8e63ddf3ff5 | refs/heads/master | 2020-08-01T15:05:12.899386 | 2019-09-26T10:12:16 | 2019-09-26T10:12:16 | 211,027,842 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class FiduSpiderMiddleware(object):
    """Stock scrapy-generated spider middleware for the fidu project.

    Every hook below is the template pass-through implementation:
    requests, responses and items flow through unchanged.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None
    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i
    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass
    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r
    def spider_opened(self, spider):
        # Log spider startup so crawl runs are visible in the log.
        spider.logger.info('Spider opened: %s' % spider.name)
class FiduDownloaderMiddleware(object):
    """Stock scrapy-generated downloader middleware for the fidu project.

    Every hook below is the template pass-through implementation:
    requests and responses flow through unchanged.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None
    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response
    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass
    def spider_opened(self, spider):
        # Log spider startup so crawl runs are visible in the log.
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"ia.nieves2230@uniandes.edu.co"
] | ia.nieves2230@uniandes.edu.co |
cfbf28112e456f0999b8c8dc64ea310f31fb5227 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/leveleditor/worldData/del_fuego_building_int_tattoo.py | 615b94089f0e48bcfbf591d8fc665740418ee377 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {
'Objects': {
'1153434880.63dzlu0': {
'Type': 'Building Interior',
'Name': '',
'AdditionalData': [
'interior_spanish_store_tattoo'],
'Instanced': True,
'Objects': {
'1201136836.84dxschafe': {
'Type': 'Door Locator Node',
'Name': 'door_locator',
'Hpr': VBase3(-180.0, 0.0, 0.0),
'Pos': Point3(-7.141, -11.488, 0.0060000000000000001),
'Scale': VBase3(1.0, 1.0, 1.0) } },
'Visual': {
'Model': 'models/buildings/interior_spanish_npc' } } },
'Node Links': [],
'Layers': {
'Collisions': [
'1184008208.59kmuller',
'1184016064.62kmuller',
'1184013852.84kmuller',
'1185822696.06kmuller',
'1184006140.32kmuller',
'1184002350.98kmuller',
'1184007573.29kmuller',
'1184021176.59kmuller',
'1184005963.59kmuller',
'1188324241.31akelts',
'1184006537.34kmuller',
'1184006605.81kmuller',
'1187139568.33kmuller',
'1188324186.98akelts',
'1184006730.66kmuller',
'1184007538.51kmuller',
'1184006188.41kmuller',
'1184021084.27kmuller',
'1185824396.94kmuller',
'1185824250.16kmuller',
'1185823630.52kmuller',
'1185823760.23kmuller',
'1185824497.83kmuller',
'1185824751.45kmuller',
'1187739103.34akelts',
'1188323993.34akelts',
'1184016538.29kmuller',
'1185822200.97kmuller',
'1184016225.99kmuller',
'1195241421.34akelts',
'1195242796.08akelts',
'1184020642.13kmuller',
'1195237994.63akelts',
'1184020756.88kmuller',
'1184020833.4kmuller',
'1185820992.97kmuller',
'1185821053.83kmuller',
'1184015068.54kmuller',
'1184014935.82kmuller',
'1185821432.88kmuller',
'1185821701.86kmuller',
'1195240137.55akelts',
'1195241539.38akelts',
'1195238422.3akelts',
'1195238473.22akelts',
'1185821453.17kmuller',
'1184021269.96kmuller',
'1185821310.89kmuller',
'1185821165.59kmuller',
'1185821199.36kmuller',
'1185822035.98kmuller',
'1184015806.59kmuller',
'1185822059.48kmuller',
'1185920461.76kmuller',
'1194984449.66akelts',
'1185824206.22kmuller',
'1184003446.23kmuller',
'1184003254.85kmuller',
'1184003218.74kmuller',
'1184002700.44kmuller',
'1186705073.11kmuller',
'1187658531.86akelts',
'1186705214.3kmuller',
'1185824927.28kmuller',
'1184014204.54kmuller',
'1184014152.84kmuller'] },
'ObjectIds': {
'1153434880.63dzlu0': '["Objects"]["1153434880.63dzlu0"]',
'1201136836.84dxschafe': '["Objects"]["1153434880.63dzlu0"]["Objects"]["1201136836.84dxschafe"]' } }
extraInfo = {
'camPos': Point3(-1202.78, 260.68599999999998, 149.845),
'camHpr': VBase3(-98.880099999999999, -28.781600000000001, 0),
'focalLength': 1.3999999761599999 }
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
093e5e2ad83270199e605e12b7c5f8296b43bc2c | d850772af41ccb06b934cd0d6bdca8ffc706cdf4 | /run/vis_evalgen.py | 2d9bdd5af8b2bb233f912f0da33499f7a7148718 | [
"MIT"
] | permissive | yamad07/vjvae | b8f15877c4e534c93a73966c7204446dcc254672 | dd8d6607f5ec6c46df1794f903b42aee890d970b | refs/heads/master | 2022-08-06T23:40:38.885699 | 2020-05-31T23:25:38 | 2020-05-31T23:25:38 | 264,443,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,056 | py | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import json, logging
import numpy as np
import tensorflow as tf
from collections import defaultdict, OrderedDict
from sklearn.cluster import KMeans
from data import *
from utils.analysis import *
from utils.experiments import *
from models.visual import *
if __name__ == '__main__':
    # CLI for generating a human-evaluation task from VAE latent vectors.
    # NOTE(review): 'argparse', 'setup_logging', 'get_closest' and
    # 'gen_eval_task' are presumably provided by the star imports from
    # utils.experiments / utils.analysis above -- confirm.
    arg_parser = argparse.ArgumentParser(description='VisualVAE - Evaluation Task Generation')
    arg_parser.add_argument('task', choices=['mnist', 'cifar', 'bam'], help='name of the task')
    arg_parser.add_argument('data_path', help='path to data (not required for original MNIST)')
    arg_parser.add_argument('data_split', choices=['train', 'test'], default='test', help='data split (train, test (default))')
    arg_parser.add_argument('latent_path', help='path to latent vectors')
    arg_parser.add_argument('out_path', help='path to output')
    arg_parser.add_argument('--num_examples', type=int, default=4, help='number of examples for evaluation (default: 4)')
    arg_parser.add_argument('--num_tasks', type=int, default=20, help='number of tasks for evaluation (default: 20)')
    arg_parser.add_argument('--kmeans', action='store_true', help='use k-means clustering instead of class means')
    arg_parser.add_argument('--merge', action='store_true', help='merge classes with insufficient points into closest class')
    args = arg_parser.parse_args()
    # check if directory already exists
    if os.path.exists(args.out_path):
        print("[Error] '%s' already exists." % (args.out_path,))
        sys.exit()
    # make necessary directories
    os.mkdir(args.out_path)
    setup_logging(os.path.join(args.out_path, 'results.log'))
    # set up visual model
    # NOTE(review): the 'data_split' argument appears unused -- Mnist is
    # always loaded with split='test'; verify this is intentional.
    if args.task == 'mnist':
        dataset = Mnist(split='test', data_path=args.data_path)
    elif args.task == 'cifar':
        dataset = Cifar(args.data_path)
    elif args.task == 'bam':
        dataset = Bam(args.data_path)
        dataset.filter_labels(['emotion_gloomy', 'emotion_happy', 'emotion_peaceful', 'emotion_scary'])
        dataset.filter_uncertain(round_up=False)
        dataset.make_multiclass()
    # load latent vectors
    latents = np.load(args.latent_path)
    dataset.labels = dataset.labels[:latents.shape[0]] # truncate if last batch was dropped
    if args.kmeans:
        # Cluster the latent space instead of using the dataset's labels.
        kmeans = KMeans(n_clusters=3).fit(latents)
        mean_latents = kmeans.cluster_centers_
        dataset.labels = kmeans.labels_
        dataset.label_descs = ['cluster-%d' % i for i in range(mean_latents.shape[0])]
        logging.info("Computed %d clusters using k-means." % mean_latents.shape[0])
    # calculate means
    else:
        # One mean latent vector per dataset class.
        mean_latents = np.zeros([len(dataset.label_descs), latents.shape[1]])
        for c in range(len(dataset.label_descs)):
            lbl_idcs = np.where(dataset.labels == (c * np.ones_like(dataset.labels)))
            mean_latents[c] = np.mean(latents[lbl_idcs], axis=0)
    # reduce rare classes
    if args.merge:
        # Classes with fewer points than (examples + tasks) cannot fill the
        # evaluation task, so fold them into the nearest large class.
        large_labels, small_labels = [], []
        for c in range(len(dataset.label_descs)):
            lbl_count = np.sum(dataset.labels == c)
            if lbl_count < (args.num_examples + args.num_tasks):
                small_labels.append(c)
            else:
                large_labels.append(c)
        logging.info("Found %d classes with insufficient datapoints." % len(small_labels))
        # reassign small labels to larger ones
        for c in small_labels:
            closest_labels, closest_dists = get_closest(mean_latents[c], mean_latents, [i for i in large_labels])
            lbl_idcs = np.where(dataset.labels == (c * np.ones_like(dataset.labels)))
            dataset.labels[lbl_idcs] = closest_labels[0]
            logging.info("Merged '%s' into '%s' with distance %.2f." % (dataset.label_descs[c], dataset.label_descs[closest_labels[0]], closest_dists[0]))
        # Recompute the means of the surviving classes after the merge.
        for c in large_labels:
            lbl_idcs = np.where(dataset.labels == (c * np.ones_like(dataset.labels)))
            mean_latents[c] = np.mean(latents[lbl_idcs], axis=0)
        mean_latents = mean_latents[large_labels]
    # generate evaluation task
    logging.info("Exporting evaluation samples...")
    classes, examples, tasks = gen_eval_task(mean_latents, latents, dataset.labels, dataset.label_descs, args.num_examples, args.num_tasks)
    eval_config = OrderedDict([
        ('name', args.task.upper()),
        ('code', ''),
        ('data_path', ''),
        ('result_path', ''),
        ('classes', [dataset.label_descs[l] for l in classes]),
        ('examples', examples),
        ('tasks', tasks)
    ])
    eval_config_path = os.path.join(args.out_path, 'eval.json')
    with open(eval_config_path, 'w', encoding='utf8') as fop:
        json.dump(eval_config, fop)
    logging.info("Saved evaluation configuration with %d examples and %d tasks to '%s'." % (len(examples), len(tasks), eval_config_path))
| [
"work@personads.me"
] | work@personads.me |
094fe75ca64a9117c1ea25f9283dba2b9d8013fa | 550065648feec8b0525c2a61c3cce4509b0eaa24 | /app/schemas/__init__.py | f27f58e692b3381f3e25b726ff0ea398de574f51 | [] | no_license | springionic/flask-template | 309fccb5a038ae6d70d2d850d9f1da8ac507a944 | 350ad3269a93df8b50f3cc7f92578fe85b25518a | refs/heads/master | 2021-07-08T02:40:13.522739 | 2020-02-24T10:05:29 | 2020-02-24T10:05:29 | 228,383,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # -*- coding: utf-8 -*-
# created by lilei on 2019/11/10
| [
"lei.li@longleding.com"
] | lei.li@longleding.com |
e8e3284f06acb25496c816932d828b9e6ed1c842 | 1993376f454fc25ed0d21938c1f0b9c2751c4517 | /gpim/gpbayes/acqfunc.py | 9febfa088d3426fa70c7a8230212b1fff3a875dc | [
"MIT"
] | permissive | ziatdinovmax/GPim | 1d1c62cce4b0e44a4f4e6c828817333f374ac7d8 | 022f8240b6b0c1b283d38d4d72fb30a567ec0f6a | refs/heads/master | 2023-05-10T20:23:29.178073 | 2022-08-17T23:55:58 | 2022-08-17T23:55:58 | 241,448,693 | 53 | 12 | MIT | 2021-05-08T21:17:23 | 2020-02-18T19:28:27 | Python | UTF-8 | Python | false | false | 2,598 | py | '''
acqfunc.py
======
Acquisition functions
'''
import numpy as np
from scipy.stats import norm
def confidence_bound(gpmodel, X_full, **kwargs):
    """
    Confidence bound acquisition function
    (a modification of upper confidence bound):
    computes alpha * mean + beta * stddev over the full grid.

    Args:
        gpmodel (gpim reconstructor object):
            Surrogate function that allows computing
            mean and standard deviation
        X_full (ndarray):
            Full grid indices
        **alpha (float):
            Weight on the predictive mean (default: 0)
        **beta (float):
            Weight on the predictive standard deviation (default: 1)

    Returns:
        Acquisition function values and GP prediction (mean + stddev)
    """
    weight_mean = kwargs.get("alpha", 0)
    weight_sd = kwargs.get("beta", 1)
    mu, sigma = gpmodel.predict(X_full, verbose=0)
    score = weight_mean * mu + weight_sd * sigma
    return score, (mu, sigma)
def expected_improvement(gpmodel, X_full, X_sparse, **kwargs):
    """
    Expected improvement acquisition function:
    EI = imp * Phi(z) + sigma * phi(z), with z = imp / sigma and
    imp = mean - best_sampled_mean - xi.

    Args:
        gpmodel (gpim reconstructor object):
            Surrogate function that allows computing
            mean and standard deviation
        X_full (ndarray):
            Full grid indices
        X_sparse (ndarray):
            Sparse grid indices
        **xi (float):
            Exploration/exploitation trade-off constant (default: 0.01)

    Returns:
        Acquisition function values and GP prediction (mean + stddev)
    """
    jitter = kwargs.get("xi", 0.01)
    mu, sigma = gpmodel.predict(X_full, verbose=0)
    sampled_mu, _ = gpmodel.predict(X_sparse, verbose=0)
    # Incumbent: best mean observed on the sparse (sampled) grid.
    incumbent = np.nanmax(sampled_mu)
    gain = mu - incumbent - jitter
    score = gain / sigma
    return gain * norm.cdf(score) + sigma * norm.pdf(score), (mu, sigma)
def probability_of_improvement(gpmodel, X_full, X_sparse, **kwargs):
    """
    Probability of improvement acquisition function:
    PI = Phi((mean - best_sampled_mean - xi) / sd).

    Args:
        gpmodel (gpim reconstructor object):
            Surrogate function that allows computing
            mean and standard deviation
        X_full (ndarray):
            Full grid indices
        X_sparse (ndarray):
            Sparse grid indices
        **xi (float):
            xi constant value

    Returns:
        Acquisition function values and GP prediction (mean + stddev)
    """
    xi = kwargs.get("xi", 0.01)
    mean, sd = gpmodel.predict(X_full, verbose=0)
    # BUGFIX: predict() returns a (mean, sd) tuple; it must be unpacked
    # (as in expected_improvement). Previously the whole tuple was fed to
    # np.nanmax, so the incumbent wrongly included the stddev values.
    mean_sample, _ = gpmodel.predict(X_sparse, verbose=0)
    mean_sample_opt = np.nanmax(mean_sample)
    z = mean - mean_sample_opt - xi
    z = z / sd
    acq = norm.cdf(z)
    return acq, (mean, sd)
| [
"ziatdinovmax@gmail.com"
] | ziatdinovmax@gmail.com |
78e251a245f2d2874cde70667cda12212403b642 | e32ee62a3db47f46618b08940c3d5eed29676a6c | /kola2.py | 251ace050890ec070857d7beb17bdf46f6751621 | [] | no_license | ATomkowiak/Milionerzy | 25b49aa8ed661023d44958229c92cb055027d18e | 26a7b4c3f1b12cd0e081f70c24eae8af7e4a4d1f | refs/heads/master | 2020-05-25T23:31:37.818454 | 2019-06-12T11:23:10 | 2019-06-12T11:23:10 | 188,036,226 | 0 | 0 | null | 2019-06-12T11:23:11 | 2019-05-22T12:40:17 | Python | UTF-8 | Python | false | false | 9,279 | py | import random
import time
uzycie= [] #### module-level list recording which lifelines have already been used
def fifty(Pytania):
    #### "50:50" lifeline for the Millionaires game.
    #### Intended behaviour: remove two randomly chosen wrong answers and show
    #### one wrong and the correct answer, making the player's choice easier.
    #### (a/b/c/d are the consecutive answers of the given question)
    print('Skorzystałeś z koła pół na pół, oto pozostałe odpowiedzi')
    # NOTE(review): randrange(1,3) only yields 1 or 2, so the temp_d branch
    # below can never run -- randrange(1,4) was presumably intended; confirm.
    temp=random.randrange(1,3)
    while True:
        temp_b=1
        # NOTE(review): the trailing comma makes temp_c the tuple (2,), so
        # 'temp == temp_c' is never true; 'temp_c=2' was probably intended.
        temp_c=2,
        temp_d=3
        if temp == temp_b:
            # NOTE(review): 'pytania' (lowercase) is not defined in this
            # module (the parameter is 'Pytania'), and list.remove() takes a
            # single value, so remove(2,3) raises TypeError. Verify intent.
            pytania.wyniki1.remove(2,3)
            print(Pytania.wyniki)
            break
        elif temp == temp_c:
            pytania.wyniki1.remove(1,3)
            print(Pytania.wyniki)
            break
        elif temp==temp_d:
            pytania.wyniki1.remove(1,2)
            print(Pytania.wyniki)
            break
def telefon(Pytania):
    #### "Phone a friend" lifeline: returns the correct answer with 75%
    #### probability.
    prop = random.random()
    while True:
        if prop>0.25:
            # NOTE(review): 'pytania' (lowercase) is undefined here and
            # list.remove() accepts only one argument -- remove(1,2,3) raises
            # TypeError. Presumably the wrong answers should be removed one
            # by one; verify against the question/answer data structure.
            pytania.wyniki1.remove(1,2,3)
            print(Pytania.wyniki)
            break
        else:
            pytania.wyniki1.remove(0,1,2)
            print(Pytania.wyniki)
            break
def publika(Pytania):
    #### "Ask the audience" lifeline: returns the correct answer with 60%
    #### probability.
    prawd=random.random()
    while True:
        if prawd>0.4:
            # NOTE(review): same defects as in telefon() -- 'pytania'
            # (lowercase) is undefined and list.remove() takes one argument.
            pytania.wyniki1.remove(1,2,3)
            print(Pytania.wyniki)
            break
        else:
            pytania.wyniki1.remove(0,1,2)
            print(Pytania.wyniki)
            break
def kola(Pytania):
    #### Lifeline menu for the Millionaires game: lets the player use a
    #### remaining lifeline, walk away with the guaranteed amount, or keep
    #### playing without a lifeline.
    while True:
        print('Jeśli chcesz skrorzystać z kołą ratunkowego wpisz koło')
        time.sleep(1)
        print('Jeśli chcesz odejść z kwotą gwarantowaną wpisz kwota')
        time.sleep(1)
        print('Jeśli chcesz grać dalej bez koła wpisz gram')
        mozliwosc_kola = input()
        if mozliwosc_kola == 'kwota':
            print("Gratulacje!")
            time.sleep(1)
            # NOTE(review): 'pytania' and 'menu' are not defined in this
            # module -- presumably supplied by the importing game module;
            # verify, otherwise this branch raises NameError.
            print("Wygrałeś", pytania.hajs, 'zł')
            menu.wyjście()
        elif mozliwosc_kola == 'koło':
            # No lifeline used yet: offer all three.
            if len(uzycie)==0:
                while True:
                    print('Którego koła chcesz użyć?')
                    print('Jeśli chcesz skorzystać z pół na pół wciśnij 1')
                    print('Jeśli chcesz skrozystać z telefonu do przyjaciela wciśnij 2')
                    print('Jeśli chcesz skorzystać z pomocy publiczności wciśnij 3')
                    print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                    wybor=int(input())
                    if wybor == 1:
                        uzycie.append('fifty')
                        # NOTE(review): fifty/telefon/publika declare a
                        # required 'Pytania' parameter but are called with no
                        # arguments throughout this function -- TypeError at
                        # runtime; confirm the intended call signature.
                        fifty()
                        break
                    elif wybor == 2:
                        uzycie.append('tele')
                        telefon()
                        break
                    elif wybor ==3:
                        uzycie.append('publika')
                        publika()
                        break
                    elif wybor == 4:
                        print('wracamy do gry')
                        break
            # Exactly one lifeline used: offer the remaining two.
            elif len(uzycie)==1:
                if 'fifty' in uzycie:
                    while True:
                        print('Którego koła chcesz użyć?')
                        print('Jeśli chcesz skrozystać z telefonu do przyjaciela wciśnij 2')
                        print('Jeśli chcesz skorzystać z pomocy publiczności wciśnij 3')
                        print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                        wybor=int(input())
                        if wybor == 2:
                            uzycie.append('tele')
                            telefon()
                            break
                        elif wybor ==3:
                            uzycie.append('publika')
                            publika()
                            break
                        elif wybor == 4:
                            print('wracamy do gry')
                            break
                elif 'tele' in uzycie:
                    while True:
                        print('Którego koła chcesz użyć?')
                        print('Jeśli chcesz skorzystać z pół na pół wciśnij 1')
                        print('Jeśli chcesz skorzystać z pomocy publiczności wciśnij 3')
                        print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                        wybor=int(input())
                        if wybor == 1:
                            uzycie.append('fifty')
                            fifty()
                            break
                        elif wybor ==3:
                            uzycie.append('publika')
                            publika()
                            break
                        elif wybor == 4:
                            print('wracamy do gry')
                            break
                elif 'publika' in uzycie:
                    while True:
                        print('Którego koła chcesz użyć?')
                        print('Jeśli chcesz skorzystać z pół na pół wciśnij 1')
                        print('Jeśli chcesz skrozystać z telefonu do przyjaciela wciśnij 2')
                        print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                        wybor=int(input())
                        if wybor == 1:
                            uzycie.append('fifty')
                            fifty()
                            break
                        elif wybor == 2:
                            uzycie.append('tele')
                            telefon()
                            break
                        elif wybor == 4:
                            print('wracamy do gry')
                            break
            # Two lifelines used: only one remains.
            elif len(uzycie)==2:
                if 'fifty' in uzycie:
                    if 'tele' in uzycie:
                        while True:
                            print('Którego koła chcesz użyć?')
                            print('Jeśli chcesz skorzystać z pomocy publiczności wciśnij 3')
                            print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                            wybor=int(input())
                            if wybor ==3:
                                uzycie.append('publika')
                                publika()
                                break
                            elif wybor == 4:
                                print('wracamy do gry')
                                break
                    elif 'publika' in uzycie:
                        while True:
                            print('Którego koła chcesz użyć?')
                            print('Jeśli chcesz skrozystać z telefonu do przyjaciela wciśnij 2')
                            print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                            wybor=int(input())
                            if wybor == 2:
                                uzycie.append('tele')
                                telefon()
                                break
                            elif wybor == 4:
                                print('wracamy do gry')
                                break
                elif 'publika' in uzycie:
                    if 'tele' in uzycie:
                        while True:
                            print('Którego koła chcesz użyć?')
                            print('Jeśli chcesz skorzystać z pół na pół wciśnij 1')
                            print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                            wybor=int(input())
                            if wybor == 1:
                                uzycie.append('fifty')
                                fifty()
                                break
                            elif wybor == 4:
                                print('wracamy do gry')
                                break
                    elif 'fifty' in uzycie:
                        while True:
                            print('Którego koła chcesz użyć?')
                            print('Jeśli chcesz skrozystać z telefonu do przyjaciela wciśnij 2')
                            print('Jeśli nie chcesz korzystać z koła wciśnij 4')
                            wybor=int(input())
                            if wybor == 2:
                                uzycie.append('tele')
                                # NOTE(review): inconsistent with the other
                                # call sites, and 'pytanie1' is not defined
                                # in this module -- verify intent.
                                telefon(pytanie1.pytanie)
                                break
                            elif wybor == 4:
                                print('wracamy do gry')
                                break
            # All three lifelines already used.
            elif len(uzycie)==3:
                print('nie masz już kół')
                break
        elif mozliwosc_kola == 'gram':
            print('wracamy do gry')
            break
| [
"50331900+ATomkowiak@users.noreply.github.com"
] | 50331900+ATomkowiak@users.noreply.github.com |
6d0af6e60b4a6f03950d4d6c993e137e440c9a69 | b73f0efbafab231b6df81bbed962b84609f1a92d | /Graph/BFS.py | ed9d5d72a8327be203f10c6a2855af54fc71a52d | [] | no_license | laits1/Algorithm | 69a4f55b5317362fdbba6f6c8a1c8d2ded3234cd | 976123f31e71578048f2738bf0f807b723d30d46 | refs/heads/master | 2023-06-26T08:27:13.317966 | 2021-07-31T08:03:16 | 2021-07-31T08:03:16 | 389,960,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | # BFS(Breadth-First Search)
# BFS는 너비 우선 탐색이라고도 부르며, 그래프에서 가까운 노드부터 우선적으로 탐색하는 알고리즘
# BFS는 큐 자료구조를 이용
# 1. 탐색 시작 노드를 큐에 삽입하고 방문 처리
# 2. 큐에서 노드를 꺼낸 뒤에 해당 노드의 인접 노드 중에서 방문하지 않은 모든 노드를 모두 큐에 삽입하고 방문 ㄴ처리
# 3. 더 이상 2번의 과정을 수행할 수 없을때까지 방복
from collections import deque
# BFS 메서드 정의
def bfs(graph, start, visited):
    """Breadth-first traversal from ``start``.

    Prints each node (space-separated) in the order it is visited and marks
    reachable nodes as True in the caller-supplied ``visited`` list.
    """
    # FIFO frontier: nearest nodes are dequeued first.
    pending = deque([start])
    visited[start] = True
    while pending:
        node = pending.popleft()
        print(node, end= ' ')
        # Enqueue every not-yet-seen neighbour, marking it immediately so
        # it cannot be enqueued twice.
        for neighbor in graph[node]:
            if not visited[neighbor]:
                visited[neighbor] = True
                pending.append(neighbor)
# Adjacency list: index i holds the nodes connected to node i
# (index 0 is unused; the nodes are numbered 1..8).
graph = [
    [],
    [2, 3, 8],
    [1, 7],
    [1, 4, 5],
    [3, 5],
    [3, 4],
    [7],
    [2, 6, 8],
    [1, 7]
]
# Visited flag per node (1-D list)
visited = [False] * 9
# Call the BFS function defined above
bfs(graph, 1, visited) | [
"thsehdrl1@gmail.com"
] | thsehdrl1@gmail.com |
04a2cdcdfedb4854f4b01a254dfe883f0ffe642e | 83a63ce2d9b7f8148589c44f1d1085a9c80682fc | /yewu/jiekou/select_myselfsongs.py | 20c88ede0df5b364e67e740e9b28d850607cf5d0 | [] | no_license | lijiajing068/requests_test | 1f21c8e9245830a55e4a0d13d28a0b6bf52c0b67 | eecef45c7cdae45d0962691d87bdcb29d5f38eb5 | refs/heads/master | 2022-12-11T03:48:36.714351 | 2020-09-07T07:06:03 | 2020-09-07T07:06:03 | 292,235,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | import unittest
import requests
from yewu.common.Common_old import encrypted_request,basic_data_headers
#from wangyi_login import LoginTest
class SelectSongs(unittest.TestCase):
    """Exercises the NetEase Cloud Music "user playlists" web API."""

    def test_select_songs(self,uid="1541194463"):
        """POST the encrypted playlist query for ``uid`` and check the
        nickname of the first playlist's creator.

        uid: the user id returned after login (hard-coded default here).
        """
        #self.id=LoginTest.test_login_success()
        #print(self.id)
        # uid is the user id returned after logging in
        base_url = "http://music.163.com/weapi/user/playlist"
        data={'offset': 0, 'uid': uid, 'limit': 1000, 'csrf_token': ''}
        # Encrypt the payload the way the web client does (weapi scheme).
        payload = encrypted_request(data)
        r=requests.post(base_url,data=payload,headers=basic_data_headers())
        print(r.content)
        self.result=r.json()
        #print(self.result)
        # The expected nickname is account data and must stay byte-identical.
        self.assertEqual(self.result['playlist'][0]['creator']['nickname'],'樱桃你个车厘子1808')
        #return result
if __name__ == '__main__':
unittest.main() | [
"863667546@qq.com"
] | 863667546@qq.com |
8da58df298c3f417894362409649e16ba045b26b | 11806ceb316950e41725f8acb2d7e5ecea6036a1 | /biomass/core.py | fb61327fbf5acde1d04b4780683f21c215b7631e | [
"Apache-2.0"
] | permissive | TrendingTechnology/biomass | 82cb65892c467cc236ce212caa9ff21cc9812e22 | 2e2b262f6d99834d2d1b44a1304fcf5395b566ef | refs/heads/master | 2023-05-28T03:50:38.016708 | 2021-06-01T04:40:23 | 2021-06-01T04:40:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,116 | py | """BioMASS core functions"""
import multiprocessing
import os
from dataclasses import dataclass
from importlib import import_module
from pathlib import Path
from typing import Any, Optional
from .analysis import InitialConditionSensitivity, ParameterSensitivity, ReactionSensitivity
from .dynamics import SignalingSystems
from .estimation import GeneticAlgorithmContinue, GeneticAlgorithmInit
from .exec_model import ModelObject
__all__ = ["Model", "optimize", "optimize_continue", "run_simulation", "run_analysis"]
@dataclass
class Model(object):
    """
    The BioMASS model object.

    Attributes
    ----------
    pkg_name: str
        Path (dot-sepalated) to a biomass model directory.
        Use '__package__'.
    """

    pkg_name: str

    def _load_model(self) -> Any:
        """Import and return the model package named by ``pkg_name``.

        NOTE(review): on ImportError this prints a message and implicitly
        returns None, which ``create`` then passes to ModelObject -- confirm
        downstream handling.
        """
        try:
            biomass_model = import_module(self.pkg_name)
            return biomass_model
        except ImportError:
            p = Path(self.pkg_name.replace(".", os.sep))
            print(f"cannot import '{p.name}' from '{p.parent}'.")

    def create(self, show_info: bool = False) -> ModelObject:
        """
        Build a biomass model.

        Parameters
        ----------
        show_info : bool (default: False)
            Set to 'True' to print the information related to model size.

        Examples
        --------
        >>> from biomass import Model
        >>> import your_model
        >>> model = Model(your_model.__package__).create()
        """
        model = ModelObject(self.pkg_name.replace(".", os.sep), self._load_model())
        # Validate and complete the normalization spec, if one is defined.
        if model.sim.normalization:
            for obs_name in model.obs:
                # An integer timepoint must fall inside the simulated span.
                if (
                    isinstance(model.sim.normalization[obs_name]["timepoint"], int)
                    and not model.sim.t[0]
                    <= model.sim.normalization[obs_name]["timepoint"]
                    <= model.sim.t[-1]
                ):
                    raise ValueError("Normalization timepoint must lie within sim.t.")
                # An empty condition list means "normalize over all conditions".
                if not model.sim.normalization[obs_name]["condition"]:
                    model.sim.normalization[obs_name]["condition"] = model.sim.conditions
                else:
                    for c in model.sim.normalization[obs_name]["condition"]:
                        if c not in model.sim.conditions:
                            raise ValueError(
                                f"Normalization condition '{c}' is not defined in sim.conditions."
                            )
        if show_info:
            model_name = Path(model.path).name
            print(
                f"{model_name} information\n" + ("-" * len(model_name)) + "------------\n"
                f"{len(model.species):d} species\n"
                f"{len(model.parameters):d} parameters, "
                f"of which {len(model.sp.idx_params):d} to be estimated"
            )
        return model
def _check_optional_arguments(
end: Optional[int],
options: Optional[dict],
) -> None:
if options is None:
pass
elif isinstance(options, dict):
if options["local_search_method"].lower() not in ["mutation", "powell", "de"]:
raise ValueError(
f"'{options['local_search_method']}': "
"Invalid local_search_method. Should be one of ['mutation', 'Powell', 'DE']"
)
elif (
isinstance(end, int)
and options["local_search_method"].lower() == "de"
and options["workers"] != 1
):
raise AssertionError(
"daemonic processes are not allowed to have children. Set options['workers'] to 1."
)
else:
raise TypeError("options must be dict or None.")
def optimize(
    model: ModelObject,
    start: int,
    end: Optional[int] = None,
    options: Optional[dict] = None,
) -> None:
    """
    Estimate model parameters from experimental data.

    Parameters
    ----------
    model : ModelObject
        Model for parameter estimation.
    start : int
        Index of parameter set to estimate.
    end : int, optional
        When `end` is specified, parameter sets from `start` to `end` will be estimated.
    options : dict, optional
        * popsize : int (default: 5)
            A multiplier for setting the total population size.
            The population has popsize * len(search_param) individuals.
        * max_generation : int (default: 10000)
            Stop optimization if Generation > max_generation.
        * initial_threshold : float (default: 1e12)
            Threshold on objective function value used to generate initial population.
            Default value is 1e12 (numerically solvable).
        * allowable_error : float (default: 0.0)
            Stop optimization if Best Fitness <= allowable_error.
        * local_search_method : str (default: 'mutation')
            Method used in local search. Should be one of
            * 'mutation' : NDM/MGG
            * 'Powell' : Modified Powell method
            * 'DE' : Differential Evolution (strategy: best2bin)
        * n_children : int (default: 200)
            (method='mutation') The number of children generated in NDM/MGG.
        * maxiter : int (default: 10)
            (method='Powell' or 'DE') The maximum number of iterations
            over which the entire population is evolved.
        * workers : int (default: -1 if `end` is None else 1)
            (method='DE') The population is subdivided into workers sections and
            evaluated in parallel (uses multiprocessing.Pool). Supply -1 to use
            all available CPU cores. Set workers to 1 when searching multiple
            parameter sets simultaneously.
        * overwrite : bool (default: False)
            If True, the out/n folder will be overwritten.

    Examples
    --------
    >>> from biomass.models import Nakakuki_Cell_2010
    >>> from biomass import Model, optimize
    >>> model = Model(Nakakuki_Cell_2010.__package__).create()
    >>> optimize(
    ...     model=model, start=1, end=10,
    ...     options={
    ...         'max_generation': 10000,
    ...         'allowable_error': 0.5
    ...     }
    ... )
    """
    # Fill in defaults first so the validation below sees every key.
    if options is None:
        options = {}
    options.setdefault("popsize", 5)
    options.setdefault("max_generation", 10000)
    options.setdefault("initial_threshold", 1e12)
    options.setdefault("allowable_error", 0.0)
    options.setdefault("local_search_method", "mutation")
    options.setdefault("n_children", 200)
    options.setdefault("maxiter", 10)
    options.setdefault("workers", -1 if end is None else 1)
    options.setdefault("overwrite", False)
    _check_optional_arguments(end, options)
    ga_init = GeneticAlgorithmInit(model, **options)
    if end is None:
        # Single parameter set: run in-process.
        ga_init.run(int(start))
    else:
        # Multiple parameter sets: one worker process per set.
        n_proc = max(1, multiprocessing.cpu_count() - 1)
        with multiprocessing.Pool(processes=n_proc) as p:
            for _ in p.imap_unordered(
                ga_init.run,
                range(int(start), int(end) + 1),
            ):
                pass
def optimize_continue(
    model: ModelObject,
    start: int,
    end: Optional[int] = None,
    options: Optional[dict] = None,
) -> None:
    """
    Continue running optimization from where you stopped in the last parameter search.

    Parameters
    ----------
    model : ModelObject
        Model for parameter estimation.
    start : int
        Index of parameter set to estimate.
    end : int, optional
        When `end` is specified, parameter sets from `start` to `end` will be estimated.
    options : dict, optional
        * popsize : int (default: 5)
            A multiplier for setting the total population size.
            The population has popsize * len(search_param) individuals.
        * max_generation : int (default: 15000)
            Stop optimization if Generation > max_generation.
        * initial_threshold : float (default: 1e12)
            Threshold on objective function value used to generate initial population.
            Default value is 1e12 (numerically solvable).
        * allowable_error : float (default: 0.0)
            Stop optimization if Best Fitness <= allowable_error.
        * local_search_method : str (default: 'mutation')
            Method used in local search. Should be one of
            * 'mutation' : NDM/MGG
            * 'Powell' : Modified Powell method
            * 'DE' : Differential Evolution (strategy: best2bin)
        * n_children : int (default: 200)
            (method='mutation') The number of children generated in NDM/MGG.
        * maxiter : int (default: 10)
            (method='Powell' or 'DE') The maximum number of iterations
            over which the entire population is evolved.
        * workers : int (default: -1 if `end` is None else 1)
            (method='DE') The population is subdivided into workers sections and
            evaluated in parallel (uses multiprocessing.Pool). Supply -1 to use
            all available CPU cores. Set workers to 1 when searching multiple
            parameter sets simultaneously.
        * p0_bounds : list of floats (default: [0.1, 10.0])
            Generate initial population using best parameter values in the last
            parameter search.
            - lower_bound = po_bounds[0] * best_parameter_value
            - upper_bound = p0_bounds[1] * best_parameter_value

    Examples
    --------
    >>> from biomass.models import Nakakuki_Cell_2010
    >>> from biomass import Model, optimize_continue
    >>> model = Model(Nakakuki_Cell_2010.__package__).create()
    >>> optimize_continue(
    ...     model=model, start=1, end=10,
    ...     options={
    ...         'max_generation': 20000,
    ...         'allowable_error': 0.5
    ...     }
    ... )
    """
    # Fill in defaults first so the validation below sees every key.
    if options is None:
        options = {}
    options.setdefault("popsize", 5)
    options.setdefault("max_generation", 15000)
    options.setdefault("initial_threshold", 1e12)
    options.setdefault("allowable_error", 0.0)
    options.setdefault("local_search_method", "mutation")
    options.setdefault("n_children", 200)
    options.setdefault("maxiter", 10)
    options.setdefault("workers", -1 if end is None else 1)
    options.setdefault("p0_bounds", [0.1, 10.0])
    _check_optional_arguments(end, options)
    ga_continue = GeneticAlgorithmContinue(model, **options)
    if end is None:
        # Single parameter set: run in-process.
        ga_continue.run(int(start))
    else:
        # Multiple parameter sets: one worker process per set.
        n_proc = max(1, multiprocessing.cpu_count() - 1)
        with multiprocessing.Pool(processes=n_proc) as p:
            for _ in p.imap_unordered(
                ga_continue.run,
                range(int(start), int(end) + 1),
            ):
                pass
def run_simulation(
    model: ModelObject,
    *,
    viz_type: str = "original",
    show_all: bool = False,
    stdev: bool = False,
    save_format: str = "pdf",
    param_range: Optional[dict] = None,
) -> None:
    """
    Simulate ODE model with estimated parameter values.

    Parameters
    ----------
    model : ModelObject
        Model for simulation.
    viz_type : str
        * 'average':
            The average of simulation results with parameter sets in "out/".
        * 'best':
            The best simulation result in "out/", simulation with
            "best_fit_param".
        * 'original':
            Simulation with the default parameters and initial values
            defined in "set_model.py".
        * 'n(=1,2,...)':
            Use the parameter set in "out/n/".
        * 'experiment'
            Draw the experimental data written in observable.py without
            simulation results.
    show_all : bool
        Whether to show all simulation results.
    stdev : bool
        If True, the standard deviation of simulated values will be shown
        (only available for 'average' visualization type).
    save_format : str (default: "pdf")
        Either "png" or "pdf", indicating whether to save figures
        as png or pdf format.
    param_range : dict, optional
        * orientation : str (default: 'portrait')
            Either 'portrait' or 'landscape'.
        * distribution : str (default: 'boxenplot')
            Either 'boxplot' or 'boxenplot'.
        * scatter : bool (default: False)
            If True, draw a stripplot.

    Examples
    --------
    >>> from biomass.models import Nakakuki_Cell_2010
    >>> from biomass import Model, run_simulation
    >>> model = Model(Nakakuki_Cell_2010.__package__).create()
    >>> run_simulation(
    ...     model,
    ...     viz_type='average',
    ...     show_all=False,
    ...     stdev=True,
    ...     save_format="png",
    ... )
    """
    # A decimal string ("1", "2", ...) selects a specific out/n/ parameter set.
    if viz_type not in ["best", "average", "original", "experiment"] and not viz_type.isdecimal():
        raise ValueError(
            "Available viz_type are: 'best','average','original','experiment','n(=1, 2, ...)'"
        )
    if save_format not in ["pdf", "png"]:
        raise ValueError("save_format must be either 'pdf' or 'png'.")
    # Fill in plotting defaults before validating them.
    if param_range is None:
        param_range = {}
    param_range.setdefault("orientation", "portrait")
    param_range.setdefault("distribution", "boxenplot")
    param_range.setdefault("scatter", False)
    if param_range["orientation"] not in ["portrait", "landscape"]:
        raise ValueError("Available param_range['orientation'] are: 'portrait' or 'landscape'.")
    if param_range["distribution"] not in ["boxplot", "boxenplot"]:
        raise ValueError("Available param_range['distribution'] are: 'boxplot' or 'boxenplot'.")
    if not isinstance(param_range["scatter"], bool):
        raise TypeError("param_range['scatter'] must be a boolean.")
    SignalingSystems(model).simulate_all(
        viz_type=viz_type,
        show_all=show_all,
        stdev=stdev,
        save_format=save_format,
        param_range=param_range,
    )
def run_analysis(
    model: ModelObject,
    *,
    target: str,
    metric: str = "integral",
    style: str = "barplot",
    save_format: str = "pdf",
    options: Optional[dict] = None,
) -> None:
    """
    Employ sensitivity analysis to identify critical parameters, species or
    reactions in the complex biological network.

    The sensitivity S(y,x) was calculated according to the following equation:
    S(y,x) = d ln(yi) / d ln (xj), where yi is the signaling metric and xj is
    each nonzero species, parameter value or reaction rate.

    Parameters
    ---------
    model : ModelObject
        Model for sensitivity analysis.
    target : str
        * 'reaction'
        * 'initial_condition'
        * 'parameter'
    metric : str (default: 'integral')
        * 'maximum' : The maximum value.
        * 'minimum' : The minimum value.
        * 'argmax' : The time to reach the maximum value.
        * 'argmin' : The time to reach the minimum value.
        * 'timepoint' : The simulated value at the time point set via options['timepoint'].
        * 'duration' : The time it takes to decline below the threshold set via options['duration'].
        * 'integral' : The integral of concentration over the observation time.
    style : str (default: 'barplot')
        * 'barplot'
        * 'heatmap'
    save_format : str (default: "pdf")
        Either "png" or "pdf", indicating whether to save figures
        as png or pdf format.
    options : dict, optional
        * show_indices : bool (default: True)
            (target == 'reaction') Set to True to put reaction index on each bar.
        * excluded_params : list of strings
            (target == 'parameter') List of parameters which are not used for analysis.
        * excluded_initials : list of strings
            (target == 'initial_condition') List of species which are not used for analysis.
        * timepoint : int (default: model.sim.t[-1])
            (metric=='timepoint') Which timepoint to use.
        * duration : float (default: 0.5)
            (metric=='duration') 0.1 for 10% of its maximum.

    Examples
    --------
    >>> from biomass.models import Nakakuki_Cell_2010
    >>> from biomass import Model, run_analysis
    >>> model = Model(Nakakuki_Cell_2010.__package__).create()
    >>> # Parameters
    >>> run_analysis(
    ...     model,
    ...     target='parameter',
    ...     options = {
    ...         'excluded_params': [
    ...             'a', 'Vn', 'Vc', 'Ligand', 'EGF', 'HRG', 'no_ligand'
    ...         ]
    ...     }
    ... )
    >>> # Initial condition
    >>> run_analysis(model, target='initial_condition')
    >>> # Reaction
    >>> run_analysis(model, target='reaction')
    """
    if save_format not in ["pdf", "png"]:
        raise ValueError("save_format must be either 'pdf' or 'png'.")
    # Fill in defaults before validating them.
    if options is None:
        options = {}
    options.setdefault("show_indices", True)
    options.setdefault("excluded_params", [])
    options.setdefault("excluded_initials", [])
    options.setdefault("timepoint", model.sim.t[-1])
    options.setdefault("duration", 0.5)
    # BUGFIX: corrected typo in this error message ("timepooint" -> "timepoint").
    if not model.sim.t[0] <= options["timepoint"] <= model.sim.t[-1]:
        raise ValueError("options['timepoint'] must lie within sim.t.")
    if not 0.0 < options["duration"] < 1.0:
        raise ValueError("options['duration'] must lie within (0, 1).")
    # Dispatch to the analyzer matching the requested target.
    if target == "reaction":
        ReactionSensitivity(model).analyze(
            metric=metric,
            style=style,
            save_format=save_format,
            options=options,
        )
    elif target == "parameter":
        ParameterSensitivity(model).analyze(
            metric=metric,
            style=style,
            save_format=save_format,
            options=options,
        )
    elif target == "initial_condition":
        InitialConditionSensitivity(model).analyze(
            metric=metric,
            style=style,
            save_format=save_format,
            options=options,
        )
    else:
        raise ValueError(
            "Available targets are: '{}".format(
                "', '".join(["reaction", "parameter", "initial_condition"]) + "'."
            )
        )
| [
"31299606+himoto@users.noreply.github.com"
] | 31299606+himoto@users.noreply.github.com |
a8ba2d5c8302ea20dac6cf2c653d709d5b012a3b | c35d5713b9991efeb0f8a2665c91c74127138594 | /bufferbloat.py | 4e0431721722d6c7a93a6b3fc70e29396676f8fa | [] | no_license | vs9390/bufferbloat | 70849c13f24e0f7744a7852e8ed838a6235dbd0f | cc5341b5f0c0f835e6ec2e3d536abd2d80a5b096 | refs/heads/master | 2020-04-21T14:16:58.228215 | 2019-02-07T19:30:32 | 2019-02-07T19:30:32 | 169,629,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,038 | py | #!/usr/bin/python
"CS144 In-class exercise: Buffer Bloat"
from mininet.topo import Topo
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.log import lg, info
from mininet.util import dumpNodeConnections
from mininet.cli import CLI
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
# Parse arguments
parser = ArgumentParser(description="BufferBloat tests")
parser.add_argument('--bw-host', '-B',
dest="bw_host",
type=float,
action="store",
help="Bandwidth of host links",
required=True)
parser.add_argument('--bw-net', '-b',
dest="bw_net",
type=float,
action="store",
help="Bandwidth of network link",
required=True)
parser.add_argument('--delay',
dest="delay",
type=float,
help="Delay in milliseconds of host links",
default=10)
parser.add_argument('--dir', '-d',
dest="dir",
action="store",
help="Directory to store outputs",
default="results",
required=True)
parser.add_argument('-n',
dest="n",
type=int,
action="store",
help="Number of nodes in star.",
required=True)
parser.add_argument('--nflows',
dest="nflows",
action="store",
type=int,
help="Number of flows per host (for TCP)",
required=True)
parser.add_argument('--maxq',
dest="maxq",
action="store",
help="Max buffer size of network interface in packets",
default=500)
parser.add_argument('--cong',
dest="cong",
help="Congestion control algorithm to use",
default="reno")
parser.add_argument('--diff',
help="Enabled differential service",
action='store_true',
dest="diff",
default=False)
# Expt parameters
args = parser.parse_args()
class StarTopo(Topo):
"Star topology for Buffer Bloat experiment"
def __init__(self, n=2, cpu=None, bw_host=1000, bw_net=1.5,
delay=10, maxq=None, diff=False):
# Add default members to class.
super(StarTopo, self ).__init__()
# Create switch and host nodes
for i in xrange(n):
self.addHost( 'h%d' % (i+1), cpu=cpu )
self.addSwitch('s0', fail_mode='open')
self.addLink('h1', 's0', bw=bw_host,
max_queue_size=int(maxq) )
for i in xrange(1, n):
self.addLink('h%d' % (i+1), 's0', bw=bw_host)
def ping_latency(net):
"(Incomplete) verify link latency"
h1 = net.getNodeByName('h1')
h1.sendCmd('ping -c 2 10.0.0.2')
result = h1.waitOutput()
print "Ping result:"
print result.strip()
def bbnet():
"Create network and run Buffer Bloat experiment"
print "starting mininet ...."
# Seconds to run iperf; keep this very high
seconds = 3600
start = time()
# Reset to known state
topo = StarTopo(n=args.n, bw_host=args.bw_host,
delay='%sms' % args.delay,
bw_net=args.bw_net, maxq=args.maxq, diff=args.diff)
net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
autoPinCpus=True, controller=lambda name: RemoteController("c0",
ip="0.0.0.0",
port=6653))
# c0 = net.addController('c0', controller=RemoteController, ip="127.0.0.1", port=6653)
net.start()
dumpNodeConnections(net.hosts)
net.pingAll()
print args.diff
if args.diff:
print "Differentiate Traffic Between iperf and wget"
os.system("bash tc_cmd_diff.sh")
else:
print "exec tc_cmd.sh"
os.system("bash tc_cmd.sh %s" % args.maxq)
sleep(2)
ping_latency(net)
print "Initially, the delay between two hosts is around %dms" % (int(args.delay)*2)
h2 = net.getNodeByName('h2')
h1 = net.getNodeByName('h1')
h1.cmd('cd ./http/; nohup python2.7 ./webserver.py &')
h1.cmd('cd ../')
h2.cmd('iperf -s -w 16m -p 5001 -i 1 > iperf-recv.txt &')
CLI( net )
h1.cmd("sudo pkill -9 -f webserver.py")
h2.cmd("rm -f index.html*")
Popen("killall -9 cat", shell=True).wait()
if __name__ == '__main__':
bbnet()
| [
"None"
] | None |
cef01c74e4fcbe916281d218603e31553bacbd68 | cd8e76903791017074bdfffa988cb76bafa77463 | /pylib/chroot.py | d3d561d869d402e088b17a5095bff4f61be4907d | [] | no_license | JedMeister/turnkey-pylib-deb | d5025c9ff2e3a538428779a3bcb77355b1023ddc | 0b470866e147e33bfbdfde100d1f27e9069eefd7 | refs/heads/master | 2016-09-05T17:52:44.198040 | 2014-11-07T00:26:46 | 2014-11-07T00:26:46 | 26,295,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | # Copyright (c) 2008 Liraz Siri <liraz@turnkeylinux.org>
#
# This file is part of turnkey-pylib.
#
# turnkey-pylib is open source software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
import os
from os.path import *
import paths
import executil
from executil import ExecError
class MagicMounts:
class Paths(paths.Paths):
files = [ "proc", "dev/pts" ]
def __init__(self, root="/"):
self.paths = self.Paths(root)
self.mounted_proc_myself = False
self.mounted_devpts_myself = False
self.mount()
@staticmethod
def _is_mounted(dir):
mounts = file("/proc/mounts").read()
if mounts.find(dir) != -1:
return True
return False
def mount(self):
if not self._is_mounted(self.paths.proc):
executil.system("mount -t proc", "proc-chroot", self.paths.proc)
self.mounted_proc_myself = True
if not self._is_mounted(self.paths.dev.pts):
executil.system("mount -t devpts", "devpts-chroot", self.paths.dev.pts)
self.mounted_devpts_myself = True
def umount(self):
if self.mounted_devpts_myself:
executil.system("umount", self.paths.dev.pts)
self.mounted_devpts_myself = False
if self.mounted_proc_myself:
executil.system("umount", self.paths.proc)
self.mounted_proc_myself = False
def __del__(self):
self.umount()
class Chroot:
ExecError = ExecError
def __init__(self, newroot, environ={}):
self.environ = { 'HOME': '/root',
'TERM': os.environ['TERM'],
'LC_ALL': 'C',
'PATH': "/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/bin:/usr/sbin" }
self.environ.update(environ)
self.path = realpath(newroot)
self.magicmounts = MagicMounts(self.path)
def _prepare_command(self, *command):
env = ['env', '-i' ] + [ executil.mkarg(name + "=" + val)
for name, val in self.environ.items() ]
command = executil.fmt_command(*command)
return ("chroot", self.path, 'sh', '-c', " ".join(env) + " " + command)
def system(self, *command):
"""execute system command in chroot -> None"""
executil.system(*self._prepare_command(*command))
def getoutput(self, *command):
return executil.getoutput(*self._prepare_command(*command))
| [
"jeremy@turnkeylinux.org"
] | jeremy@turnkeylinux.org |
32c535d570b560fb223b6df5cef4f2c1fbca5d69 | 9078857ba924021a5bed634d395528c5d884fcdf | /11047.py | 721646a826f0c1fad3404979aed3a435542f6489 | [] | no_license | yujinHan97/Algorithm-Study | 26ab4e565d071133c15f98eb912524f435e04c53 | 31d27391f1ad3b6f16c2ba79ab1d680c5b3bc650 | refs/heads/master | 2023-06-18T09:52:25.999567 | 2021-07-16T15:08:52 | 2021-07-16T15:08:52 | 336,694,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | '''
11047 동전 0
알고리즘:
1. 동전의 종류(A_list)를 내림차순으로 정렬
2. A_list를 K와 비교하면서 가장 가까운 종류를 찾기
3. 가장 가까운 종류를 나눈 몫만큼 count횟수 더하고, K에서 그 돈만큼 빼는 과정 반복
'''
import sys
N, K = map(int, input().split())
A_list = []
count = 0
for i in range(N):
A_list.append(int(sys.stdin.readline()))
A_list.sort(reverse = True)
for money in A_list:
if money > K:
continue
else:
count += (K // money)
K %= money
print(count)
| [
"dbwlslek97@gmail.com"
] | dbwlslek97@gmail.com |
cf38d83b92adeb028ec0a5e36ef6ed766d954ac0 | 0e4d09b2a1b93aaa6d623d16905854d993a934ae | /Python/Django/surprise_me/surprise_me/settings.py | f424efc942cbefeba7f250e60cd038b1cc08d43a | [] | no_license | freefaller69/DojoAssignments | ee7f6308b02041be3244f795422e0e044d4a41b2 | f40426ac448026c1172048665f36024ad22f0d81 | refs/heads/master | 2021-01-17T10:23:39.419514 | 2017-07-25T00:50:41 | 2017-07-25T00:50:41 | 84,012,790 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | """
Django settings for surprise_me project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%*yk4s@mw!nm_8^^jkde_l^vdqldj2=v@dzqj&h6%z9l$t2b$='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.surprise',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'surprise_me.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'surprise_me.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"freefaller@gmail.com"
] | freefaller@gmail.com |
2c59afbc1741ce1c16e5814c3f1f44ec34556c6a | d2d15f262dabda5fb2e851c5e11a35c37f1e4935 | /elements/urls.py | 3017d9c070585c22b978a64ab45c51306bad1781 | [
"MIT"
] | permissive | Kipok/science-map | 824e5ed0ced9052bfc0b0041e3694597336999cc | 36d06580392780dc08d983868495ee212c8127ae | refs/heads/master | 2021-06-10T10:48:17.701758 | 2019-10-27T10:11:11 | 2019-10-27T10:11:11 | 120,179,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from django.urls import path
from . import views
app_name = 'elements'
urlpatterns = [
path('paper/<int:paper_id>/', views.paper_view, name='paper'),
path('method/<int:method_id>/', views.method_view, name='method'),
path('dataset/<int:dataset_id>/', views.dataset_view, name='dataset'),
path('metric/<int:metric_id>/', views.metric_view, name='metric'),
path('link_type/<int:link_type_id>/', views.link_type_view, name='link_type'),
path('paper_type/<int:paper_type_id>/', views.paper_type_view,
name='paper_type'),
path('text_result/<int:text_result_id>/', views.text_result_view,
name='text_result'),
]
| [
"ivypawn@gmail.com"
] | ivypawn@gmail.com |
9ec56eaa7c9e3aa5e82ca68b0ea6bbed4c7e90c8 | 6bef61d7cb848bda73506992839b3e0990c0207c | /HW4/market_simulator_for_hw4.py | ac75b9a3cda7f21c2a7f754502a21daae7d6520c | [] | no_license | rchenmit/ML4T_coursera | 522c6ef4d98f4a1eb8863535890c6ead74ac4447 | 1206aa146b95bd8c45e50fb886aecdb330d3b4ef | refs/heads/master | 2021-01-17T11:20:33.534632 | 2015-10-06T20:39:03 | 2015-10-06T20:39:03 | 14,269,740 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,618 | py | ## computational investing 1 - HW4
## market simulator -- events output will be fed into here
## edited by rchen - Nov 6, 2013
import pandas as pd
import numpy as np
import math
import copy
import csv
import scipy as s
import sys
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
## Functions
##read in CSV file -- for this exercise, use the CSV files they gave us on HW3 page!
##this will return an array: trades
def read_csv_to_df(filename):
reader = csv.reader(open(filename, 'rU'), delimiter=',')
dates = np.array([]) #all the dates
orderdata = []
print "reading in the raw data: ---------------------- \n"
for row in reader:
year = int(row[0])
month = int(row[1])
day = int(row[2])
sym = row[3]
action = row[4]
volume = int(row[5])
dates = np.append(dates, dt.datetime(year, month, day) + dt.timedelta(hours=16)) #add the date
orderdata.append([sym, action, volume])
#create pandas dataframe with timestamps and order data
df_orders = pd.DataFrame(orderdata, index=dates, columns=['sym', 'action', 'volume'])
return df_orders
## Main script! -----------------------------------------------------------------------------
i_start_cash = 50000
csv_input = sys.argv[1] # read in CSV file and print it out
csv_output = './market_sim_fundvalue.csv' #output file
## STEP 1 (from instructions in compinvesting1-PDF-MLT Simulator.pdf
df_orders = read_csv_to_df(csv_input).sort() ##pandas dataframe; read in using function i wrote above; sort the Pandas dataframe by index(date) after its read in
# get a list of all dates (removing duplicates) and all symbols traded (removing duplicates)
dt_start = min(df_orders.index)
dt_end = max(df_orders.index)
ls_dt_all_from_orders = df_orders.index.tolist()
sym_all_from_orders = df_orders['sym'].tolist()
#remove duplicate syms, dates
ls_sym_unique = list(set(sym_all_from_orders))
ls_dt_unique = list(set(ls_dt_all_from_orders))
ls_dt_unique.sort()
## STEP 2 -- put this into a function!
#read in the data from Yahoo
dataobj = da.DataAccess('Yahoo')
dt_start_read = dt_start
dt_end_read = dt_end + dt.timedelta(days=1) #end date needs to be offset by one
ldt_timestamps = du.getNYSEdays(dt_start_read, dt_end_read, dt.timedelta(hours=16))
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_sym_unique, ls_keys)
d_data = dict(zip(ls_keys, ldf_data)) #this is the data for the symbols we're interested in
#remove the NaNs from the price data
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
## STEP 3
#dataframe for SHARES of each symbol that you are CURRENTLY HOLDING- make sure they are floating point numbers!
df_trade_matrix = np.zeros((len(ldt_timestamps), len(ls_sym_unique)))
df_trade_matrix = pd.DataFrame(df_trade_matrix, index = ldt_timestamps, columns = ls_sym_unique)
##df_trade_matrix = pd.DataFrame(index=ldt_timestamps, columns=ls_sym_unique)
##df_trade_matrix = df_trade_matrix.fillna(0) # with 0s rather than NaNs
##df_trade_matrix = df_trade_matrix.sort()
#fill the dataframe for shares to add (for each trade) of each symbol (df_trade_matrix)
#do this by iterating through the orders (df_orders) and filling the number of shares for that particular symbol and date
for date, row in df_orders.iterrows():
if row['action'] == 'Buy':
shares_add = int(row['volume'])
elif row['action'] == 'Sell':
shares_add = -1*int(row['volume'])
else:
next
symbol = row['sym']
df_trade_matrix.loc[date][symbol] += float(shares_add)
## STEP 4
# create a timeseries for CASH - tells you what your CASH VALUE is (starting cash + any buy/sell you've made)
#df_cash = pd.DataFrame( s.zeros(len(ls_dt_unique)), ls_dt_unique, columns=['CASH'])
ts_cash = pd.TimeSeries( 0.0, ldt_timestamps)
ts_cash[0] = i_start_cash
# for each order, subtract the cash used in that trade
# need to multiple volume*price
df_close = d_data['close']
for date, row in df_trade_matrix.iterrows():
## for sym in df_trade_matrix.columns:
## price = df_close[sym].ix[date]
## print price, sym
## s_cash.loc[date] -= price * df_trade_matrix.loc[date][sym] #update the cash SPENT
##use dot product - faster than nested for loop (commented above this line)
cash = np.dot(row.values.astype(float), df_close.ix[date].values)
ts_cash[date] -= cash
## STEP 5
# append '_CASH' into the price data; df_close['_CASH'] = 1.0
df_trade_matrix['_CASH'] = ts_cash
df_trade_matrix = df_trade_matrix.cumsum() #fills forward with the current shares you're holding (this is the HOLDING MATRIX)
# calculate teh fund value with the price matrix and the holding matrix
historic = df_close
historic['_CASH']=1
ts_fundvalue = pd.TimeSeries(0.0, df_close.index)
for date, row in df_trade_matrix.iterrows():
ts_fundvalue[date] += np.dot(row.values.astype(float), df_close.ix[date].values)
## STEP 6
# write the ts_fundvalue to a CSV file!
csv_file = open(csv_output, 'wb') #open the CSV file
writer = csv.writer(csv_file, delimiter=',')
for row_index in ts_fundvalue.index:
row_to_write = [str(row_index.year), str(row_index.month), \
str(row_index.day), str(ts_fundvalue[row_index])]
writer.writerow(row_to_write)
csv_file.close() #close the CSV file!!!
| [
"thinkpad@linuxmint.linuxmint-domain"
] | thinkpad@linuxmint.linuxmint-domain |
39d73adac36ee04742368f02e560a675ce46567d | 143289266147aec234dc6d202fc1efc9e30cd782 | /TestCase/login_test.py | 95b1f3814090e53d7512f601cedef370ad2c67d5 | [] | no_license | nijinrong/api-test | ad32a04829916d8fcfc821f5d8dda0053d119935 | 98a0cb7063185ca2e7c3dab328b9c3276935ba7d | refs/heads/master | 2022-12-10T05:22:34.257735 | 2019-04-24T09:50:05 | 2019-04-24T09:50:05 | 183,135,126 | 0 | 0 | null | 2022-12-08T05:01:15 | 2019-04-24T02:51:52 | Python | UTF-8 | Python | false | false | 2,870 | py |
from Common import Request,Assert,read_excel
import allure
import pytest
request = Request.Request()
assertion = Assert.Assertions()
idsList=[]
excel_list = read_excel.read_excel_list('./document/test.xlsx')
length = len(excel_list)
for i in range(length):
idsList.append(excel_list[i].pop())
url = 'http://192.168.1.137:8080/'
head = {}
@allure.feature("登录功能")
class Test_login:
@allure.story("登录")
def test_login(self):
login_resp = request.post_request(url='http://192.168.1.137:8080/admin/login',
json={"username":"admin","password":"123456"})
resp_text = login_resp.text
print(type(resp_text))
resp_dict = login_resp.json()
print(type(resp_dict))
assertion.assert_code(login_resp.status_code,200)
assertion.assert_in_text(resp_dict['message'],'成功')
data_dict = resp_dict['data']
token = data_dict['token']
tokenHead = data_dict['tokenHead']
global head
head = {'Authorization':tokenHead+token}
@allure.story("获取用户信息")
def test_info(self):
info_resp = request.get_request(url=url+ 'admin/info', headers=head)
resp_dict = info_resp.json()
assertion.assert_code(info_resp.status_code, 200)
assertion.assert_in_text(resp_dict['message'], '成功')
@allure.story("测试登录")
@pytest.mark.parametrize("username,password,msg",
[['admin', '123456', '成功'], ['admin1', '123456', '错误'],['admin', '123456a', '错误'],
['admin', '123456a', '错误'],['admin', '123456a', '错误'],['admin', '123456a', '错误']],
ids=['登录成功', '用户名错误', '密码错误', '登录成功1', '用户名错误1', '密码错误1'])
def test_login1(self,username,password,msg):
login_resp = request.post_request(url=url+'admin/login',
json={"username": username,"password": password})
resp_text = login_resp.text
print(type(resp_text))
resp_dict = login_resp.json()
print(type(resp_dict))
assertion.assert_code(login_resp.status_code,200)
assertion.assert_in_text(resp_dict['message'],msg)
@allure.story("测试登录2")
@pytest.mark.parametrize("username,password,msg",excel_list,ids=idsList)
def test_login2(self,username,password,msg):
login_resp = request.post_request(url=url + 'admin/login',
json={"username": username, "password": password})
resp_text = login_resp.text
print(type(resp_text))
resp_dict = login_resp.json()
print(type(resp_dict))
assertion.assert_code(login_resp.status_code, 200)
assertion.assert_in_text(resp_dict['message'], msg)
| [
"nijinrong1998@163.com"
] | nijinrong1998@163.com |
6edc0f2baab7ec5c405ac371a643b44914f76e20 | 2a5f238353c223bd283178b503a3a02f6edae797 | /posts/migrations/0004_post_image_path.py | 9d081ce0aa3fd6dee0b0e3f5f52550720de9c751 | [] | no_license | eleanor107/blog | 97619c6bd2fdd62e6313772d52c14c119b357c80 | 7c2df3e9da3694135722112eb1acdb57284a4988 | refs/heads/master | 2016-08-11T08:16:27.120604 | 2016-04-02T05:05:10 | 2016-04-02T05:05:10 | 55,199,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-01 02:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20160330_0705'),
]
operations = [
migrations.AddField(
model_name='post',
name='image_path',
field=models.TextField(default=''),
),
]
| [
"eleanorlee@dyn-129-236-212-164.dyn.columbia.edu"
] | eleanorlee@dyn-129-236-212-164.dyn.columbia.edu |
cf7a5c949b145a86634b083e8acd0620cef804a3 | de4817e9c0f16283c4c6f9dcec3a0c36f49adf0f | /pytest_cases/plugin.py | b79d808e778403f49ecb8187fb7308ad67041226 | [
"BSD-3-Clause"
] | permissive | yashtodi94/python-pytest-cases | 3422cd4f399543b5add22d8631980b20bb92d68a | 81bd5b3d2a7b358e8d9f97dae77654f6bc9c7999 | refs/heads/master | 2020-06-27T18:22:06.798892 | 2019-07-31T07:40:56 | 2019-07-31T07:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,492 | py | from collections import OrderedDict, namedtuple
from copy import copy
from distutils.version import LooseVersion
from warnings import warn
from functools import partial
import pytest
from pytest_cases.common import get_pytest_nodeid, get_pytest_function_scopenum, \
is_function_node, get_param_names, get_pytest_scopenum, get_param_argnames_as_list
from pytest_cases.main_fixtures import NOT_USED, is_fixture_union_params, UnionFixtureAlternative, apply_id_style
try: # python 3.3+
from inspect import signature
except ImportError:
from funcsigs import signature
try: # python 3.3+ type hints
from typing import Optional, List, Tuple, Union, Iterable
from _pytest.python import CallSpec2
except ImportError:
pass
_DEBUG = False  # module-level debug switch -- presumably enables diagnostic printing elsewhere in this file; TODO confirm
# @hookspec(firstresult=True)
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_collection(session):
    """Pytest collection hook: replace the fixture manager's `getfixtureclosure`
    method with this plugin's version (defined below in this module)."""
    fix_manager = session._fixturemanager
    fix_manager.getfixtureclosure = partial(getfixtureclosure, fix_manager)
class FixtureDefsCache(object):
    """
    A memoizing proxy around the fixture manager's ``getfixturedefs``: each fixture
    name is resolved at most once for the given test node id, then served from a
    local dictionary on subsequent requests.
    """
    __slots__ = 'fm', 'nodeid', 'cached_fix_defs'

    def __init__(self, fm, nodeid):
        """
        :param fm: the pytest fixture manager used to resolve fixture names
        :param nodeid: the test node id for which lookups are performed
        """
        self.fm = fm
        self.nodeid = nodeid
        self.cached_fix_defs = dict()

    def get_fixture_defs(self, fixname):
        """Return the fixture definitions for `fixname`, cached after the first lookup."""
        cache = self.cached_fix_defs
        if fixname not in cache:
            # first request for this name: resolve through the fixture manager and remember
            cache[fixname] = self.fm.getfixturedefs(fixname, self.nodeid)
        return cache[fixname]
class FixtureClosureNode(object):
    """
    A node of the "fixture closure tree" built for a test function.

    A node holds an ordered mapping of required fixture names to their definitions
    (`fixture_defs`). When one of those fixtures is a "union" fixture, the node is
    *split*: `split_fixture_name` holds the union fixture's name and `children` maps
    each alternative fixture name to a child node (one subtree per alternative).
    The whole tree can also be viewed as a flat list of fixture names (see `to_list`).
    """
    __slots__ = 'parent', 'fixture_defs', \
                'split_fixture_name', 'split_fixture_discarded_names', 'children', \
                '_as_list', 'all_fixture_defs'

    def __init__(self, parent_node=None):
        # parent node in the tree, or None for the root
        self.parent = parent_node

        # these will be set while/after the closure is built (see build_closure / _build_closure)
        self.fixture_defs = None                  # OrderedDict: fixture name -> definitions; None until building starts
        self.split_fixture_name = None            # name of the union fixture this node splits on, if any
        self.split_fixture_discarded_names = []   # union alternatives NOT taken on the path to this node
        self.children = OrderedDict()             # alternative fixture name -> child FixtureClosureNode

        # lazy caches, created the first time the object is converted to a list
        # (see to_list) or its definitions are gathered (see get_all_fixture_defs)
        self._as_list = None
        self.all_fixture_defs = None
# ------ tree
def get_leaves(self):
if self.has_split():
return [n for c in self.children.values() for n in c.get_leaves()]
else:
return [self]
# ------
def to_str(self, indent_nb=0, with_children=True, with_discarded=True):
"""
Provides a string representation, either with all the subtree (default) or without (with_children=False)
You can also remove the "discarded" information for clarity with with_discarded=False
:param indent_nb:
:param with_children:
:param with_discarded:
:return:
"""
indent = " " * indent_nb
if not self.is_closure_built():
str_repr = "<pending, incomplete>"
else:
str_repr = "%s(%s)" % (indent, ",".join([("%s" % f) for f in self.fixture_defs.keys()]))
if with_discarded:
str_repr += " (discarded: %s)" % self.split_fixture_discarded_names
if self.has_split() and with_children:
children_str_prefix = "\n%s - " % indent
children_str = children_str_prefix + children_str_prefix.join([c.to_str(indent_nb=indent_nb + 1)
for c in self.children.values()])
str_repr = str_repr + " split: " + self.split_fixture_name + children_str
return str_repr
    def __repr__(self):
        # delegate to the full multi-line tree representation
        return self.to_str()
# ---- list facade
    def __iter__(self):
        # iterating the node iterates the flattened fixture-name closure
        return iter(self.to_list())

    def __getitem__(self, item):
        # index into the flattened fixture-name closure
        return self.to_list()[item]

    def __setitem__(self, key, value):
        # This is called in Pytest 4+ when pytest tries to reorder the closure in place.
        # The requested new order is currently ignored. TODO how should we behave ?
        warn("WARNING the new order is not taken into account !!")
        pass

    def index(self, *args):
        # list.index equivalent, on the flattened fixture-name closure
        return self.to_list().index(*args)
    def to_list(self):
        """
        Flatten this node's subtree into the list of all fixture names it requires
        (without duplicates), and cache the result in `self._as_list` so subsequent
        calls are free.

        On pytest 3.5+ the names are additionally sorted using pytest's fixture
        scope numbering; names with no known definition are given the function
        scope number.

        TODO maybe this sorting should actually be propagated down the tree so that it is done per branch

        :return: the (cached) list of fixture names in the closure
        """
        if self._as_list is None:
            # crawl the tree to get the list of unique fixture names
            fixturenames_closure = self._to_list()

            if LooseVersion(pytest.__version__) >= LooseVersion('3.5.0'):
                # sort by scope
                def sort_by_scope(arg_name):
                    try:
                        fixturedefs = self.get_all_fixture_defs()[arg_name]
                    except KeyError:
                        # no known definition: use the function scope number
                        return get_pytest_function_scopenum()
                    else:
                        # the last definition in the tuple is the active one
                        return fixturedefs[-1].scopenum
                fixturenames_closure.sort(key=sort_by_scope)

            self._as_list = fixturenames_closure
        return self._as_list
def _to_list(self):
""" Returns a list of all fixture names used (with no redundancy) """
lst = []
self._append_to(lst)
# eliminate redundancy
unique_lst = _make_unique(lst)
# TODO remove for efficiency
assert set(unique_lst) == set(lst)
return unique_lst
def _append_to(self, lst):
"""Appends all fixture names of this subtree to the given list"""
# first append the fixture names
lst += list(self.fixture_defs.keys())
# then if there is a split at this node
if self.has_split():
# add the split fixture > not needed anymore
# lst.append(self.split_fixture_name)
# add all children
for c in self.children.values():
c._append_to(lst)
# ----
def get_all_fixture_defs(self):
if self.all_fixture_defs is None:
# collect
self.all_fixture_defs = self._get_all_fixture_defs()
return self.all_fixture_defs
def _get_all_fixture_defs(self):
all = OrderedDict()
for k, v in self.fixture_defs.items():
if v is not None:
all[k] = v
for c in self.children.values():
all.update(c.get_all_fixture_defs())
return all
# ---- utils to build the closure
    def build_closure(self,
                      fixture_defs_mgr,      # type: FixtureDefsCache
                      initial_fixture_names  # type: Iterable[str]
                      ):
        """Public entry point: build the fixture closure tree starting from
        `initial_fixture_names`, resolving definitions through `fixture_defs_mgr`."""
        self._build_closure(fixture_defs_mgr, initial_fixture_names)

    def is_closure_built(self):
        # the closure is considered built as soon as the fixture_defs dict exists
        return self.fixture_defs is not None
def already_knows_fixture(self, fixture_name):
""" Return True if this fixture is known by this node or one of its parents """
if fixture_name in self.fixture_defs:
return True
elif self.parent is None:
return False
else:
return self.parent.already_knows_fixture(fixture_name)
    def _build_closure(self,
                       fixture_defs_mgr,      # type: FixtureDefsCache
                       initial_fixture_names  # type: Iterable[str]
                       ):
        """
        Grab all dependencies of the fixtures in `initial_fixture_names`
        (recursively) and register them either on this node or, when a union
        fixture is encountered, on newly-created child nodes (one per alternative).

        May be called several times on the same node (see `split_and_build`):
        the first call switches the node from 'pending' to 'under construction'.

        :param fixture_defs_mgr: cache used to resolve fixture names to their definitions
        :param initial_fixture_names: the fixture names to start the closure from
        :return: nothing (this node and possibly its children are modified in place)
        """
        # -- first switch this object from 'pending' to 'under construction' if needed
        if self.fixture_defs is None:
            self.fixture_defs = OrderedDict()

        # -- then process the worklist of pending names, adding each with its dependencies
        pending_fixture_names = list(initial_fixture_names)
        while len(pending_fixture_names) > 0:
            fixname = pending_fixture_names.pop(0)

            # if the fixture is already known in this node or above, do not care
            if self.already_knows_fixture(fixname):
                continue

            # else grab the fixture definition(s) for this fixture name for this test node id
            fixturedefs = fixture_defs_mgr.get_fixture_defs(fixname)
            if not fixturedefs:
                # name with no known fixture definition: register it as-is
                self.add_required_fixture(fixname, None)
            else:
                # the actual definition is the last one
                _fixdef = fixturedefs[-1]
                _params = _fixdef.params
                if _params is not None and is_fixture_union_params(_params):
                    # a UNION fixture: this node must be split, one child per alternative
                    # transform the _params into a list of fixture names
                    alternative_f_names = UnionFixtureAlternative.to_list_of_fixture_names(_params)
                    # if there are direct dependencies that are not the union members, add them to pending
                    non_member_dependencies = [f for f in _fixdef.argnames if f not in alternative_f_names]
                    pending_fixture_names += non_member_dependencies
                    # propagate WITH the pending: the children will consume the remaining names
                    self.split_and_build(fixture_defs_mgr, fixname, fixturedefs, alternative_f_names,
                                         pending_fixture_names)
                    # empty the pending: everything left was handed over to the children above
                    pending_fixture_names = []
                else:
                    # normal fixture
                    self.add_required_fixture(fixname, fixturedefs)
                    # add all its dependencies to the worklist
                    dependencies = _fixdef.argnames
                    # - append: was pytest default
                    # pending_fixture_names += dependencies
                    # - prepend: makes much more sense
                    pending_fixture_names = list(dependencies) + pending_fixture_names
# ------ tools to add new fixture names during closure construction
def add_required_fixture(self, new_fixture_name, new_fixture_defs):
""" Adds some required fixture names to this node. Returns True if new fixtures were added here (not in child)"""
if self.already_knows_fixture(new_fixture_name):
return
elif not self.has_split():
# add_required_fixture locally
if new_fixture_name not in self.fixture_defs:
self.fixture_defs[new_fixture_name] = new_fixture_defs
else:
# add_required_fixture in each child
for c in self.children.values():
c.add_required_fixture(new_fixture_name, new_fixture_defs)
    def split_and_build(self,
                        fixture_defs_mgr,           # type: FixtureDefsCache
                        split_fixture_name,         # type: str
                        split_fixture_defs,         # type: Tuple[FixtureDefinition]
                        alternative_fixture_names,  # type: List[str]
                        pending_fixtures_list
                        ):
        """
        Declare that this node splits on the union fixture `split_fixture_name`:
        one child subtree is created per alternative in `alternative_fixture_names`,
        and closure construction continues inside each child -- first with the
        child's own alternative fixture, then with the names still pending.

        :param fixture_defs_mgr: cache used to resolve fixture names to their definitions
        :param split_fixture_name: name of the union fixture responsible for the split
        :param split_fixture_defs: the fixture definitions of that union fixture
        :param alternative_fixture_names: one fixture name per union alternative
        :param pending_fixtures_list: names not yet processed, handed over to each child
        """
        if self.has_split():
            # splitting an already-split node is no longer supported
            raise ValueError("This should not happen anymore")
        else:
            # add the split (union) name to known fixtures
            self.add_required_fixture(split_fixture_name, split_fixture_defs)

            # remember it
            self.split_fixture_name = split_fixture_name

            # create the child nodes
            for f in alternative_fixture_names:
                # create the child node
                new_c = FixtureClosureNode(self)
                self.children[f] = new_c

                # set the discarded fixture names: all the alternatives this child did NOT take
                new_c.split_fixture_discarded_names = [g for g in alternative_fixture_names if g != f]

                # perform the propagation on a copy of the pending list (each child consumes its own):
                pending_for_child = copy(pending_fixtures_list)
                # (a) first propagate all child's dependencies
                new_c._build_closure(fixture_defs_mgr, [f])
                # (b) then the ones required by parent
                new_c._build_closure(fixture_defs_mgr, pending_for_child)

    def has_split(self):
        """Return True if this node has been split on a union fixture."""
        return self.split_fixture_name is not None
def get_not_always_used(self):
"""Returns the list of fixtures used by this subtree, that are not always used"""
results_list = []
# initial list is made of fixtures that are in the children
initial_list = self.gather_all_required(include_parents=False)
for c in self.get_leaves():
j = 0
for i in range(len(initial_list)):
fixture_name = initial_list[j]
if fixture_name not in c.gather_all_required():
del initial_list[j]
results_list.append(fixture_name)
else:
j += 1
return results_list
def gather_all_required(self, include_children=True, include_parents=True):
"""
Returns a list of all fixtures required by the subtree at this node
:param include_children:
:return:
"""
# first the fixtures required by this node
required = list(self.fixture_defs.keys())
# then the ones required by the parents
if include_parents and self.parent is not None:
required = required + self.parent.gather_all_required(include_children=False)
# then the ones from all the children
if include_children:
for child in self.children.values():
required = required + child.gather_all_required(include_parents=False)
return required
def requires(self, fixturename):
"""
Returns True if the fixture with this name is required by the subtree at this node
:param fixturename:
:return:
"""
return fixturename in self.gather_all_required()
def gather_all_discarded(self):
"""
Returns a list of all fixture names discarded during splits from the parent node down to this node.
Note: this does not include the split done at this node if any, nor all of its subtree.
:return:
"""
discarded = list(self.split_fixture_discarded_names)
if self.parent is not None:
discarded = discarded + self.parent.gather_all_discarded()
return discarded
# ------ tools to see the tree as a list of alternatives
def print_alternatives(self):
return FixtureClosureNode.print_alternatives_list(*self.get_alternatives())
@staticmethod
def print_alternatives_list(filters_list, fixtures_list):
for f, p in zip(filters_list, fixtures_list):
print(f, p)
    def get_alternatives(self):
        """
        Returns the alternatives
        - a list of dictionaries union_fixture_name: value representing the filters on this alternative
        - a list of tuples of fixture names used by each alternative
        - a list of tuples of discarded fixture names in each alternative

        All three lists have the same length: one entry per leaf of the subtree.
        :return:
        """
        if self.has_split():
            partitions_list = []
            filters_list = []
            discarded_list = []
            # one child per alternative of the split at this node
            for k, c in self.children.items():
                child_filters_dct, child_partitions, child_discarded = c.get_alternatives()
                for f_dct, p, d in zip(child_filters_dct, child_partitions, child_discarded):
                    # append a partition for this child:
                    # - filter (copy so siblings do not share the dict, then record this branch's choice)
                    _f_dct = f_dct.copy()
                    _f_dct[self.split_fixture_name] = k
                    filters_list.append(_f_dct)
                    # - fixtures used (this node's own fixtures first, deduplicated)
                    partitions_list.append(_make_unique(list(self.fixture_defs.keys()) + p))
                    # - fixtures not used. (a fixture required at this node overrides a child's "discarded")
                    discarded_list.append(_make_unique(self.split_fixture_discarded_names
                                                       + [df for df in d if df not in self.fixture_defs.keys()]))
            return filters_list, partitions_list, discarded_list
        else:
            # return a single partition containing all fixture names
            return [dict()], [list(self.fixture_defs.keys())], [list(self.split_fixture_discarded_names)]
def merge(new_items, into_list):
    """
    Appends each element of `new_items` to `into_list`, unless it is already present.

    :param new_items: iterable of candidate items
    :param into_list: destination list, modified in place
    :return: True if at least one item was actually appended, False otherwise
    """
    added_any = False
    for item in new_items:
        if item in into_list:
            continue
        into_list.append(item)
        added_any = True
    return added_any
def getfixtureclosure(fm, fixturenames, parentnode, ignore_args=()):
    """
    Replacement for pytest's `FixtureManager.getfixtureclosure`: builds our own
    closure *tree* (a `FixtureClosureNode`) instead of a flat list, so that union
    fixtures can create alternative branches. The reference pytest result is still
    computed and compared against ours as a sanity check.

    :param fm: the pytest FixtureManager
    :param fixturenames: initial fixture names required by the node
    :param parentnode: the collection node (function/module) the closure is built for
    :param ignore_args: names to exclude (pytest >= 4.6 only)
    :return: same tuple shape as the pytest version it replaces (2 or 3 items
        depending on the pytest version), with the closure node in place of the list
    """
    # first retrieve the normal pytest output for comparison
    kwargs = dict()
    if LooseVersion(pytest.__version__) >= LooseVersion('4.6.0'):
        # new argument "ignore_args" in 4.6+
        kwargs['ignore_args'] = ignore_args
    if LooseVersion(pytest.__version__) >= LooseVersion('3.7.0'):
        # three outputs
        initial_names, ref_fixturenames, ref_arg2fixturedefs = \
            fm.__class__.getfixtureclosure(fm, fixturenames, parentnode, **kwargs)
    else:
        # two outputs
        ref_fixturenames, ref_arg2fixturedefs = fm.__class__.getfixtureclosure(fm, fixturenames, parentnode)
    # now let's do it by ourselves.
    parentid = parentnode.nodeid
    # Create closure
    # -- auto-use fixtures
    _init_fixnames = fm._getautousenames(parentid)
    # -- required fixtures/params.
    # ********* fix the order of initial fixtures: indeed this order may not be the right one ************
    # this only works when pytest version is > 3.4, otherwise the parent node is a Module
    if is_function_node(parentnode):
        # grab all the parametrization on that node and fix the order.
        # Note: on pytest >= 4 the list of param_names is probably the same than the `ignore_args` input
        param_names = get_param_names(parentnode)
        sorted_fixturenames = sort_according_to_ref_list(fixturenames, param_names)
        # **********
        merge(sorted_fixturenames, _init_fixnames)
    else:
        # we cannot sort yet
        merge(fixturenames, _init_fixnames)
    # Finally create the closure tree
    if _DEBUG:
        print("Creating closure for %s:" % parentid)
    fixture_defs_mger = FixtureDefsCache(fm, parentid)
    fixturenames_closure_node = FixtureClosureNode()
    fixturenames_closure_node.build_closure(fixture_defs_mger, _init_fixnames)
    if _DEBUG:
        print("Closure for %s completed:" % parentid)
        print(fixturenames_closure_node)
    # sort the fixture names (note: only in recent pytest)
    fixturenames_closure_node.to_list()
    # FINALLY compare with the previous behaviour TODO remove when in 'production' ?
    if len(ignore_args) == 0:
        assert fixturenames_closure_node.get_all_fixture_defs() == ref_arg2fixturedefs
        # if fixturenames_closure_node.has_split():
        #     # order might be changed
        #     assert set((str(f) for f in fixturenames_closure_node)) == set(ref_fixturenames)
        # else:
        #     # same order
        #     if len(p_markers) < 2:
        #         assert list(fixturenames_closure_node) == ref_fixturenames
        # else:
        # NOW different order happens all the time because of the "prepend" strategy in the closure building
        # which makes much more sense/intuition.
        assert set((str(f) for f in fixturenames_closure_node)) == set(ref_fixturenames)
    # and store our closure in the node
    # note as an alternative we could return a custom object in place of the ref_fixturenames
    # store_union_closure_in_node(fixturenames_closure_node, parentnode)
    if LooseVersion(pytest.__version__) >= LooseVersion('3.7.0'):
        # NOTE(review): `sorted_fixturenames` is only bound in the `is_function_node`
        # branch above — on pytest >= 3.7 with a non-function parent this would raise
        # NameError; confirm that combination is unreachable.
        our_initial_names = sorted_fixturenames  # initial_names
        return our_initial_names, fixturenames_closure_node, ref_arg2fixturedefs
    else:
        return fixturenames_closure_node, ref_arg2fixturedefs
# ------------ hack to store and retrieve our custom "closure" object
# def store_union_closure_in_node(fixturenames_closure_node, parentnode):
# parentnode.advanced_fixture_closure = fixturenames_closure_node
def retrieve_union_closure_from_metafunc(metafunc):
    """Return the fixture closure object stored on `metafunc.fixturenames`."""
    closure = metafunc.fixturenames
    return closure
# ---------------------------------------
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_generate_tests(metafunc):
    """
    We use this hook to replace the 'parametrize' method of `metafunc` with our own
    (the `parametrize` function below), before it is called by pytest.

    :param metafunc: the pytest Metafunc object for the test being collected
    :return:
    """
    # override the parametrize method.
    # Note we could do it in a static way in pytest_sessionstart or plugin init hook, but we would need to save the
    # original; binding per-metafunc with `partial` is simpler.
    metafunc.parametrize = partial(parametrize, metafunc)
    # now let pytest parametrize the call as usual
    _ = yield
class UnionParamz(namedtuple('UnionParamz', ['union_fixture_name', 'alternative_names', 'ids', 'scope', 'kwargs'])):
    """ Represents some parametrization to be applied, for a union fixture """
    __slots__ = ()

    def __str__(self):
        # render the alternatives as a comma-separated list between brackets
        alternatives_str = ','.join([str(alt) for alt in self.alternative_names])
        return "[UNION] %s=[%s], ids=%s, scope=%s, kwargs=%s" % (
            self.union_fixture_name, alternatives_str, self.ids, self.scope, self.kwargs)
class NormalParamz(namedtuple('NormalParamz', ['argnames', 'argvalues', 'indirect', 'ids', 'scope', 'kwargs'])):
    """ Represents some parametrization to be applied """
    __slots__ = ()

    def __str__(self):
        return ("[NORMAL] {}=[{}], indirect={}, ids={}, scope={}, kwargs={}"
                .format(self.argnames, self.argvalues, self.indirect, self.ids, self.scope, self.kwargs))
def parametrize(metafunc, argnames, argvalues, indirect=False, ids=None, scope=None, **kwargs):
    """
    This alternate implementation of metafunc.parametrize creates a list of calls that is not just the cartesian
    product of all parameters (like the pytest behaviour).
    Instead, it offers an alternate list of calls taking into account all union fixtures.

    For this, it replaces the `metafunc._calls` attribute with a `CallsReactor` instance, and feeds it with all
    parameters and parametrized fixtures independently (not doing any cross-product).
    The resulting `CallsReactor` instance is then able to dynamically behave like the correct list of calls,
    lazy-creating that list when it is used.

    Signature mirrors `Metafunc.parametrize`; nothing is applied immediately — each
    call merely queues a `UnionParamz` or `NormalParamz` order on the reactor.
    """
    # create our special container object if needed
    if not isinstance(metafunc._calls, CallsReactor):
        # first call: should be an empty list
        if len(metafunc._calls) > 0:
            raise ValueError("This should not happen - please file an issue")
        metafunc._calls = CallsReactor(metafunc)
    # grab it
    calls_reactor = metafunc._calls
    # detect union fixtures
    if is_fixture_union_params(argvalues):
        # a union is parametrized with exactly one (string) argname
        if ',' in argnames or not isinstance(argnames, str):
            raise ValueError("Union fixtures can not be parametrized")
        union_fixture_name = argnames
        union_fixture_alternatives = argvalues
        if indirect is False or len(kwargs) > 0:
            raise ValueError("indirect cannot be set on a union fixture, as well as unknown kwargs")
        # add a union parametrization in the queue (but do not apply it now)
        calls_reactor.append(UnionParamz(union_fixture_name, union_fixture_alternatives, ids, scope, kwargs))
    else:
        # add a normal parametrization in the queue (but do not apply it now)
        calls_reactor.append(NormalParamz(argnames, argvalues, indirect, ids, scope, kwargs))
    # put our object back in place - not needed anymore
    # metafunc._calls = calls_reactor
class CallsReactor:
    """
    This object replaces the list of calls that was in `metafunc._calls`.
    It behaves like a list, but it actually builds that list dynamically based on all parametrizations collected
    from the custom `metafunc.parametrize` above.
    There are therefore three steps:
    - when `metafunc.parametrize` is called, this object gets called on `add_union` or `add_param`. A parametrization
    order gets stored in `self._pending`
    - when this object is first read as a list, all parametrization orders in `self._pending` are transformed into a
    tree in `self._tree`, and `self._pending` is discarded. This is done in `create_tree_from_pending_parametrization`.
    - finally, the list is built from the tree using `self._tree.to_call_list()`. This will also be the case in
    subsequent usages of this object.
    """
    __slots__ = 'metafunc', '_pending', '_call_list'
    def __init__(self, metafunc):
        self.metafunc = metafunc
        self._pending = []       # parametrization orders, in arrival order
        self._call_list = None   # lazily built list of CallSpec2, see `calls_list`
    # -- methods to provision parametrization orders without executing them --
    def append(self,
               parametrization  # type: Union[UnionParamz, NormalParamz]
               ):
        self._pending.append(parametrization)
    def print_parametrization_list(self):
        """Helper method to print all pending parametrizations in this reactor """
        print("\n".join([str(p) for p in self._pending]))
    # -- list facade --
    def __iter__(self):
        return iter(self.calls_list)
    def __getitem__(self, item):
        return self.calls_list[item]
    @property
    def calls_list(self):
        """
        Returns the list of calls. This property relies on self._tree, that is lazily created on first access,
        based on `self.parametrizations`.
        :return:
        """
        if self._call_list is None:
            # create the definitive tree.
            self.create_call_list_from_pending_parametrizations()
        return self._call_list
    # --- tree creation (executed once the first time this object is used as a list)
    def create_call_list_from_pending_parametrizations(self):
        """
        Takes all parametrization operations that are pending in `self._pending`,
        and creates a parametrization tree out of them.
        self._pending is set to None afterwards
        :return:
        """
        # temporarily remove self from the _calls field, we'll need to change it
        bak_calls = self.metafunc._calls
        assert bak_calls is self
        # grab the fixtures closure tree created previously (see getfixtureclosure above)
        fix_closure_tree = retrieve_union_closure_from_metafunc(self.metafunc)
        # ------ parametrize the calls --------
        # create a dictionary of pending things to parametrize, and only keep the first parameter in case of several
        pending_items = [(get_param_argnames_as_list(p[0])[0], p) for p in self._pending]
        pending = OrderedDict(pending_items)
        if _DEBUG:
            print()
            print("---- pending parametrization ----")
            self.print_parametrization_list()
            print("---------------------------------")
            print()
            print("Applying all of them in the closure tree nodes:")
        # walk the closure tree, applying each parametrization at the node where its fixture lives
        calls, nodes = self._process_node(fix_closure_tree, pending.copy(), [])
        self._cleanup_calls_list(fix_closure_tree, calls, nodes, pending)
        if _DEBUG:
            print("\n".join(["%s[%s]: funcargs=%s, params=%s" % (get_pytest_nodeid(self.metafunc),
                                                                 c.id, c.funcargs, c.params)
                             for c in calls]))
            print()
        self._call_list = calls
        # put back self as the _calls facade
        self.metafunc._calls = bak_calls
        # forget about all parametrizations now - this wont happen again
        self._pending = None
    def _cleanup_calls_list(self, fix_closure_tree, calls, nodes, pending):
        """
        Cleans the calls list so that all calls contain a value for all parameters. This is basically
        about adding "NOT_USED" parametrization everywhere relevant.

        :param fix_closure_tree: root of the fixture closure tree
        :param calls: list of CallSpec2, one per generated test call
        :param nodes: list (same length as `calls`) of the leaf node each call belongs to
        :param pending: the full ordered dict of parametrization orders (pre-consumption)
        :return:
        """
        nb_calls = len(calls)
        if nb_calls != len(nodes):
            raise ValueError("This should not happen !")
        # function_scope_num = get_pytest_function_scopenum()
        for i in range(nb_calls):
            c, n = calls[i], nodes[i]
            # A/ set to "not used" all parametrized fixtures that were not used in some branches
            for fixture, p_to_apply in pending.items():
                if fixture not in c.params and fixture not in c.funcargs:
                    # parametrize with a single "not used" value and discard the id
                    if isinstance(p_to_apply, UnionParamz):
                        c_with_dummy = self._parametrize_calls([c], p_to_apply.union_fixture_name, [NOT_USED],
                                                               indirect=True, discard_id=True,
                                                               scope=p_to_apply.scope, **p_to_apply.kwargs)
                    else:
                        # multi-argname parametrization needs one NOT_USED per argname
                        _nb_argnames = len(get_param_argnames_as_list(p_to_apply.argnames))
                        if _nb_argnames > 1:
                            _vals = [(NOT_USED,) * _nb_argnames]
                        else:
                            _vals = [NOT_USED]
                        c_with_dummy = self._parametrize_calls([c], p_to_apply.argnames, _vals,
                                                               indirect=p_to_apply.indirect, discard_id=True,
                                                               scope=p_to_apply.scope, **p_to_apply.kwargs)
                    assert len(c_with_dummy) == 1
                    calls[i] = c_with_dummy[0]
                    c = calls[i]
            # B/ some non-parametrized fixtures may also need to be explicitly deactivated in some callspecs
            # otherwise they will be setup/teardown.
            #
            # For this we use a dirty hack: we add a parameter with they name in the callspec, it seems to be propagated
            # in the `request`. TODO is there a better way?
            # for fixture in list(fix_closure_tree):
            # for fixture_name, fixdef in self.metafunc._arg2fixturedefs.items():
            for fixture_name in fix_closure_tree.get_not_always_used():
                fixdef = self.metafunc._arg2fixturedefs[fixture_name]
                if fixture_name not in c.params and fixture_name not in c.funcargs:
                    if not n.requires(fixture_name):
                        # explicitly add it as discarded by creating a parameter value for it.
                        c.params[fixture_name] = NOT_USED
                        c.indices[fixture_name] = 1
                        c._arg2scopenum[fixture_name] = get_pytest_scopenum(fixdef[-1].scope)
                    else:
                        # explicitly add it as active
                        c.params[fixture_name] = 'used'
                        c.indices[fixture_name] = 0
                        c._arg2scopenum[fixture_name] = get_pytest_scopenum(fixdef[-1].scope)
    def _parametrize_calls(self, init_calls, argnames, argvalues, discard_id=False, indirect=False, ids=None,
                           scope=None, **kwargs):
        """Parametrizes the initial `calls` with the provided information and returns the resulting new calls"""
        # make a backup so that we can restore the metafunc at the end
        bak = self.metafunc._calls
        # place the initial calls on the metafunc
        self.metafunc._calls = init_calls if init_calls is not None else []
        # parametrize the metafunc. Since we replaced the `parametrize` method on `metafunc` we have to call super
        self.metafunc.__class__.parametrize(self.metafunc, argnames, argvalues, indirect=indirect, ids=ids,
                                            scope=scope, **kwargs)
        # extract the result
        new_calls = self.metafunc._calls
        # If the user wants to discard the newly created id, remove the last id in all these callspecs in this node
        if discard_id:
            for callspec in new_calls:
                callspec._idlist.pop(-1)
        # restore the metafunc and return the new calls
        self.metafunc._calls = bak
        return new_calls
    def _process_node(self, current_node, pending, calls):
        """
        Routine to apply all the parametrization orders in `pending` that are relevant to `current_node`,
        to the `calls` (a list of pytest CallSpec2).
        It returns a tuple containing a list of calls and a list of same length containing which leaf node each one
        corresponds to.

        :param current_node: the closure tree node we're focusing on
        :param pending: a list of parametrization orders to apply
        :param calls:
        :return: a tuple (calls, nodes) of two lists of the same length. So that for each CallSpec calls[i], you can see
            the corresponding leaf node in nodes[i]
        """
        # (1) first apply all non-split fixtures at this node
        fixtures_at_this_node = [f for f in current_node.fixture_defs.keys()
                                 if f is not current_node.split_fixture_name]
        # dirty hack if we want to preserve pytest legacy order when there are no children
        # if current_node.parent is None and not current_node.has_split():
        #     # legacy compatibility: use pytest parametrization order even if it is wrong
        #     # see https://github.com/pytest-dev/pytest/issues/5054
        #
        # else:
        #     # rather trust the order we computed from the closure
        #     fixtures_to_process = fixtures_at_this_node
        for fixturename in fixtures_at_this_node:
            try:
                # pop it from pending - do not rely the order in pending but rather the order in the closure node
                p_to_apply = pending.pop(fixturename)
            except KeyError:
                # not a parametrized fixture
                continue
            else:
                if isinstance(p_to_apply, UnionParamz):
                    # unions are only processed in step (2) below, at their split node
                    raise ValueError("This should not happen !")
                elif isinstance(p_to_apply, NormalParamz):
                    # ******** Normal parametrization **********
                    if _DEBUG:
                        print("[Node %s] Applying parametrization for NORMAL %s"
                              "" % (current_node.to_str(with_children=False, with_discarded=False),
                                    p_to_apply.argnames))
                    calls = self._parametrize_calls(calls, p_to_apply.argnames, p_to_apply.argvalues,
                                                    indirect=p_to_apply.indirect, ids=p_to_apply.ids,
                                                    scope=p_to_apply.scope, **p_to_apply.kwargs)
                else:
                    raise TypeError("Invalid parametrization type: %s" % p_to_apply.__class__)
        # (2) then if there is a split apply it, otherwise return
        if not current_node.has_split():
            # leaf node: every call generated so far belongs to it
            nodes = [current_node] * len(calls)
            return calls, nodes
        else:
            try:
                # pop it from pending - do not trust the order in pending.
                p_to_apply = pending.pop(current_node.split_fixture_name)
            except KeyError:
                # not a parametrized fixture
                raise ValueError("Error: fixture union parametrization not present")
            else:
                if isinstance(p_to_apply, NormalParamz):
                    raise ValueError("This should not happen !")
                elif isinstance(p_to_apply, UnionParamz):
                    # ******** Union parametrization **********
                    if _DEBUG:
                        print("[Node %s] Applying parametrization for UNION %s"
                              "" % (current_node.to_str(with_children=False, with_discarded=False),
                                    p_to_apply.union_fixture_name))
                    # always use 'indirect' since that's a fixture.
                    calls = self._parametrize_calls(calls, p_to_apply.union_fixture_name,
                                                    p_to_apply.alternative_names, indirect=True,
                                                    ids=p_to_apply.ids,
                                                    scope=p_to_apply.scope, **p_to_apply.kwargs)
                    # Change the ids
                    for callspec in calls:
                        callspec._idlist[-1] = apply_id_style(callspec._idlist[-1],
                                                              p_to_apply.union_fixture_name,
                                                              p_to_apply.alternative_names[0].idstyle)
                    # now move to the children
                    nodes_children = [None] * len(calls)
                    for i in range(len(calls)):
                        # the alternative that was chosen for this call selects the child subtree
                        active_alternative = calls[i].params[p_to_apply.union_fixture_name]
                        child_node = current_node.children[active_alternative.fixture_name]
                        child_pending = pending.copy()
                        # place the childs parameter in the first position if it is in the list
                        # not needed anymore - already automatic
                        # try:
                        #     child_pending.move_to_end(child_alternative, last=False)
                        # except KeyError:
                        #     # not in the list: the child alternative is a non-parametrized fixture
                        #     pass
                        calls[i], nodes_children[i] = self._process_node(child_node, child_pending, [calls[i]])
                    # finally flatten the list if needed (each recursion returned a list)
                    calls = flatten_list(calls)
                    nodes_children = flatten_list(nodes_children)
                    return calls, nodes_children
def _make_unique(lst):
_set = set()
def _first_time_met(v):
if v not in _set:
_set.add(v)
return True
else:
return False
return [v for v in lst if _first_time_met(v)]
def flatten_list(lst):
    """Flatten one level of nesting: a list of lists becomes a single flat list."""
    flat = []
    for sub_list in lst:
        flat.extend(sub_list)
    return flat
def sort_according_to_ref_list(fixturenames, param_names):
    """
    Sorts items in the first list, according to their position in the second.
    Items that are not in the second list stay in the same position, the others are just swapped.
    A new list is returned.

    :param fixturenames: the list to reorder (not modified)
    :param param_names: the reference ordering; every name must appear in `fixturenames`
    :return: a new reordered list
    """
    # positions occupied by the reference names, in reference order
    occupied_positions = [fixturenames.index(pname) for pname in param_names]
    result = list(fixturenames)
    # redistribute the reference names over the same positions, but in ascending slot order
    for src_idx, dst_idx in zip(occupied_positions, sorted(occupied_positions)):
        result[dst_idx] = fixturenames[src_idx]
    return result
_OPTION_NAME = 'with_reorder'
_SKIP = 'skip'
_NORMAL = 'normal'
_OPTIONS = {
_NORMAL: """(default) the usual reordering done by pytest to optimize setup/teardown of session- / module-
/ class- fixtures, as well as all the modifications made by other plugins (e.g. pytest-reorder)""",
_SKIP: """skips *all* reordering, even the one done by pytest itself or installed plugins
(e.g. pytest-reorder)"""
}
# @hookspec(historic=True)
def pytest_addoption(parser):
    """
    Pytest hook: registers the `--with-reorder` command line option, which selects
    one of the reordering strategies described in `_OPTIONS`.

    :param parser: the pytest command line parser
    """
    group = parser.getgroup('pytest-cases ordering', 'pytest-cases reordering options', after='general')
    help_str = """String specifying one of the reordering alternatives to use. Should be one of :
 - %s""" % ("\n - ".join(["%s: %s" % (k, v) for k, v in _OPTIONS.items()]))
    group.addoption(
        '--%s' % _OPTION_NAME.replace('_', '-'), type=str, default='normal', help=help_str
    )
# @hookspec(historic=True)
def pytest_configure(config):
    """
    Pytest hook: validates the value given to `--with-reorder`.

    :param config: the pytest config object
    :raises ValueError: if the provided choice is not one of the supported values
    """
    # validate the config
    allowed_values = ('normal', 'skip')
    reordering_choice = config.getoption(_OPTION_NAME)
    if reordering_choice not in allowed_values:
        raise ValueError("[pytest-cases] Wrong --%s option: %s. Allowed values: %s"
                         "" % (_OPTION_NAME, reordering_choice, allowed_values))
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_collection_modifyitems(session, config, items):
    """
    An alternative to the `reorder_items` function in fixtures.py
    (https://github.com/pytest-dev/pytest/blob/master/src/_pytest/fixtures.py#L209)
    We basically set back the previous order once the pytest ordering routine has completed.

    TODO we should set back an optimal ordering, but current PR https://github.com/pytest-dev/pytest/pull/3551
    will probably not be relevant to handle our "union" fixtures > need to integrate the NOT_USED markers in the method

    :param session: the pytest session
    :param config: the pytest config (read for the --with-reorder choice)
    :param items: the collected test items, reordered in place
    :return:
    """
    ordering_choice = config.getoption(_OPTION_NAME)
    if ordering_choice == _SKIP:
        # remember initial order
        initial_order = copy(items)
        # let pytest (and other plugins) reorder/deselect as usual
        yield
        # put back the initial order but keep the filter
        # (plugins may have removed items, so only keep those still present)
        to_return = [None] * len(items)
        i=0
        for item in initial_order:
            if item in items:
                to_return[i] = item
                i += 1
        assert i == len(items)
        items[:] = to_return
    else:
        # do nothing
        yield
| [
"sylvain.marie@se.com"
] | sylvain.marie@se.com |
43af2e968300ab8273b4d068ebb206c0c17f8970 | 99af66869aad4d4baabb60d18f5b3e955e60920c | /pt28bak/get.py | 065eec943ab0daadca873d9db2c26c70fdb68ab0 | [] | no_license | zj008/pro28bak | 3abfd0600ce4223952541c01e88c097f9312a453 | cccbcbcfca0e5fe0329e44528d026df1c9be2b9f | refs/heads/master | 2022-08-22T19:24:46.956982 | 2020-05-21T10:10:28 | 2020-05-21T10:10:28 | 265,805,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from db import Sql
from datetime import datetime
def get_now_data(t):
    """
    Fetch the most recent prediction row from table `pt_<t>` together with the
    publish time of the closest earlier entry in table `pt`.

    :param t: table-name suffix (e.g. "big"). NOTE(review): interpolated straight
        into the SQL string — only call with trusted, hard-coded values.
    :return: dict with keys `t` (publish timestamp as epoch seconds, or the
        fallback value 20 on failure) and `data` (row id plus columns alg0..alg8)
    """
    sql = Sql()
    ret = sql.execute(f"select id, alg0, alg1, alg2, alg3, alg4, alg5, alg6, alg7, alg8 from pt_{t} order by id desc limit 1")
    # NOTE(review): first row, first element is taken and then indexed again below —
    # presumably Sql.execute returns an extra nesting level; confirm against db.Sql
    ret = ret[0][0]
    id = ret[0]
    data = dict(
        id=id,
        alg0=ret[1],
        alg1=ret[2],
        alg2=ret[3],
        alg3=ret[4],
        alg4=ret[5],
        alg5=ret[6],
        alg6=ret[7],
        alg7=ret[8],
        alg8=ret[9],
    )
    # publish time of the closest entry in `pt` with a smaller id
    ret = sql.execute(f"select pub_time from pt where id < {id} order by id desc limit 1")
    try:
        t = ret[0][0][0]
        t = datetime.strptime(t, "%Y:%m:%d %H:%M").timestamp()
    except Exception as e:
        print(e)
        # fall back to a fixed default when pub_time is missing or malformed
        t = 20
    sql.close()
    return dict(t=t, data=data)
# manual smoke test: fetch the latest row from the "big" table
if __name__ == '__main__':
    get_now_data("big")
| [
"13864264792@163.com"
] | 13864264792@163.com |
4d97fad9266a037d603b5a43d20dff72f6a5cdfc | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/shipparts/WheelInteractive.py | ac3fc5be3f7f39bac48476e6aa630f9acf8c2189 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from pirates.interact.SimpleInteractive import SimpleInteractive
from pirates.piratesbase import PLocalizer
class WheelInteractive(SimpleInteractive):
    """
    Interactive hotspot attached to a ship's steering wheel, letting an avatar
    request to take the wheel and pilot the ship.
    """

    def __init__(self, ship):
        self.ship = ship
        # locate the wheel locator on the ship model; if it is missing, attach a
        # dummy node so the interactive always has a node to hang off of
        wheelNode = ship.model.locators.find('**/location_wheel')
        if not wheelNode:
            wheelNode = ship.model.root.attachNewNode('dummyWheel')
        SimpleInteractive.__init__(self, wheelNode, 'wheel-%s' % ship.doId, PLocalizer.InteractWheel)

    def interactionAllowed(self, avId):
        # defer to the ship: only one avatar may take the wheel
        return self.ship.canTakeWheel(avId)

    def requestInteraction(self, avId):
        self.ship.requestPilot(avId)
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
169801635325e499f566e3cf87e34c42139d7254 | 507b5f8409e8b7420f0d4c8192e8cfd12a210886 | /src/sdss_prediction.py | 223e675669f657e1e899c6c49f5bd10ec6d8aaff | [
"MIT"
] | permissive | ronaldpereira/galaxy-cluster-prediction | 7822eb43c9b96e32e3ef416e69364ea17cc50bae | 21117efbee60c924c814d2ae770df77898567364 | refs/heads/master | 2023-04-08T04:37:07.721657 | 2022-11-21T23:57:06 | 2022-11-21T23:57:06 | 157,046,719 | 0 | 0 | MIT | 2023-03-25T01:56:26 | 2018-11-11T04:20:06 | Python | UTF-8 | Python | false | false | 843 | py | import pandas as pd
from sklearn.model_selection import train_test_split
import libs.arg_parse_config as APC
import libs.data_preprocessing as DP
import libs.neural_network as NN
import warnings
warnings.filterwarnings('ignore')
args = APC.parser()
sdss = pd.read_csv(args.input_path)
data_prep = DP.DataPreprocessing()
# Uncomment the line below to balance QSO class
# sdss = data_prep.balance_class(sdss, 'QSO', 6)
encoded_classes = data_prep.class_encoder(sdss['class'])
x_train, x_test, y_train, y_test = train_test_split(sdss.drop('class', axis=1).values, encoded_classes, train_size=0.666)
nn = NN.NeuralNetwork(x_train.shape[1], y_train.shape[1], args.hidden_layers, args.hidden_layers_size, args.learning_rate)
nn.train(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs)
nn.test(x_test, y_test, data_prep.lb)
| [
"ronald.drp11@gmail.com"
] | ronald.drp11@gmail.com |
8a3ab9a44123fb4b8c27b04b394e1d4dd75d366f | 6d686e24d9fe716b999d6987bfeb612a527242c7 | /RunTrainedModels.py | 9f2a12bed9ea26aa5af94c77bc51e47956c391a2 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | seattlefella/cnnMNIST | 2180bbbe2da238d868963aaf7ac75ccd18240bd0 | 85418951a8bf8f42d5f7b92fc0cdff63f4b532d0 | refs/heads/master | 2021-04-06T04:21:49.639313 | 2018-03-12T01:48:02 | 2018-03-12T01:48:02 | 124,818,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | """
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
# Directives needed by tensorFlow
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Some misc. libraries that are useful
import keras.backend as K
from keras.models import load_model
import numpy as np
from matplotlib import pyplot
from tensorflow.examples.tutorials.mnist import input_data
import Util.ML_Utils as utils
# Some misc. libraries that are useful
import os
import sys
import time
import math
# Let's print out the versions of python, tensorFlow and keras
utils.print_lib_versions()
# Import data once for all runs
mnist = input_data.read_data_sets(utils.PATH_TO_DATA, one_hot=True)
testCount = 0
data = []
dt = np.dtype([('name', np.unicode_, 72), ('loss', np.float64), ('accuracy', np.float64)])
path = ('{0}3/').format(utils.PATH_TO_TRAINED_MODELS)
for filename in os.listdir(path):
print('---------{}---------'.format(filename))
model = load_model(path+filename)
score = model.evaluate(mnist.test.images, mnist.test.labels, verbose=0)
s = "Test Loss:{0:.4f} Test Accuracy{1:.4f}".format(score[0], score[1])
data.append((filename, score[0], -1*score[1]))
testCount+=1
if(testCount > 99) :
break
l = np.array(data, dtype=dt)
sortedData=np.sort(l, order='accuracy')
print('########################')
print(type(l))
print(l.shape)
print(l.dtype)
print(sortedData)
| [
"Seattlefella@ymail.com"
] | Seattlefella@ymail.com |
97149269400558d93a4ef6ec0d73377a66d2b056 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/encodings/base64_codec.py | 731c567665b73aafb55074d0d5ffc115ec9270e3 | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,227 | py | #\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
""" Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, base64
### Codec APIs
def base64_encode(input, errors='strict'):
    """ Encodes the object input and returns a tuple (output
        object, length consumed).

        input must be a bytes-like object; the result is the
        base64-encoded bytes and the number of input bytes consumed.

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.
    """
    assert errors == 'strict'
    # base64.encodestring() was deprecated in Python 3.1 and removed in
    # Python 3.9; prefer its replacement encodebytes() when available so
    # the codec keeps working on modern interpreters.
    encode = getattr(base64, 'encodebytes', None) or base64.encodestring
    output = encode(input)
    return (output, len(input))
def base64_decode(input, errors='strict'):
    """ Decodes the object input and returns a tuple (output
        object, length consumed).

        input must be an object which provides the bf_getreadbuf
        buffer slot. Python strings, buffer objects and memory
        mapped files are examples of objects providing this slot.

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.
    """
    assert errors == 'strict'
    # base64.decodestring() was deprecated in Python 3.1 and removed in
    # Python 3.9; prefer its replacement decodebytes() when available.
    decode = getattr(base64, 'decodebytes', None) or base64.decodestring
    output = decode(input)
    return (output, len(input))
class Codec(codecs.Codec):
    """Stateless codec: delegates to the module-level encode/decode helpers."""

    def encode(self, input,errors='strict'):
        # Whole-buffer encode; state-free, so delegation is sufficient.
        return base64_encode(input,errors)

    def decode(self, input,errors='strict'):
        # Whole-buffer decode; state-free, so delegation is sufficient.
        return base64_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; each call encodes its chunk independently
    (base64 padding is emitted per chunk, matching the original codec)."""

    def encode(self, input, final=False):
        assert self.errors == 'strict'
        # base64.encodestring() was removed in Python 3.9; use its
        # replacement encodebytes() when present.
        encode = getattr(base64, 'encodebytes', None) or base64.encodestring
        return encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each call decodes its chunk independently."""

    def decode(self, input, final=False):
        assert self.errors == 'strict'
        # base64.decodestring() was removed in Python 3.9; use its
        # replacement decodebytes() when present.
        decode = getattr(base64, 'decodebytes', None) or base64.decodestring
        return decode(input)
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: inherits encode/decode from Codec unchanged."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: inherits encode/decode from Codec unchanged."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the encodings machinery registers
    under the name 'base64'."""
    return codecs.CodecInfo(
        name='base64',
        encode=base64_encode,
        decode=base64_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| [
"tberk@gmx.at"
] | tberk@gmx.at |
84935feeafc9305a180a4cda5d4038dc1716fe2c | a9976ab3959491a9f08a4c43e95aef80cb0d8332 | /ddpg.py | 96e1c370b9992f36eff1a873c9b87a75af2e1d3d | [] | no_license | jemdiggity/RL-Quadcopter-2 | aecf63f7dc484e6c1893ae5dd3ecbd9d4b18c16c | 12f03811409a4eaf426d8cbc67e08e08b89d4fbb | refs/heads/master | 2020-03-26T09:39:16.466275 | 2018-09-29T04:27:18 | 2018-09-29T04:34:28 | 144,757,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,321 | py | """
Implementation of DDPG - Deep Deterministic Policy Gradient
Algorithm and hyperparameter details can be found here:
http://arxiv.org/pdf/1509.02971v2.pdf
The algorithm is tested on the Pendulum-v0 OpenAI gym task
and developed with tflearn + Tensorflow
Author: Patrick Emami
"""
import tensorflow as tf
import numpy as np
import gym
from gym import wrappers
import tflearn
import argparse
import pprint as pp
from replay_buffer import ReplayBuffer
# ===========================
# Actor and Critic DNNs
# ===========================
class ActorNetwork(object):
    """
    Input to the network is the state, output is the action
    under a deterministic policy.

    The output layer activation is a tanh to keep the action
    between -action_bound and action_bound
    """

    def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
        """Build the online actor, its target copy, and the training ops.

        sess: tf.Session used for all graph evaluations.
        state_dim / action_dim: sizes of the state and action vectors.
        action_bound: scalar used to rescale the tanh output.
        learning_rate: Adam step size for the actor update.
        tau: soft target-update coefficient (target <- tau*online + (1-tau)*target).
        batch_size: minibatch size, used to average the policy gradient.
        """
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.action_bound = action_bound
        self.learning_rate = learning_rate
        self.tau = tau
        self.batch_size = batch_size

        # Actor Network
        self.inputs, self.out, self.scaled_out = self.create_actor_network()

        self.network_params = tf.trainable_variables()

        # Target Network
        # NOTE: relies on being built immediately after the online network,
        # so its variables are exactly the tail of tf.trainable_variables().
        self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()

        self.target_network_params = tf.trainable_variables()[
            len(self.network_params):]

        # Op for periodically updating target network with online network
        # weights
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
                                                  tf.multiply(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]

        # This gradient will be provided by the critic network
        self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])

        # Combine the gradients here
        # (negated so that gradient *ascent* on Q becomes descent for Adam).
        self.unnormalized_actor_gradients = tf.gradients(
            self.scaled_out, self.network_params, -self.action_gradient)
        # Average the summed gradients over the minibatch.
        self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))

        # Optimization Op
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).\
            apply_gradients(zip(self.actor_gradients, self.network_params))

        self.num_trainable_vars = len(
            self.network_params) + len(self.target_network_params)

    def create_actor_network(self):
        """Build the 400-300 MLP policy; returns (inputs, out, scaled_out)."""
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.fully_connected(net, 300)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(
            net, self.a_dim, activation='tanh', weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out

    def train(self, inputs, a_gradient):
        """Apply one actor update from states and the critic's dQ/da."""
        self.sess.run(self.optimize, feed_dict={
            self.inputs: inputs,
            self.action_gradient: a_gradient
        })

    def predict(self, inputs):
        """Return the online policy's (scaled) actions for `inputs`."""
        return self.sess.run(self.scaled_out, feed_dict={
            self.inputs: inputs
        })

    def predict_target(self, inputs):
        """Return the target policy's (scaled) actions for `inputs`."""
        return self.sess.run(self.target_scaled_out, feed_dict={
            self.target_inputs: inputs
        })

    def update_target_network(self):
        """Soft-update the target network toward the online network."""
        self.sess.run(self.update_target_network_params)

    def get_num_trainable_vars(self):
        """Total variable count of online + target actor (used by the critic
        to slice its own variables out of tf.trainable_variables())."""
        return self.num_trainable_vars
class CriticNetwork(object):
    """
    Input to the network is the state and action, output is Q(s,a).
    The action must be obtained from the output of the Actor network.
    """

    def __init__(self, sess, state_dim, action_dim, learning_rate, tau, gamma, num_actor_vars):
        """Build the online critic, its target copy, and the training ops.

        sess: tf.Session used for all graph evaluations.
        state_dim / action_dim: sizes of the state and action vectors.
        learning_rate: Adam step size for the critic update.
        tau: soft target-update coefficient.
        gamma: discount factor used by callers when forming TD targets.
        num_actor_vars: number of actor variables already in the graph,
            used to slice out this network's variables.
        """
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau
        self.gamma = gamma

        # Create the critic network
        self.inputs, self.action, self.out = self.create_critic_network()

        self.network_params = tf.trainable_variables()[num_actor_vars:]

        # Target Network
        self.target_inputs, self.target_action, self.target_out = self.create_critic_network()

        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]

        # Op for periodically updating target network with online network
        # weights with regularization
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
            + tf.multiply(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]

        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])

        # Define loss and optimization Op
        self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.loss)

        # Get the gradient of the net w.r.t. the action.
        # For each action in the minibatch (i.e., for each x in xs),
        # this will sum up the gradients of each critic output in the minibatch
        # w.r.t. that action. Each output is independent of all
        # actions except for one.
        self.action_grads = tf.gradients(self.out, self.action)

    def create_critic_network(self):
        """Build the Q network; returns (state inputs, action inputs, Q out)."""
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)

        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out

    def train(self, inputs, action, predicted_q_value):
        """One critic update toward the TD targets; returns (Q values, _)."""
        return self.sess.run([self.out, self.optimize], feed_dict={
            self.inputs: inputs,
            self.action: action,
            self.predicted_q_value: predicted_q_value
        })

    def predict(self, inputs, action):
        """Return online-network Q(s,a) estimates."""
        return self.sess.run(self.out, feed_dict={
            self.inputs: inputs,
            self.action: action
        })

    def predict_target(self, inputs, action):
        """Return target-network Q(s,a) estimates."""
        return self.sess.run(self.target_out, feed_dict={
            self.target_inputs: inputs,
            self.target_action: action
        })

    def action_gradients(self, inputs, actions):
        """Return dQ/da for each (state, action) pair, for the actor update."""
        return self.sess.run(self.action_grads, feed_dict={
            self.inputs: inputs,
            self.action: actions
        })

    def update_target_network(self):
        """Soft-update the target network toward the online network."""
        self.sess.run(self.update_target_network_params)
# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is
# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise:
    """Temporally correlated exploration noise from an Ornstein-Uhlenbeck
    process, discretized with step `dt`.

    Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
    """

    def __init__(self, mu, sigma=0.3, theta=.15, dt=1e-2, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        # One Euler-Maruyama step: mean reversion toward mu plus a
        # Gaussian diffusion term; the new sample becomes the next state.
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        sample = self.x_prev + drift + diffusion
        self.x_prev = sample
        return sample

    def reset(self):
        # Restart the process from x0 when given, otherwise from zero.
        if self.x0 is None:
            self.x_prev = np.zeros_like(self.mu)
        else:
            self.x_prev = self.x0

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# ===========================
# Tensorflow Summary Ops
# ===========================
def build_summaries():
    """Create TensorBoard scalar summaries for episode reward and max Q.

    Returns (summary_ops, summary_vars): the merged summary op plus the
    two tf.Variables to feed when evaluating it.
    """
    episode_reward = tf.Variable(0.)
    tf.summary.scalar("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    tf.summary.scalar("Qmax Value", episode_ave_max_q)

    summary_vars = [episode_reward, episode_ave_max_q]
    summary_ops = tf.summary.merge_all()

    return summary_ops, summary_vars
# ===========================
# Agent Training
# ===========================
def train(sess, task, args, actor, critic, actor_noise):
    """Run the DDPG training loop on `task`.

    sess: tf.Session shared by actor and critic.
    task: environment-like object exposing reset()/step()/dump().
    args: dict of hyperparameters parsed from the command line.
    actor, critic: the ActorNetwork / CriticNetwork pair to train.
    actor_noise: callable returning exploration noise added to actions.
    """
    # Set up summary Ops
    summary_ops, summary_vars = build_summaries()

    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)

    # Initialize target network weights
    actor.update_target_network()
    critic.update_target_network()

    # Initialize replay memory
    replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))

    # Needed to enable BatchNorm.
    # This hurts the performance on Pendulum but could be useful
    # in other environments.
    # tflearn.is_training(True)

    for i in range(int(args['max_episodes'])):

        s = task.reset()

        ep_reward = 0
        ep_ave_max_q = 0

        for j in range(int(args['max_episode_len'])):

            if args['render_env']:
                # BUG: `env` is not defined in this function (the
                # environment is passed in as `task`), so running with
                # --render-env raises NameError.
                env.render()

            # Added exploration noise
            #a = actor.predict(np.reshape(s, (1, 3))) + (1. / (1. + i))
            a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()

            # s2, r, terminal, info = task.step(a[0])
            s2, r, terminal = task.step(a[0])

            replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
                              terminal, np.reshape(s2, (actor.s_dim,)))

            # Keep adding experience to the memory until
            # there are at least minibatch size samples
            if replay_buffer.size() > int(args['minibatch_size']):
                s_batch, a_batch, r_batch, t_batch, s2_batch = \
                    replay_buffer.sample_batch(int(args['minibatch_size']))

                # Calculate targets
                target_q = critic.predict_target(
                    s2_batch, actor.predict_target(s2_batch))

                # Bootstrap the TD target only for non-terminal transitions.
                y_i = []
                for k in range(int(args['minibatch_size'])):
                    if t_batch[k]:
                        y_i.append(r_batch[k])
                    else:
                        y_i.append(r_batch[k] + critic.gamma * target_q[k])

                # Update the critic given the targets
                predicted_q_value, _ = critic.train(
                    s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))

                ep_ave_max_q += np.amax(predicted_q_value)

                # Update the actor policy using the sampled gradient
                a_outs = actor.predict(s_batch)
                grads = critic.action_gradients(s_batch, a_outs)
                actor.train(s_batch, grads[0])

                # Update target networks
                actor.update_target_network()
                critic.update_target_network()

            s = s2
            ep_reward += r

            if terminal:
                # NOTE(review): if an episode terminates at j == 0 this
                # divides by zero - confirm episodes always run >= 2 steps.
                summary_str = sess.run(summary_ops, feed_dict={
                    summary_vars[0]: ep_reward,
                    summary_vars[1]: ep_ave_max_q / float(j)
                })

                writer.add_summary(summary_str, i)
                writer.flush()

                print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
                    i, (ep_ave_max_q / float(j))))
                task.dump()
                break
from task import Task
def main(args):
    """Build the quadcopter Task, the actor/critic networks, and train.

    args: dict of command-line options (see the argparse setup below).
    """
    with tf.Session() as sess:

        # env = gym.make(args['env'])
        # NOTE(review): num_episodes is never used; args['max_episodes']
        # controls the episode count instead.
        num_episodes = 10000

        # Modify the values below to give the quadcopter a different starting position.
        runtime = 5                                          # time limit of the episode
        init_pose = np.array([0., 0., 100., 0., 0., 0.])  # initial pose
        init_velocities = np.array([0., 0., 0.])         # initial velocities
        init_angle_velocities = np.array([0., 0., 0.])   # initial angle velocities
        target_pos = np.array([0., 0., 100.])            # hover target
        task = Task(init_pose, init_velocities, init_angle_velocities, runtime, target_pos)

        # Seed everything for repeatability.
        np.random.seed(int(args['random_seed']))
        tf.set_random_seed(int(args['random_seed']))
        # env.seed(int(args['random_seed']))

        state_dim = task.state_size #env.observation_space.shape[0]
        action_dim = task.action_size #env.action_space.shape[0]
        action_bound = 1 # rescale later #task.action_high #env.action_space.high
        # Ensure action bound is symmetric
        # assert (env.action_space.high == -env.action_space.low)

        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             float(args['actor_lr']), float(args['tau']),
                             int(args['minibatch_size']))

        critic = CriticNetwork(sess, state_dim, action_dim,
                               float(args['critic_lr']), float(args['tau']),
                               float(args['gamma']),
                               actor.get_num_trainable_vars())

        actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))

        # if args['use_gym_monitor']:
        #     if not args['render_env']:
        #         env = wrappers.Monitor(
        #             env, args['monitor_dir'], video_callable=False, force=True)
        #     else:
        #         env = wrappers.Monitor(env, args['monitor_dir'], force=True)

        train(sess, task, args, actor, critic, actor_noise)

        # if args['use_gym_monitor']:
        #     env.monitor.close()
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters, echo them, and train.
    parser = argparse.ArgumentParser(description='provide arguments for DDPG agent')

    # agent parameters
    parser.add_argument('--actor-lr', help='actor network learning rate', default=0.0001)
    parser.add_argument('--critic-lr', help='critic network learning rate', default=0.001)
    parser.add_argument('--gamma', help='discount factor for critic updates', default=0.99)
    parser.add_argument('--tau', help='soft target update parameter', default=0.001)
    parser.add_argument('--buffer-size', help='max size of the replay buffer', default=1000000)
    parser.add_argument('--minibatch-size', help='size of minibatch for minibatch-SGD', default=64)

    # run parameters
    parser.add_argument('--env', help='choose the gym env- tested on {Pendulum-v0}', default='Pendulum-v0')
    parser.add_argument('--random-seed', help='random seed for repeatability', default=1234)
    parser.add_argument('--max-episodes', help='max num of episodes to do while training', default=50000)
    parser.add_argument('--max-episode-len', help='max length of 1 episode', default=1000)
    parser.add_argument('--render-env', help='render the gym env', action='store_true')
    parser.add_argument('--use-gym-monitor', help='record gym results', action='store_true')
    parser.add_argument('--monitor-dir', help='directory for storing gym results', default='./results/gym_ddpg')
    parser.add_argument('--summary-dir', help='directory for storing tensorboard info', default='./results/tf_ddpg')

    parser.set_defaults(render_env=False)
    parser.set_defaults(use_gym_monitor=False)

    # vars() turns the Namespace into the plain dict main()/train() expect.
    args = vars(parser.parse_args())

    pp.pprint(args)

    main(args)
| [
"jeremy.hale@gmail.com"
] | jeremy.hale@gmail.com |
10ef75e8ab159bfb954c98b309c7a3446da4659f | bce02a632920b36a9fcaae35689743d9a54ea47f | /aboutdialog.py | c2969d932ecb4113c2730fa11d8c56034dda47cb | [] | no_license | alexbruy/mergeshapes | 17add9cd2d05c037fb84ed4dc3312e0980c2a1e8 | 90d82864d9ca16e44bade18d8d7db9a89f2d60be | refs/heads/master | 2021-01-01T06:26:38.374255 | 2018-06-25T07:55:49 | 2018-06-25T07:55:49 | 11,588,825 | 0 | 5 | null | 2018-06-25T08:00:13 | 2013-07-22T18:29:00 | Python | UTF-8 | Python | false | false | 3,152 | py | # -*- coding: utf-8 -*-
#******************************************************************************
#
# MergeShapes
# ---------------------------------------------------------
# Merge multiple shapefiles to a single shapefile
#
# Copyright (C) 2010-2013 Alexander Bruy (alexander.bruy@gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
#******************************************************************************
import os
import ConfigParser
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ui.ui_aboutdialogbase import Ui_Dialog
import resources_rc
class AboutDialog(QDialog, Ui_Dialog):
    """'About' dialog for the MergeShapes QGIS plugin: shows logo, version,
    credits, and a Help button that opens the plugin wiki."""

    def __init__(self):
        QDialog.__init__(self)
        self.setupUi(self)

        self.btnHelp = self.buttonBox.button(QDialogButtonBox.Help)

        # Read the plugin version from the bundled metadata.txt.
        cfg = ConfigParser.SafeConfigParser()
        cfg.read(os.path.join(os.path.dirname(__file__), "metadata.txt"))
        version = cfg.get("general", "version")

        self.lblLogo.setPixmap(QPixmap(":/icons/mergeshapes.png"))
        self.lblVersion.setText(self.tr("Version: %s") % (version))

        # Render the about text as rich HTML with clickable external links.
        doc = QTextDocument()
        doc.setHtml(self.getAboutText())
        self.textBrowser.setDocument(doc)
        self.textBrowser.setOpenExternalLinks(True)

        self.buttonBox.helpRequested.connect(self.openHelp)

    def reject(self):
        # No extra teardown needed; defer to the default QDialog behaviour.
        QDialog.reject(self)

    def openHelp(self):
        """Open the plugin wiki in the system browser."""
        # Resolve the effective locale: QGIS may override the system locale.
        overrideLocale = QSettings().value("locale/overrideFlag", False)
        if not overrideLocale:
            localeFullName = QLocale.system().name()
        else:
            localeFullName = QSettings().value("locale/userLocale", "")

        localeShortName = localeFullName[0:2]
        # NOTE(review): both branches open the same URL, so the locale check
        # currently has no effect; presumably localized pages were planned.
        if localeShortName in ["ru", "uk"]:
            QDesktopServices.openUrl(QUrl("http://hub.qgis.org/projects/mergeshapes/wiki"))
        else:
            QDesktopServices.openUrl(QUrl("http://hub.qgis.org/projects/mergeshapes/wiki"))

    def getAboutText(self):
        """Return the HTML body shown in the dialog's text browser."""
        return self.tr("""<p>Merge multiple shapefiles into single one. Supports
recursive directory traversal, adds fields with input filename and path.</p>
<p><strong>Developers</strong>: Alexander Bruy</p>
<p><strong>Homepage</strong>: <a href="http://hub.qgis.org/projects/mergeshapes">http://hub.qgis.org/projects/mergeshapes</a></p>
<p>Please report bugs at <a href="http://hub.qgis.org/projects/mergeshapes/issues">bugtracker</a>.</p>"""
                       )
| [
"alexander.bruy@gmail.com"
] | alexander.bruy@gmail.com |
781728cd41d7b2d6039a59dec118afaea02aea57 | df3e3e937e85ae03bc6714bf9aa487d9338d44fd | /mpmp/exceptions.py | feb76e4c5975c4adf3db8b9f293ccc2c91ce9877 | [
"BSD-3-Clause"
] | permissive | mayala1925/mpmp | 9a6b4be43f9bc29874e9c0cdfa0866d70b61263c | 7bd4d49e4acd745447dc0018ac121d1a45e8bfbc | refs/heads/master | 2023-08-16T13:23:08.019630 | 2021-10-13T23:09:07 | 2021-10-13T23:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | """
Exceptions specific to pan-cancer prediction experiments
"""
class ResultsFileExistsError(Exception):
    """Raised when a results file already exists for the requested gene
    and cancer type.

    Letting this propagate (instead of handling it here) allows calling
    scripts to decide whether to warn and continue or to abort.
    """
class NoTrainSamplesError(Exception):
    """Raised when a cross-validation fold contains no training samples
    for the given cancer type.

    Letting this propagate allows calling scripts to decide whether to
    warn and continue or to abort.
    """
class NoTestSamplesError(Exception):
    """Raised when a cross-validation fold contains no test samples for
    the given cancer type.

    Letting this propagate allows calling scripts to decide whether to
    warn and continue or to abort.
    """
class OneClassError(Exception):
    """Raised when the test set for the given cancer type contains only a
    single class, making classification metrics undefined.

    Letting this propagate allows calling scripts to decide whether to
    warn and continue or to abort.
    """
class GenesNotFoundError(Exception):
    """Raised when genes supplied for classification are absent from the
    existing datasets carrying oncogene/TSG annotations.

    Letting this propagate allows calling scripts to decide whether to
    warn and continue or to abort.
    """
| [
"jjc2718@gmail.com"
] | jjc2718@gmail.com |
e2941e8af32e3746da3b3738713033ba63d61872 | df197ff998c3f18276ff447aba859eb546eb866e | /Allegro/allegro/allegro/spiders/AllegroSpider.py | e3daee80f488f54553402b4dd82c29561eb69b21 | [] | no_license | Ferbes/Webscrapper | d163190c58f8771a72b715098e9dfede68ca4713 | ad891140e6dbf8da415a9fabb55b837b7c2d9c0e | refs/heads/master | 2021-01-18T22:55:54.973695 | 2016-06-17T21:27:57 | 2016-06-17T21:27:57 | 54,018,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py |
import scrapy
from scrapy.spiders import SitemapSpider
from scrapy.selector import Selector
from scrapy.exceptions import DropItem
from allegro.items import AllegroItem
class AllegroSpider(SitemapSpider):
    """Sitemap-driven spider scraping Nike shoe listings from allegro.pl.

    Sitemaps are discovered via robots.txt; only sitemap URLs containing
    'sportowe-nike' are routed to parse_nike.
    """
    name = 'allegro-spider'
    allowed_domains = ['allegro.pl']
    # Scrapy resolves sitemap locations from the robots.txt entry.
    sitemap_urls = ['http://allegro.pl/robots.txt']
    sitemap_rules = [
        ('sportowe-nike', 'parse_nike'),
    ]

    def parse_nike(self, response):
        """Yield one item per product page, read from itemprop meta tags."""
        # NOTE(review): .extract() returns a *list* of all matches; if one
        # scalar value per field is intended, .extract_first() is likely
        # what was meant - confirm downstream expectations.
        yield {
            'price': response.selector.xpath('//meta[@itemprop="price"]/@content').extract(),
            'title': response.selector.xpath('//meta[@itemprop="name"]/@content').extract(),
            'image': response.selector.xpath('//meta[@itemprop="image"]/@content').extract(),
            'url': response.selector.xpath('//meta[@itemprop="url"]/@content').extract()
        }
"konrad.ferbes@gmail.com"
] | konrad.ferbes@gmail.com |
8fda8333924bdd0b3d4d4a1fc03469652dc5986d | df823d33423d37251c49b4be12ee022170138071 | /python/mycurses.py | 198e2605f0b0559ffdd2ed3200c896e81e5f5c89 | [] | no_license | von/sandbox | ca2a87870f0f5e3153cb33fd940f1b4cb9da7985 | 5e47e93c32bc85f986f39b1d4df8a384c7ff0019 | refs/heads/main | 2023-04-30T02:14:36.466490 | 2023-04-18T14:11:54 | 2023-04-18T14:11:54 | 331,739 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #!/usr/bin/env python3
import curses
import os
def main(stdscr):
    """Small curses demo: draw two bordered windows, then show `ls` output.

    stdscr: the root window supplied by curses.wrapper(); each getch()
    call pauses until the user presses a key.
    """
    # Two bordered sub-windows: a small banner and a larger panel.
    win1 = curses.newwin(3, 30, 2, 0)
    win1.border()
    win2 = curses.newwin(10, 30, 10, 0)
    win2.border()

    stdscr.addstr(0, 0, "Testing...")
    win1.addstr(0, 0, "Foobar")
    win2.addstr(0, 0, "I win")

    # refresh() pushes each window's buffer to the terminal.
    stdscr.refresh()
    win1.refresh()
    win2.refresh()
    stdscr.getch()

    win2.clear()
    win2.addstr(0, 0, "2..3..")
    win2.refresh()
    stdscr.getch()

    # Stream `ls` output into win2, one line per row.  Using a context
    # manager closes the pipe deterministically (the original leaked the
    # os.popen() handle).
    with os.popen("ls") as ls:
        for i, line in enumerate(ls):
            try:
                win2.addstr(i, 0, line.encode("utf-8"))
            except curses.error:
                # addstr raises curses.error when writing past the window
                # edge; treat that as "window full" and stop.
                break
    win2.refresh()
    stdscr.getch()


curses.wrapper(main)
| [
"von@vwelch.com"
] | von@vwelch.com |
2bc1432323a455395c7e8d97b4f3896a33278eb9 | c1c00ced90d47b9425fa11b6e0e5148a26a70085 | /tests/test_cli.py | d3438f75559b5b4993b1f8da97e7d6b0531eb024 | [
"MIT"
] | permissive | destos/Patterner | a8e90e30f0f2ca9411beb39e4cb8ef9e25fedc23 | 3e32468e843ec817b94da9df543c891ca69927fc | refs/heads/master | 2020-04-25T14:44:23.872391 | 2019-02-27T05:50:01 | 2019-02-27T05:50:01 | 172,852,064 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | """Sample integration test module using pytest-describe and expecter."""
# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import pytest
from click.testing import CliRunner
from expecter import expect
from patterner.cli import main
@pytest.fixture
def runner():
    """Provide a Click CliRunner for invoking the CLI in-process."""
    return CliRunner()
def describe_cli():
    """CLI behaviour specs (pytest-describe style: nested fns are tests)."""

    def describe_conversion():

        def when_integer(runner):
            # 42 converts to 12.80165 (presumably feet -> meters,
            # since 42 ft ~= 12.80165 m - confirm against the CLI docs).
            result = runner.invoke(main, ['42'])
            expect(result.exit_code) == 0
            expect(result.output) == "12.80165\n"

        def when_invalid(runner):
            # Non-numeric input yields no output but still a clean exit.
            result = runner.invoke(main, ['foobar'])
            expect(result.exit_code) == 0
            expect(result.output) == ""
| [
"patrick@forringer.com"
] | patrick@forringer.com |
6c8747ad2ed115b2787072a342e14f0d14836005 | f35bf19264008cf5e48b3d512a13eee643320293 | /python及爬虫/初级爬虫实例/BasedOnTheCrawler/HtmlParser.py | 97f8e544f6bf83daac2d6e75927c36c0f56e9f4d | [] | no_license | 1446373769/NewCode | 12b2be96e9efe275d39091bd2f84341dea6c3775 | 314b2cd85c66bf7f682418459ca436e82c3488f5 | refs/heads/master | 2020-05-09T22:53:46.474404 | 2019-04-15T14:06:17 | 2019-04-15T14:06:17 | 148,277,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | import re
from urllib.parse import urlparse
from urllib.parse import urljoin
from bs4 import BeautifulSoup
class HtmlParser(object):
    """Parse a downloaded encyclopedia page: collect follow-up URLs and
    extract the page's title and summary."""

    def parser(self, page_url, html_cont):
        """Parse page content, extracting new URLs and page data.

        :param page_url: URL of the downloaded page
        :param html_cont: downloaded HTML content
        :return: (new_urls, new_data) tuple, or None when either input
            is missing
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Extract the set of new absolute URLs linked from this page.

        :param page_url: URL of the downloaded page (base for relative links)
        :param soup: BeautifulSoup tree of the page
        :return: set of absolute URLs
        """
        new_urls = set()
        # Only follow in-site article links (hrefs containing '/item').
        links = soup.find_all('a', href=re.compile(r'/item'))
        for link in links:
            # Extract the href attribute...
            new_url = link['href']
            # ...and resolve it against the page URL to get a full address.
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Extract the useful data (title and summary) from this page.

        :param page_url: URL of the downloaded page
        :param soup: BeautifulSoup tree of the page
        :return: dict with 'url', 'title' and 'summary' keys
        """
        data = {}
        data['url'] = page_url
        title = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        data['title'] = title.get_text()
        summary = soup.find('div', class_='lemma-summary')
        # get_text() returns all nested text content (including child
        # tags) as one unicode string.
        data['summary'] = summary.get_text()
        return data
| [
"1446373769@qq.com"
] | 1446373769@qq.com |
184b601a9277e7e6f8aa27a0c38c389b529ad172 | 59b3dce3c770e70b2406cc1dd623a2b1f68b8394 | /python_3/lessons/Timing_Computations/src/ count_thirtyone_days.py | 682d87d1e84181941930cc296f2428ddc1f00032 | [] | no_license | patrickbeeson/python-classes | 04ed7b54fc4e1152a191eeb35d42adc214b08e39 | b5041e71badd1ca2c013828e3b2910fb02e9728f | refs/heads/master | 2020-05-20T07:17:36.693960 | 2015-01-23T14:41:46 | 2015-01-23T14:41:46 | 29,736,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from datetime import datetime, timedelta
# Show today's day-of-month and the day-of-month 31 days out.
now = datetime.now()
delta = timedelta(days=31)
delivery = now + delta
print("Today: %s" % now.strftime("%d"))
print("Delivery: %s" % delivery.strftime("%d"))
| [
"patrickbeeson@gmail.com"
] | patrickbeeson@gmail.com |
7581ff553d9d2380b9a3fa8d04bc19aa2433dd6d | 664c3ced94ab0e9a5bac547028db59a3ca1f2074 | /10. Use classes to create active objects /EG10-07 Time Tracker with exception handler.py | 6ab5ddc9eaa23b6a53954e815d434c237f056033 | [
"MIT"
] | permissive | nikcbg/Begin-to-Code-with-Python | 2b1283a7818e26d3471677b51d1832cde52c4ddc | a72fdf18ca15f564be895c6394a91afc75fc3e2c | refs/heads/master | 2021-06-23T23:09:36.009442 | 2021-06-23T11:17:24 | 2021-06-23T11:17:24 | 209,285,197 | 0 | 0 | MIT | 2021-03-17T07:48:09 | 2019-09-18T10:50:51 | Python | UTF-8 | Python | false | false | 6,121 | py | # EG10-07 Time Tracker with exception handler
import pickle
from BTCInput import *
# Create the contact class
class Contact:
    """A client contact that also tracks billable hours worked."""

    # Allowed billable session length, in hours (inclusive bounds).
    min_session_length = 0.5
    max_session_length = 3.5

    @staticmethod
    def validate_session_length(session_length):
        """Return True if session_length lies within the allowed bounds."""
        return (Contact.min_session_length <= session_length
                <= Contact.max_session_length)

    def __init__(self, name, address, telephone):
        """Create a contact with zero hours worked so far."""
        self.name = name
        self.address = address
        self.telephone = telephone
        self.hours_worked = 0

    def get_hours_worked(self):
        """Return the total hours spent with this contact."""
        return self.hours_worked

    def add_session(self, session_length):
        """Add a work session onto the running total of hours.

        :param session_length: length of the session in hours
        :raises ValueError: if the session length is outside the allowed bounds
            (ValueError is more specific than the bare Exception raised before;
            callers catching Exception still work).
        """
        if not Contact.validate_session_length(session_length):
            raise ValueError('Invalid session length')
        self.hours_worked += session_length
def new_contact():
    """Prompt for contact details and append a new Contact to the list."""
    print('Create new contact')
    # Gather the three data attributes in the same order as before.
    contact_name = read_text('Enter the contact name: ')
    contact_address = read_text('Enter the contact address: ')
    contact_phone = read_text('Enter the contact phone: ')
    # Build the instance and store it in the shared contact list.
    contacts.append(Contact(name=contact_name, address=contact_address,
                            telephone=contact_phone))
def find_contact(search_name):
    """Return the first contact whose name starts with search_name.

    Matching ignores surrounding whitespace and letter case.
    Returns None when no contact matches.
    """
    target = search_name.strip().lower()
    for candidate in contacts:
        if candidate.name.strip().lower().startswith(target):
            return candidate
    return None
def display_contact():
    """Prompt for a name and print that contact's details.

    Prints a not-found message when no contact matches.
    """
    print('Find contact')
    target = read_text('Enter the contact name: ')
    match = find_contact(target)
    if match is None:
        print('This name was not found.')
        return
    print('Name:', match.name)
    print('Address:', match.address)
    print('Telephone:', match.telephone)
    print('Hours on the case:', match.get_hours_worked())
def edit_contact():
    """Prompt for a name and let the user update that contact's details.

    Entering '.' keeps the current value of a field. Prints a message
    when no matching contact exists.
    """
    print('Edit contact')
    search_name = read_text('Enter the contact name: ')
    contact = find_contact(search_name)
    if contact is None:
        print('This name was not found.')
        return
    print('Name: ', contact.name)
    # Prompt for each editable field in the original order.
    prompts = (('Enter new name or . to leave unchanged: ', 'name'),
               ('Enter new address or . to leave unchanged: ', 'address'),
               ('Enter new telephone or . to leave unchanged: ', 'telephone'))
    for prompt, attribute in prompts:
        replacement = read_text(prompt)
        if replacement != '.':
            setattr(contact, attribute, replacement)
def add_session_to_contact():
    """Prompt for a name and record a work session for that contact."""
    print('add session')
    search_name = read_text('Enter the contact name: ')
    contact = find_contact(search_name)
    if contact is None:
        print('This name was not found.')
        return
    print('Name: ', contact.name)
    print('Previous hours worked:', contact.get_hours_worked())
    session_length = read_float(prompt='Session length: ')
    try:
        contact.add_session(session_length)
    except Exception as error:
        # add_session rejects out-of-range session lengths.
        print('Add hours failed:', error)
    else:
        print('Updated hours worked:', contact.get_hours_worked())
def save_contacts(file_name):
    """Pickle the contact list to file_name.

    I/O and pickling errors propagate to the caller.
    """
    print('save contacts')
    with open(file_name, 'wb') as output_file:
        pickle.dump(contacts, output_file)
def load_contacts(file_name):
    """Replace the global contact list with the pickled list in file_name.

    I/O and unpickling errors propagate to the caller.
    """
    global contacts
    print('Load contacts')
    with open(file_name, 'rb') as in_file:
        contacts = pickle.load(in_file)
menu='''Time Tracker

1. New Contact
2. Find Contact
3. Edit Contact
4. Add Session
5. Exit Program

Enter your command: '''

filename='contacts.pickle'

# Load previously saved contacts; start with an empty list if the
# pickle file is missing or unreadable.
try:
    load_contacts(filename)
except:
    print('Contacts file not found')
    contacts=[]

# Main command loop: dispatch on the menu choice.
# Option 5 saves the contacts back to disk and exits.
while True:
    command=read_int_ranged(prompt=menu,min_value=1,max_value=5)
    if command==1:
        new_contact()
    elif command==2:
        display_contact()
    elif command==3:
        edit_contact()
    elif command==4:
        add_session_to_contact()
    elif command==5:
        save_contacts(filename)
        break
| [
"nkcbg@yahoo.com"
] | nkcbg@yahoo.com |
b9c8c3198ea3b91ca79b7666122aeba124f8d46b | 8be217fe977aa0bcd9e375c75b0fb522f5bf0101 | /mergetwosortedlists21.py | 0065e807191a8350423dd2e81ae12019e30106ab | [] | no_license | blueones/LeetcodePractices | c63a5e773bebea17e988e8bb4962e012d7d402ba | 194375ba0c07e420f420aafec98aede2f9f5d8fa | refs/heads/master | 2021-07-14T14:21:55.389334 | 2021-01-24T22:13:21 | 2021-01-24T22:13:21 | 230,814,709 | 0 | 1 | null | 2020-02-25T02:58:04 | 2019-12-29T23:18:25 | Python | UTF-8 | Python | false | false | 947 | py | # Definition for singly-linked list.
class ListNode:
    """One node of a singly linked list."""
    def __init__(self, x):
        # A new node carries its payload and starts detached.
        self.val, self.next = x, None
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Merge two sorted linked lists into one sorted list.

        Nodes are relinked in place; on equal values the node from l2 is
        taken first (same tie-breaking as before). Returns the merged head,
        or None when both inputs are empty.
        """
        sentinel = ListNode(0)
        tail = sentinel
        while l1 and l2:
            if l2.val <= l1.val:
                tail.next = l2
                l2 = l2.next
            else:
                tail.next = l1
                l1 = l1.next
            tail = tail.next
        # At most one list still has nodes; append the remainder wholesale.
        tail.next = l1 or l2
        return sentinel.next
# Quick smoke test: merge a one-node list with an empty list and show the head.
sunnyNode = ListNode(1)
sunnyNode2 = None
print(Solution().mergeTwoLists(sunnyNode, sunnyNode2))
| [
"yiq.shang@gmail.com"
] | yiq.shang@gmail.com |
c34bf188a265f8034da6248364ca612ac567b648 | 2c906d9728ca9a0c61fb3abbe5d49d5889abb7cb | /Experiments/Tools/convert_ascii_to_qscore.py | 05f5215a8189bb94e6a68a970ad559301bd43b95 | [] | no_license | si-medbif/FastFASTQTransfer | 7d9d56cbe0975947a22d7b598c1149b973123f26 | 22404557f63e39c373a391a7b180f6929ee0d23d | refs/heads/master | 2023-05-04T20:42:07.557599 | 2021-05-13T18:20:35 | 2021-05-13T18:20:35 | 293,693,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from joblib import Parallel, delayed
import sys
import os
import glob
import random
def transform_file(file_path, destination):
    """Convert a file of ASCII-offset quality scores into plain scores.

    Each input line holds one integer; 33 is subtracted from it (presumably
    the Phred+33 ASCII offset — confirm against the pipeline) and the result
    is appended to destination/<base name>, where the base name is whatever
    follows the last backslash in file_path. Lines are buffered and flushed
    every 10,000,000 entries to bound memory usage.
    """
    full_destination_path = destination + '/' + file_path.split('\\')[-1]
    lines_buffer = []
    # Context manager guarantees the input handle is closed even on error.
    with open(file_path, 'r') as input_file:
        for score_line in input_file:
            lines_buffer.append(str(int(score_line.strip()) - 33))
            if len(lines_buffer) == 10000000:
                write_to_file(lines_buffer, full_destination_path)
                lines_buffer.clear()
    # Bug fix: flush the final partial buffer — the original dropped any
    # trailing chunk smaller than the flush threshold.
    if lines_buffer:
        write_to_file(lines_buffer, full_destination_path)

def write_to_file(lines, destination):
    """Append each string in lines to destination, one per line."""
    with open(destination, 'a') as output_file:
        output_file.writelines(line + '\n' for line in lines)
def main (args) :
    # Fan the conversion out over every merged quality file, using all
    # available cores (n_jobs=-1) in separate processes; verbose=10 makes
    # joblib print progress. `args` is accepted but unused.
    Parallel(n_jobs=-1, prefer="processes", verbose=10)(
        delayed(transform_file)(file_name, 'position_process/quality_merged_transformed')
        for file_name in sorted(glob.glob('position_process/quality_merged/*'))
    )

if __name__ == "__main__":
    main(sys.argv)
"peter_arnon1@icloud.com"
] | peter_arnon1@icloud.com |
d854a9656861182f77a2b70cf3e1606c7efd126d | a6f88a9a33f2da39f1c297864ac29e4395dcb817 | /Beginners/stdlib/csv/solution.py | ae1449f15a10d1b1205c7e0b649e3453654f78fd | [
"BSD-3-Clause"
] | permissive | csherfield/PythonTrainingExercises | b5e56c9ee67377f80404efc804df54cf06e0a366 | f9fb84af4f60d71d68f00c646c0949312b90ba3c | refs/heads/master | 2020-03-10T00:25:40.444426 | 2018-04-11T17:37:38 | 2018-04-11T17:37:38 | 129,081,385 | 0 | 0 | BSD-3-Clause | 2018-04-11T11:12:13 | 2018-04-11T11:12:13 | null | UTF-8 | Python | false | false | 1,859 | py | """Create a function that reads a CSV file and return a list of dictionaries
where the keys are the names in the first row and each dict contains the values
in each subsequent row.
A CSV file 'data.csv' is provided.
Issues: What happens if the row is too long or too short?
What happens with duplicate names in the first row.
Stretch: Convert the DoB strings to actual Python datetime.dates.
Created on 18 Feb 2016
@author: paulross
"""
import csv
import pytest
def read_csv(file_name='data.csv'):
    """Return a list of dicts from a csv file.

    Long-hand version: read every row, then pair each data row with the
    header names taken from the first row.
    """
    with open(file_name) as f:
        rows = [row for row in csv.reader(f)]
    header = rows[0]
    return [{header[i]: value for i, value in enumerate(row)}
            for row in rows[1:]]
def read_csv(file_name='data.csv'):
    """Return a list of dicts from a csv file.

    The easy way: csv.DictReader already maps each row onto the header names.
    """
    with open(file_name) as f:
        return list(csv.DictReader(f))
def test_read_csv():
    """Check read_csv against the known contents of data.csv."""
    names = ['Annabel', 'Brian', 'Charlie', 'Derek', 'Emily',
             'Fortune', 'Gerald', 'Harriet', 'India']
    # Dates run consecutively from 08/18/2007; ordinals count from 1.
    expected = [
        {'DoB': '08/%d/2007' % (18 + i), 'Name': name, 'Ordinal': str(i + 1)}
        for i, name in enumerate(names)
    ]
    assert expected == read_csv()
def main():
    """Run this module's tests and return the pytest exit code."""
    # Bug fix: pytest.main expects a list of command-line arguments;
    # passing a bare string is rejected by modern pytest versions.
    return pytest.main([__file__])

if __name__ == '__main__':
    main()
| [
"apaulross@gmail.com"
] | apaulross@gmail.com |
1c83965a3a194a42d6aeef21ffa870582a214071 | 707eca7efb59322d7fdd4f72a97d2281f37dda02 | /engines/trading_engine.py | 278da5c029132aaaa2398d5389c6a52c7a0666cc | [
"Apache-2.0"
] | permissive | maollm/futu_algo | 8e44736e78bfcaa9c8fe29f04e33b1850b8225c6 | 302f7145a14346e7bb8794fa5d2beccd86735a7d | refs/heads/master | 2023-04-01T22:23:18.632417 | 2021-04-13T19:12:27 | 2021-04-13T19:12:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,614 | py | # Futu Algo: Algorithmic High-Frequency Trading Framework
# Copyright (c) billpwchan - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Bill Chan <billpwchan@hotmail.com>, 2021
import configparser
import datetime
import glob
import itertools
from datetime import date
from futu import *
from engines import data_engine
from handlers.cur_kline_handler import CurKlineHandler
from handlers.rt_data_handler import RTDataHandler
from handlers.stock_quote_handler import StockQuoteHandler
from util import logger
class FutuTrade:
    """Trading engine wrapping the FUTU OpenAPI quote/trade contexts and a
    local SQLite data store: historical data download, real-time
    subscriptions, and basic account operations."""

    def __init__(self):
        """
        Futu Trading Engine Constructor.

        Reads FutuOpenD connection settings and credentials from config.ini,
        opens the quote context and the HK trade context, and initialises
        the database interface and logger.
        """
        self.config = configparser.ConfigParser()
        self.config.read("config.ini")
        self.quote_ctx = OpenQuoteContext(host=self.config['FutuOpenD.Config'].get('Host'),
                                          port=self.config['FutuOpenD.Config'].getint('Port'))
        self.trade_ctx = OpenHKTradeContext(host=self.config['FutuOpenD.Config'].get('Host'),
                                            port=self.config['FutuOpenD.Config'].getint('Port'))
        self.username = self.config['FutuOpenD.Credential'].get('Username')
        # self.password = self.config['FutuOpenD.Credential'].get('Password')
        self.password_md5 = self.config['FutuOpenD.Credential'].get('Password_md5')
        self.futu_data = data_engine.DatabaseInterface(database_path=self.config['Database'].get('Database_path'))
        self.default_logger = logger.get_logger("futu_trade")
        self.trd_env = TrdEnv.REAL if self.config.get('FutuOpenD.Config', 'TrdEnv') == 'REAL' else TrdEnv.SIMULATE
        # Futu-Specific Variables
        self.market_list = [Market.HK, Market.US, Market.SH, Market.SZ, Market.HK_FUTURE, Market.SG, Market.JP]
        self.security_type_list = [SecurityType.BOND, SecurityType.BWRT, SecurityType.STOCK, SecurityType.WARRANT,
                                   SecurityType.IDX, SecurityType.ETF, SecurityType.FUTURE, SecurityType.PLATE,
                                   SecurityType.PLATESET]
        self.reference_type_list = [SecurityReferenceType.WARRANT, SecurityReferenceType.FUTURE]

    def __del__(self):
        """
        Default Cleanup Operations for Futu Trade Engine. Disconnect all Quote & Trade Connections
        """
        self.default_logger.info("Deleting Quote_CTX Connection")
        self.quote_ctx.close()  # Close this connection; FutuOpenD auto-cancels the related subscriptions after 1 minute
        self.default_logger.info("Deleting Trade_CTX Connection")
        self.trade_ctx.close()  # Close this connection; FutuOpenD auto-cancels the related subscriptions after 1 minute

    def __unlock_trade(self):
        """
        Unlock the trading account, but only when running against TrdEnv.REAL.

        :raises Exception: when the unlock request fails
        """
        if self.trd_env == TrdEnv.REAL:
            ret, data = self.trade_ctx.unlock_trade(password_md5=self.password_md5)
            if ret == RET_OK:
                self.default_logger.info("Account Unlock Success.")
            else:
                raise Exception("Account Unlock Unsuccessful: {}".format(data))

    def __save_historical_data(self, stock_code: str, start_date: date, end_date: date = None,
                               k_type: object = KLType, force_update: bool = False) -> bool:
        """
        Save Historical Data (e.g., 1D, 1W, etc.) from FUTU OpenAPI to ./data folder. Saved in CSV Format
        :param stock_code: Stock Code with Format (e.g., HK.00001)
        :param start_date: Datetime Object that specifies the start date
        :param end_date: Datetime Object that specifies the end date. If left as None, it will be automatically calculated as 365 days after start_date
        :param k_type: FuTu KLType Object (only K_DAY and K_WEEK are supported here)
        :param force_update: re-download even when an output file already exists
        :return: True if data was fetched and saved, False if skipped or unsupported
        """
        out_dir = f'./data/{stock_code}'
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        if k_type == KLType.K_DAY:
            output_path = f'./data/{stock_code}/{stock_code}_{start_date.year}_1D.csv'
        elif k_type == KLType.K_WEEK:
            output_path = f'./data/{stock_code}/{stock_code}_{start_date.year}_1W.csv'
        else:
            self.default_logger.error(f'Unsupported KLType. Please try it later.')
            return False
        # Ensure update current day's 1M data & current year's 1D data
        if os.path.exists(output_path) and not force_update and (
                (start_date != datetime.today().date() and k_type == KLType.K_1M) or
                (start_date.year != datetime.today().date().year and (
                        k_type == KLType.K_DAY or k_type == KLType.K_WEEK))
        ):
            return False
        # Request Historical K-line Data (Daily)
        start_date = start_date.strftime("%Y-%m-%d")
        end_date = end_date.strftime("%Y-%m-%d") if end_date is not None else None
        while True:
            ret, data, page_req_key = self.quote_ctx.request_history_kline(stock_code, start=start_date,
                                                                           end=end_date,
                                                                           ktype=k_type, autype=AuType.QFQ,
                                                                           fields=[KL_FIELD.ALL],
                                                                           max_count=1000, page_req_key=None,
                                                                           extended_time=False)
            if ret == RET_OK:
                data.to_csv(output_path, index=False, encoding='utf-8-sig')
                self.default_logger.info(f'Saved: {output_path}')
                self.__store_data_database(data, k_type=k_type)
                return True
            else:
                # Retry Storing Data due to too frequent requests (max. 60 requests per 30 seconds)
                time.sleep(1)
                self.default_logger.error(f'Historical Data Store Error: {data}')

    def __store_data_database(self, data, k_type):
        """Persist every row of a K-line DataFrame into the SQLite store, then commit."""
        for index, row in data.iterrows():
            self.futu_data.add_stock_data(row['code'], row['time_key'], row['open'], row['close'], row['high'],
                                          row['low'], row['pe_ratio'], row['turnover_rate'], row['volume'],
                                          row['turnover'], row['change_rate'], row['last_close'], k_type)
        self.futu_data.commit()

    def get_market_state(self):
        """
        Query the global FutuOpenD / market state.

        :return: (ret, data). On RET_OK, data is a dict with the state of each
            market (market_sz / market_us / market_sh / market_hk /
            market_hkfuture / market_usfuture — see MarketState), the
            FutuOpenD version (server_ver), trade/quote login flags
            (trd_logined / qot_logined, '1' or '0'), the server timestamp
            (seconds) and the local machine timestamp. Otherwise data is an
            error string.
        """
        return self.quote_ctx.get_global_state()

    def get_referencestock_list(self, stock_code: str) -> pd.DataFrame:
        """
        Fetch securities related to the given stock, aggregated over every
        entry in self.security_type_list.

        NOTE(review): the Futu API documents this call as taking a
        SecurityReferenceType; iterating self.reference_type_list (which is
        otherwise unused) may have been intended here — confirm.

        :param stock_code: security id, e.g. 'HK.00700'
        :return: DataFrame with columns such as code, lot_size, stock_type,
            stock_name, list_time, warrant fields (wrt_valid / wrt_type /
            wrt_code) and futures fields (future_valid / future_main_contract /
            future_last_trade_time).
        """
        output_df = pd.DataFrame()
        for security_reference_type in self.security_type_list:
            ret, data = self.quote_ctx.get_referencestock_list(stock_code, security_reference_type)
            if ret == RET_OK:
                self.default_logger.info(f"Received Reference Stock List for {stock_code}")
                output_df = pd.concat([output_df, data], ignore_index=True)
            else:
                self.default_logger.error(f"Cannot Retrieve Reference Stock List for {stock_code}")
        return output_df

    def get_filtered_turnover_stocks(self) -> list:
        """
        A quick way to get all stocks with at least 100 million HKD turnover and a stock price >= 1 HKD
        :return: list of matching stock codes (empty list on API error)
        """
        simple_filter = SimpleFilter()
        simple_filter.filter_min = 1
        simple_filter.stock_field = StockField.CUR_PRICE
        simple_filter.is_no_filter = False
        financial_filter = AccumulateFilter()
        financial_filter.filter_min = 100000000
        financial_filter.stock_field = StockField.TURNOVER
        financial_filter.is_no_filter = False
        financial_filter.sort = SortDir.ASCEND
        financial_filter.days = 10
        begin_index = 0
        output_list = []
        while True:
            ret, ls = self.quote_ctx.get_stock_filter(market=Market.HK, filter_list=[simple_filter, financial_filter],
                                                      begin=begin_index)  # apply simple + financial screens to HK stocks
            if ret == RET_OK:
                last_page, all_count, ret_list = ls
                output_list.extend([item.stock_code for item in ret_list])
                begin_index += 200
                if begin_index >= all_count:
                    break
            elif ret == RET_ERROR:
                return []
        return output_list

    def get_data_realtime(self, stock_list: list, sub_type: SubType = SubType.K_1M, kline_num: int = 1000) -> dict:
        """
        Receive real-time K-Line data as initial technical indicators observations.
        Note: len(stock_list) * number of subscribed K-line types must be <= 100.
        :param stock_list: List of selected stocks ['HK.00009', 'HK.00001']
        :param sub_type: Futu subscription type
        :param kline_num: Number of observations (i.e., default to 100)
        :return: dictionary of k-line data keyed by stock code
        """
        input_data = {}
        ret_sub, err_message = self.quote_ctx.subscribe(stock_list, [sub_type], subscribe_push=False)
        for stock_code in stock_list:
            if ret_sub == RET_OK:  # subscription succeeded
                ret, data = self.quote_ctx.get_cur_kline(stock_code, kline_num, sub_type, AuType.QFQ)
                if ret == RET_OK:
                    input_data[stock_code] = input_data.get(stock_code, data)
                else:
                    self.default_logger.error(f'Cannot get Real-time K-line data: {data}')
        return input_data

    def update_1M_data(self, stock_code: str, years=2, force_update: bool = False) -> None:
        """
        Update 1M Data to ./data/{stock_code} folders for max. 2-years duration
        :param force_update: when True re-download the full range instead of the last 30 days
        :param stock_code: Stock Code with Format (e.g., HK.00001)
        :param years: 2 years
        """
        column_names = json.loads(self.config.get('FutuOpenD.DataFormat', 'HistoryDataFormat'))
        history_df = pd.DataFrame(columns=column_names)
        # If force update, update all 2-years 1M data. Otherwise only update the last week's data
        start_date = str((datetime.today() - timedelta(days=round(365 * years))).date()) if force_update else str(
            (datetime.today() - timedelta(days=30)).date())
        end_date = str(datetime.today().date())
        # This will give a list of dates between 2-years range
        date_range = pd.date_range(start_date, end_date, freq='d').strftime("%Y-%m-%d").tolist()
        # Retrieve the first page
        ret, data, page_req_key = self.quote_ctx.request_history_kline(stock_code,
                                                                       start=start_date,
                                                                       end=end_date,
                                                                       ktype=KLType.K_1M, autype=AuType.QFQ,
                                                                       fields=[KL_FIELD.ALL],
                                                                       max_count=1000, page_req_key=None,
                                                                       extended_time=False)
        if ret == RET_OK:
            history_df = pd.concat([history_df, data], ignore_index=True)
        else:
            self.default_logger.error(f'Cannot get Historical K-line data: {data}')
            return
        # Request all remaining pages
        while page_req_key is not None:
            # The inner loop is to ensure that whenever there is an error, we can re-try until it success
            while True:
                original_page_req_key = page_req_key
                ret, data, page_req_key = self.quote_ctx.request_history_kline(stock_code,
                                                                               start=start_date,
                                                                               end=end_date,
                                                                               ktype=KLType.K_1M, autype=AuType.QFQ,
                                                                               fields=[KL_FIELD.ALL],
                                                                               max_count=1000,
                                                                               page_req_key=page_req_key,
                                                                               extended_time=False)
                if ret == RET_OK:
                    history_df = pd.concat([history_df, data], ignore_index=True)
                    break
                else:
                    self.default_logger.error(f'Cannot get Historical K-line data: {data}')
                    # Revert back to previous page req key and re-try again
                    page_req_key = original_page_req_key
                    time.sleep(1)
        for input_date in date_range:
            output_path = f'./data/{stock_code}/{stock_code}_{input_date}_1M.csv'
            output_df = history_df[history_df['time_key'].str.contains(input_date)]
            output_df.to_csv(output_path, index=False, encoding='utf-8-sig')
            self.default_logger.info(f'Saved: {output_path}')
            self.__store_data_database(output_df, k_type=KLType.K_1M)

    def update_DW_data(self, stock_code: str, years=10, force_update: bool = False,
                       k_type: KLType = KLType.K_DAY) -> None:
        """
        Update 1D Data (365 days per file) to ./data/{stock_code} folders for max. 2-years duration
        :param force_update: re-download even when an output file already exists
        :param stock_code: Stock Code with Format (e.g., HK.00001)
        :param years: 10 years
        :param k_type: Futu K-Line Type
        """
        for i in range(0, round(years + 1)):
            day = date((datetime.today() - timedelta(days=i * 365)).year, 1, 1)
            if not self.__save_historical_data(stock_code=stock_code, start_date=day,
                                               k_type=k_type, force_update=force_update):
                continue
            time.sleep(0.6)

    def update_owner_plate(self, stock_list: list):
        """
        Update Owner Plate information for all equities in the Hong Kong stock market.
        :param stock_list: A list of all equities (i.e., stock code)
        """
        # Slice the list into 200-elements per list
        stock_lists = [stock_list[i:i + 200] for i in range(0, len(stock_list), 200)]
        output_df = pd.DataFrame()
        for stock_list in stock_lists:
            ret, data = self.quote_ctx.get_owner_plate(stock_list)
            if ret == RET_OK:
                output_df = pd.concat([output_df, data], ignore_index=True)
            else:
                self.default_logger.error(f'Cannot get Owner Plate: {data}')
            time.sleep(3.5)
        output_path = './data/Stock_Pool/stock_owner_plate.csv'
        output_df.to_csv(output_path, index=False, encoding='utf-8-sig')
        self.default_logger.info(f'Stock Owner Plate Updated: {output_path}')

    def update_stock_basicinfo(self):
        """
        Update stock static information for all markets and all forms of equities (E.g., Stock, Futures, etc.)
        """
        output_df = pd.DataFrame()
        for market, stock_type in itertools.product(self.market_list, self.security_type_list):
            ret, data = self.quote_ctx.get_stock_basicinfo(market=market, stock_type=stock_type)
            if ret == RET_OK:
                output_df = pd.concat([output_df, data], ignore_index=True)
            else:
                self.default_logger.error(f'Cannot get Stock Basic Info: {data}')
        output_path = './data/Stock_Pool/stock_basic_info.csv'
        output_df.to_csv(output_path, index=False, encoding='utf-8-sig')
        # NOTE(review): success message logged at error level — info was likely intended.
        self.default_logger.error(f'Stock Static Basic Info Updated: {output_path}')

    def store_all_data_database(self):
        """
        Store all files in ./data/{stock_code}/*.csv to the database in pre-defined format.
        """
        file_list = glob.glob(f"./data/*/*_1M.csv", recursive=True)
        for input_file in file_list:
            input_csv = pd.read_csv(input_file, index_col=None)
            self.default_logger.info(f'Saving to Database: {input_file}')
            self.__store_data_database(input_csv, k_type=KLType.K_1M)
        file_list = glob.glob(f"./data/*/*_1D.csv", recursive=True)
        for input_file in file_list:
            input_csv = pd.read_csv(input_file, index_col=None)
            self.default_logger.info(f'Saving to Database: {input_file}')
            self.__store_data_database(input_csv, k_type=KLType.K_DAY)
        file_list = glob.glob(f"./data/*/*_1W.csv", recursive=True)
        for input_file in file_list:
            input_csv = pd.read_csv(input_file, index_col=None)
            self.default_logger.info(f'Saving to Database: {input_file}')
            self.__store_data_database(input_csv, k_type=KLType.K_WEEK)

    def stock_quote_subscription(self, input_data: dict, stock_list: list, strategy_map: dict, timeout: int = 60):
        """
        Real-time quote callback: asynchronously handles quote pushes for the subscribed stocks.
        :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}
        :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])
        :param strategy_map: Strategies defined in ./strategies class. Should be inherited from based class Strategies
        :param timeout: Subscription Timeout in secs.
        """
        self.__unlock_trade()
        # Stock Quote Handler
        handler = StockQuoteHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,
                                    strategy_map=strategy_map, trd_env=self.trd_env)
        self.quote_ctx.set_handler(handler)  # register the real-time quote callback
        self.quote_ctx.subscribe(stock_list, [SubType.QUOTE, SubType.ORDER_BOOK, SubType.BROKER], is_first_push=True,
                                 subscribe_push=True)  # subscribe; FutuOpenD starts receiving continuous server pushes
        time.sleep(timeout)

    def rt_data_subscription(self, input_data: dict, stock_list: list, strategy_map: dict, timeout: int = 60):
        """
        Real-time RT (time-sharing) data callback: asynchronously handles RT-data pushes for the subscribed stocks.
        :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}
        :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])
        :param strategy_map: Strategies defined in ./strategies class. Should be inherited from based class Strategies
        :param timeout: Subscription Timeout in secs.
        """
        self.__unlock_trade()
        # RT Data Handler
        handler = RTDataHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,
                                strategy_map=strategy_map, trd_env=self.trd_env)
        self.quote_ctx.set_handler(handler)  # register the real-time RT-data callback
        self.quote_ctx.subscribe(stock_list, [SubType.RT_DATA, SubType.ORDER_BOOK, SubType.BROKER], is_first_push=True,
                                 subscribe_push=True)  # subscribe; FutuOpenD starts receiving continuous server pushes
        time.sleep(timeout)

    def cur_kline_subscription(self, input_data: dict, stock_list: list, strategy_map: dict, timeout: int = 60,
                               subtype: SubType = SubType.K_1M):
        """
        Real-time K-line callback: asynchronously handles K-line pushes for the subscribed stocks.
        :param input_data: Dictionary in Format {'HK.00001': pd.Dataframe, 'HK.00002': pd.Dataframe}
        :param stock_list: A List of Stock Code with Format (e.g., [HK.00001, HK.00002])
        :param strategy_map: Strategies defined in ./strategies class. Should be inherited from based class Strategies
        :param timeout: Subscription Timeout in secs.
        :param subtype: Subscription SubType for FuTu (i.e., Trading Frequency)
        """
        self.__unlock_trade()
        # cur Kline Handler
        handler = CurKlineHandler(quote_ctx=self.quote_ctx, trade_ctx=self.trade_ctx, input_data=input_data,
                                  strategy_map=strategy_map, trd_env=self.trd_env)
        self.quote_ctx.set_handler(handler)  # register the real-time K-line callback
        self.quote_ctx.subscribe(stock_list, [subtype, SubType.ORDER_BOOK, SubType.BROKER], is_first_push=True,
                                 subscribe_push=True)  # subscribe; FutuOpenD starts receiving continuous server pushes
        time.sleep(timeout)

    def display_quota(self):
        """
        Display Stock Subscription & Historical K-Line Quota
        """
        ret, data = self.quote_ctx.query_subscription()
        if ret == RET_OK:
            self.default_logger.info(f'Query Subscription Quota: \n{data}')
        ret, data = self.quote_ctx.get_history_kl_quota(get_detail=True)
        if ret == RET_OK:
            self.default_logger.info(f'Historical K-line Quota: \n{data}')

    def request_trading_days(self, start_date: str, end_date: str) -> dict:
        """
        Request HK trading days. Note: the result is derived from calendar
        days minus weekends and public holidays; temporary market closures
        are NOT excluded.
        :param start_date: start of the range, 'YYYY-MM-DD'
        :param end_date: end of the range, 'YYYY-MM-DD'
        :return: [{'time': '2020-04-01', 'trade_date_type': 'WHOLE'}, ...]
            (None is returned implicitly on error)
        """
        ret, data = self.quote_ctx.request_trading_days(TradeDateMarket.HK, start=start_date, end=end_date)
        if ret == RET_OK:
            self.default_logger.info(f'Trading Days: {data}')
            return data
        else:
            self.default_logger.error(f'error: {data}')
| [
"billpwchan@hotmail.com"
] | billpwchan@hotmail.com |
c27b80d12837b1f36e9a5eaaf6ebd29008d75246 | 532e1bdcc3f866a491b48440fe49ca9589ac019c | /automotora/core/admin.py | 882d4518bc583fddbafadc2ff472c40097f3ff07 | [] | no_license | bastianvargas/automotora | 569fab97329d9fa35b27a2b38a4da4f6c416fb7e | 768287218148664550581b2cec432f0b98364004 | refs/heads/master | 2020-04-13T16:45:46.347768 | 2018-12-27T19:05:18 | 2018-12-27T19:05:18 | 163,327,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.contrib import admin
from .models import Marca, Automovil
class AutomovilAdmin(admin.ModelAdmin):
    """Admin options for Automovil: list columns, search fields and brand filter."""
    list_filter = ('marca',)
    search_fields = ['patente', 'modelo']
    list_display = ('patente', 'marca', 'anio', 'modelo')
# Register both models with the admin site; Automovil uses the customised options.
admin.site.register(Marca)
admin.site.register(Automovil, AutomovilAdmin)
| [
"bastianlc@live.cl"
] | bastianlc@live.cl |
88d6ee612b0d1e97c48922b79fafa6f05b4281c6 | e0a785810cf6c966f7b7c8eb0ec7eaf945595bfe | /prob21.py | bcf15cee489044ded4c0bc486263991cb56048c9 | [] | no_license | DerekRies/eulers | e01d11aa94adcdeda388dd0da3bb40f017305e70 | 723e857dd9cb86c165d36dc50f36646ad6f86aed | refs/heads/master | 2021-01-10T18:04:51.540776 | 2016-02-06T13:16:54 | 2016-02-06T13:16:54 | 51,049,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # coding=utf-8
"""
Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
import utils
def amicable(n):
    """Return n's amicable partner d(n), or 0 when n is perfect or not amicable.

    Assumes utils.divisors(n) yields the proper divisors of n (those below n
    that divide it evenly) -- confirm against utils.
    """
    partner = sum(utils.divisors(n))
    if sum(utils.divisors(partner)) != n:
        return 0
    if partner == n:
        # Perfect numbers pair with themselves and do not count.
        return 0
    return partner
def main(n):
pairs = []
for a in xrange(n):
b = amicable(a)
if bool(b) and a < n and b < n:
# print a, b
if utils.list_has(pairs, a) or utils.list_has(pairs, b):
None
else:
pairs.append(a)
pairs.append(b)
print sum(pairs)
if __name__ == '__main__':
    # Project Euler 21: sum of the amicable numbers below 10000.
    main(10000)
"influenztial@gmail.com"
] | influenztial@gmail.com |
52f98364a0cef7d8e68fae2acff9757aa2344030 | 17f2128b748130e8601d3c4b5fded8ff7bcd49a8 | /turnin/mbaut030_lab3_part2_tests.py | b6d50959c5b42d09be81ab9c2c94e726f73caf86 | [] | no_license | Mavbrick/Lab_3 | 7c92d52fdada9e0b7b80539e9381ce796999da4b | cc4b145759b9392ef2e61a8afd05ac2aaa469aa9 | refs/heads/master | 2023-03-30T18:23:45.280159 | 2021-04-10T05:11:44 | 2021-04-10T05:11:44 | 356,374,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,475 | py | # Array of tests to run (in order)
# Each test contains
# description -
# steps - A list of steps to perform, each step can have
# inputs - A list of tuples for the inputs to apply at that step
# *time - The time (in ms) to wait before continuing to the next step
# and before checking expected values for this step. The time should be a multiple of
# the period of the system
# *iterations - The number of clock ticks to wait (periods)
# expected - The expected value at the end of this step (after the "time" has elapsed.)
# If this value is incorrect the test will fail early before completing.
# * only one of these should be used
# expected - The expected output (as a list of tuples) at the end of this test
# An example set of tests is shown below. It is important to note that these tests are not "unit tests" in
# that they are not ran in isolation but in the order shown and the state of the device is not reset or
# altered in between executions (unless preconditions are used).
# Truth table for the combinational design: each (PINA input, expected
# PORTC output) pair becomes one test case.  Every case drives PINA,
# waits five clock periods, and then checks PORTC.
_VECTORS = [
    (0x00, 0x40),
    (0x02, 0x60),
    (0x04, 0x70),
    (0x06, 0x38),
    (0x08, 0x3C),
    (0x0C, 0x3E),
    (0x0F, 0x3F),
]

tests = [{'description': 'PINA: 0x%02X => PORTC: 0x%02X' % (pina, portc),
          'steps': [{'inputs': [('PINA', pina)], 'iterations': 5}],
          'expected': [('PORTC', portc)],
          }
         for (pina, portc) in _VECTORS]
# Optionally you can add a set of "watch" variables these need to be global or static and may need
# to be scoped at the function level (for static variables) if there are naming conflicts. The
# variables listed here will display everytime you hit (and stop at) a breakpoint
#watch = ['<function>::<static-var>','PORTB']
| [
"mbaut030@wch136-29.cs.ucr.edu"
] | mbaut030@wch136-29.cs.ucr.edu |
b3cca6e0ac8a9ef1e9547d5f4501c40fe3a9887a | a0b4559ba6f1e3d08c56bf585c80540fa9bb0abe | /n_to_m_while_exercise.py | ecc3f719802511a8c705d225602779aeabb342dc | [] | no_license | tedgey/while_loop_exercises | 430ecd0e20771ab93d30ffbd28adfaaf37dfc445 | 912b3f477b1c187f095085c1fef1b4aa66d61bfc | refs/heads/master | 2020-05-15T05:50:42.349435 | 2019-04-18T15:24:50 | 2019-04-18T15:24:50 | 182,111,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # counting between two given numbers
# Count upward from the chosen start (exclusive) to the end (inclusive).
start_point = int(input("What number should we start on? "))
end_point = int(input("What number should we end on?"))
count = start_point
for count in range(start_point + 1, end_point + 1):
    print(count)
"thomasedgeworth@Thomass-MacBook-Air.local"
] | thomasedgeworth@Thomass-MacBook-Air.local |
d8d742854ec7842465b985ad93830852b7b6d3a1 | 8d14d526969d8e970254f08563ff2c6e6583dd35 | /Python/2019/Hafta20191122/venv/Scripts/easy_install-script.py | 530572ec9997ac842f173dfe02dcf73848586a38 | [] | no_license | osmanraifgunes/MedipolCodes | c29db62896162c4b1a2c8c274877fff63149f826 | 943b014269e9a7b529e74741ce14447dbd7d5df5 | refs/heads/master | 2023-01-09T10:31:02.907945 | 2020-06-09T18:05:04 | 2020-06-09T18:05:04 | 218,612,787 | 6 | 13 | null | 2023-01-07T18:58:55 | 2019-10-30T19:59:16 | Python | UTF-8 | Python | false | false | 453 | py | #!C:\code\MedipolCodes\Python\Hafta20191122\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper for `easy_install`.
# NOTE(review): generated at install time -- do not hand-edit; regenerate
# by reinstalling setuptools instead.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and run the console-script entry point, exiting with its status.
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"osmanraifgunes@gmail.com"
] | osmanraifgunes@gmail.com |
c7e8a1160734707c181df50a65c90179c55ba35b | 55dca9ceb218bc3788b8c4e7eef98d7eb563e60d | /pybo/migrations/0008_board_category_guestbook.py | d2005e2790419fdc3c2ab65b369ad0d8a9877e33 | [] | no_license | venttii/pybo | bdff32b09f6b38cab0002ed74f4fdd41ce78b5b2 | 81a473787431e06d7bb3c32be42dba1c3b80211b | refs/heads/master | 2023-02-20T21:29:15.545279 | 2021-01-27T16:15:35 | 2021-01-27T16:15:35 | 329,205,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | # Generated by Django 3.1.3 on 2021-01-27 08:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Generated schema migration: adds the Category, Guestbook and Board models.

    NOTE(review): produced by `makemigrations`; hand edits must keep the
    schema consistent with the already-applied migration history.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pybo', '0007_auto_20210113_1148'),
    ]
    operations = [
        # Board category: unique name, optional description, and a flag
        # saying whether posts in the category accept answers.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
                ('description', models.CharField(blank=True, max_length=200, null=True)),
                ('has_answer', models.BooleanField(default=True)),
            ],
        ),
        # Guestbook entry: free text protected by a per-entry password,
        # stamped with the author's IP address.
        migrations.CreateModel(
            name='Guestbook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('password', models.CharField(max_length=50)),
                ('create_date', models.DateTimeField()),
                ('modify_date', models.DateTimeField(blank=True, null=True)),
                ('ip_address', models.GenericIPAddressField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_guestbook', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Board post: belongs to a Category, tracks views/notice flag, and
        # records voters through a many-to-many to the user model.
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('create_date', models.DateTimeField()),
                ('modify_date', models.DateTimeField(blank=True, null=True)),
                ('view_count', models.IntegerField(blank=True, default=0, null=True)),
                ('notice', models.BooleanField(default=False)),
                ('ip', models.CharField(max_length=50)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_board', to=settings.AUTH_USER_MODEL)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_board', to='pybo.category')),
                ('voter', models.ManyToManyField(related_name='voter_board', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"ehdgus2357@gmail.com"
] | ehdgus2357@gmail.com |
dd3b524eb8f66a476b7e2643fdedf9fb7d0e6480 | 380a320fbdde9189c39ebbd055fedefa205fd541 | /cplane_np_hw.py | f9faf3cea48511153847e3c933044b6001dfe752 | [
"MIT"
] | permissive | chapman-cs510-2017f/cw-08-keith_jet | ba4f94e8fec744f4c47cbf0d6f17fd884d912b4b | cff072045fe9c7aec6a7407ed2db3cc2ff7d63e8 | refs/heads/master | 2021-07-16T00:17:19.440631 | 2017-10-20T14:12:07 | 2017-10-20T14:12:07 | 107,347,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,997 | py | import cplane_np as c_np
import numpy as np
from cplane_np import ArrayComplexPlane
import matplotlib.pyplot as plt
class JuliaPlane(ArrayComplexPlane):
    """A 2000x2000 complex plane over [-1,1]x[-1,1] whose points are
    replaced by their Julia escape counts for the parameter c."""

    def __init__(self, c):
        """Create the plane and immediately transform it with julia(c)."""
        self.xmin = -1
        self.xmax = 1
        self.xlen = 2000
        self.ymin = -1
        self.ymax = 1
        self.ylen = 2000
        self.fs = []
        self.c = c
        self.__initplane__()

    def __initplane__(self):
        """Build the raw complex grid, then map julia(self.c) over it."""
        super().__initplane__()  # parent fills self.plane with x + y*1j points
        fv = np.vectorize(julia(self.c))
        self.plane = fv(self.plane)

    def refresh(self, c=None):
        """Regenerate the complex plane from the stored axis attributes.

        If c is given, it becomes the new Julia parameter before the
        plane is rebuilt (backward-compatible generalisation: the old
        no-argument call still works, and fromCSV()/the demo code need
        the one-argument form).  fs is reset to an empty list so no
        extra functions transform the fresh plane.
        """
        if c is not None:
            self.c = c
        self.fs = []
        self.__initplane__()

    def toCSV(self, filename):
        """Append the parameters and the transformed plane to filename.

        Format (one value per line): file name, "Paratemters:" header,
        c=..., xmin=..., xmax=..., xlen=..., ymin=..., ymax=..., ylen=...,
        two blank lines, a caption, then one comma-separated row per
        plane row.  fromCSV() relies on this exact line layout.
        """
        # with-block guarantees the handle is closed even if a write fails.
        with open(filename, "a") as f:
            f.write(filename+"\n")
            f.write("Paratemters:")  # header text kept as-is (fromCSV skips it by line count)
            f.write("\n")
            f.write('c='+str(self.c)+"\n")
            f.write("xmin="+str(self.xmin)+"\n")
            f.write("xmax="+str(self.xmax)+"\n")
            f.write("xlen="+str(self.xlen)+"\n")
            f.write("ymin="+str(self.ymin)+"\n")
            f.write("ymax="+str(self.ymax)+"\n")
            f.write("ylen="+str(self.ylen)+"\n")
            f.write("\n")
            f.write("\n")
            f.write("The array of the transformed plane by Julia:\n")
            for i in range(0, len(self.plane)):
                rowval = ''
                for j in range(0, len(self.plane[i])):
                    rowval = rowval+str(self.plane[i][j])+','
                rowval = rowval[:-1]  # drop the trailing ','
                f.write(rowval)
                f.write("\n")

    def fromCSV(self, filename):
        """Load parameters from a CSV written by toCSV() and rebuild the plane."""
        # with-block closes the handle (the original leaked it).
        with open(filename, "r") as f:
            # Skip the file-name line and the "Paratemters:" header.
            f.readline()
            f.readline()
            # Parse 'c=...' then the six axis parameters 'xmin=...' etc.
            r = f.readline()
            self.c = complex(r[2:])
            r = f.readline()
            self.xmin = int(r[5:])
            r = f.readline()
            self.xmax = int(r[5:])
            r = f.readline()
            self.xlen = int(r[5:])
            r = f.readline()
            self.ymin = int(r[5:])
            r = f.readline()
            self.ymax = int(r[5:])
            r = f.readline()
            self.ylen = int(r[5:])
        # Rebuild with the loaded parameters.  This call previously raised
        # TypeError because refresh() accepted no argument; refresh(c=None)
        # above fixes that without breaking existing no-argument callers.
        self.refresh(self.c)

    def show(self):
        """Render the escape-count plane with matplotlib."""
        plt.imshow(self.plane, interpolation = 'bicubic', cmap =('viridis'), extent = (self.xmin,self.xmax,self.ymin,self.ymax) )
        plt.title('c='+str(self.c))
        plt.show()
def julia(c):
'''
This function returns a function specified by the following algorithm:
1. The returned function should take one complex parameter z as an input, and return one positive integer as an output.
2. If the input number z has a magnitude abs(z) larger than 2, the function should output the integer 1.
3. Otherwise, set a counter n=1.
4. Increment n by 1, then transform the input z according to the formula z = z**2 + c.
Check the resulting magnitude abs(z): If the magnitude now exceeds 2, then return the value of n;
If the magnitude does not yet exceed 2, repeat this step.
5. If the positive integer max is reached before the magnitude of z exceeds 2, the preceding loop should abort
and return the output integer 0.
input parameters:
c: a complex
maxnumber: an optional positive integer, if it is reached before the magnitude of z exceeds 2, the preceding loop should abort
and return the output integer 0
return:
return a function with complex z as an input and return one postive interger as an output
Examples of use of this function:
>>> f1 = julia(c,100)
>>> f2 = f1(z)
'''
def magnitude(z):
n=0
if abs(z)>2:
#print("init abs(z)>2")
return 1
else:
n=1
while abs(z) < 2:
z=z**2+c
n +=1
return n
return magnitude
#jp=JuliaPlane(1+0.5*1j)
#print("write para and transformed plane to CSV file")
#jp.toCSV("jp.csv")
#print("ream para and transformed plane from CSV file")
#jp.fromCSV("jp.csv")
#print("refresh the plane with para from CSV file")
#print(jp.plane)
# Disabled demo code, kept inside a string literal so it never executes.
'''
print("what happen when refreh by c=0.5+0.5*1j")
jp.refresh(0.5+0.5*1j)
print(jp.plane)
print("what happen when c=0")
jp=JuliaPlane(0+0*1j)
'''
#f1=julia(-0.01-0.01*1j,100)
#print(f1)
#f2=f1(1+2*1j)
#print(f2) | [
"jiali@chapman.edu"
] | jiali@chapman.edu |
cffdbd0ab90b9b59ef7a69aff564ea1323fbe6b4 | 3181efe062a6745fc2e5d182260b8e94ce6c5701 | /0MyProject_Quant/海龟反转策略/4_2.方向过滤参数自动选择及策略回测_并行.py | 2c92a341e8f5ec2c95609c7db032528948d0fb42 | [] | no_license | LibreChou/PythonLearning | e240fddc559dc8614d4db95e79d047b18cc1be52 | 562ded21e84b68f43c539c65b91aed3a880162ce | refs/heads/master | 2023-03-12T12:18:33.501881 | 2021-03-04T11:33:42 | 2021-03-04T11:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | # Author:Zhang Yuan
import warnings
warnings.filterwarnings('ignore')
from MyPackage import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
#------------------------------------------------------------
__mypath__ = MyPath.MyClass_Path("") # 路径类
mylogging = MyDefault.MyClass_Default_Logging(activate=True, filename=__mypath__.get_desktop_path()+"\\方向过滤策略回测.log") # 日志记录类,需要放在上面才行
myfile = MyFile.MyClass_File() # 文件操作类
myword = MyFile.MyClass_Word() # word生成类
myexcel = MyFile.MyClass_Excel() # excel生成类
mytime = MyTime.MyClass_Time() # 时间类
myplt = MyPlot.MyClass_Plot() # 直接绘图类(单个图窗)
mypltpro = MyPlot.MyClass_PlotPro() # Plot高级图系列
myfig = MyPlot.MyClass_Figure(AddFigure=False) # 对象式绘图类(可多个图窗)
myfigpro = MyPlot.MyClass_FigurePro(AddFigure=False) # Figure高级图系列
mynp = MyArray.MyClass_NumPy() # 多维数组类(整合Numpy)
mypd = MyArray.MyClass_Pandas() # 矩阵数组类(整合Pandas)
mypdpro = MyArray.MyClass_PandasPro() # 高级矩阵数组类
myDA = MyDataAnalysis.MyClass_DataAnalysis() # 数据分析类
myDefault = MyDefault.MyClass_Default_Matplotlib() # 画图恢复默认设置类
# myMql = MyMql.MyClass_MqlBackups() # Mql备份类
# myBaidu = MyWebCrawler.MyClass_BaiduPan() # Baidu网盘交互类
# myImage = MyImage.MyClass_ImageProcess() # 图片处理类
myBT = MyBackTest.MyClass_BackTestEvent() # 事件驱动型回测类
myBTV = MyBackTest.MyClass_BackTestVector() # 向量型回测类
myML = MyMachineLearning.MyClass_MachineLearning() # 机器学习综合类
mySQL = MyDataBase.MyClass_MySQL(connect=False) # MySQL类
mySQLAPP = MyDataBase.MyClass_SQL_APPIntegration() # 数据库应用整合
myWebQD = MyWebCrawler.MyClass_QuotesDownload(tushare=False) # 金融行情下载类
myWebR = MyWebCrawler.MyClass_Requests() # Requests爬虫类
myWebS = MyWebCrawler.MyClass_Selenium(openChrome=False) # Selenium模拟浏览器类
myWebAPP = MyWebCrawler.MyClass_Web_APPIntegration() # 爬虫整合应用类
myEmail = MyWebCrawler.MyClass_Email() # 邮箱交互类
myReportA = MyQuant.MyClass_ReportAnalysis() # 研报分析类
myFactorD = MyQuant.MyClass_Factor_Detection() # 因子检测类
myKeras = MyDeepLearning.MyClass_tfKeras() # tfKeras综合类
myTensor = MyDeepLearning.MyClass_TensorFlow() # Tensorflow综合类
myMT5 = MyMql.MyClass_ConnectMT5(connect=False) # Python链接MetaTrader5客户端类
myMT5Pro = MyMql.MyClass_ConnectMT5Pro(connect=False) # Python链接MT5高级类
myMT5Indi = MyMql.MyClass_MT5Indicator() # MT5指标Python版
myDefault.set_backend_default("Pycharm") # Pycharm下需要plt.show()才显示图
#------------------------------------------------------------
'''
# 1.根据前面 信号利润过滤测试 输出的文档,解析文档名称,读取参数,选择极值。
# 2.一个特定的策略参数作为一个目录,存放该下面所有指标的结果。
# 3.不同名称的指标会自动判断极值,且输出图片。最后会输出表格文档,整理这些极值。
# 4.由于不是大型计算,并行是一次性所有并行。
# 5.并行运算注意内存释放,并且不要一次性都算完,这样容易爆内存。分组进行并行。
'''
'''
# 说明
# 这里的策略回测是建立在前面已经对指标的范围过滤做了参数选择。
# 前面对每个具体策略都通过指标过滤方式,算出了各个指标过滤效果的极值。我们根据极值对应的指标值做回测。
# 画的图中,分别展示 过滤前训练集价格和指标、过滤前训练集策略、过滤后全集价格和指标、过滤后全集策略以及训练集策略。
# 方向过滤作用到整个样本。
# 并行以品种来并行,以时间框来分组。
# 由于指标较多,并行运算时间长,防止出错输出日志。
'''
#%%
from MyPackage.MyProjects.向量化策略测试.Direct_Filter import Auto_Choose_DFilter_Param
choo_para = Auto_Choose_DFilter_Param()
myDefault.set_backend_default("agg")
#%% ******需要修改******
choo_para.symbol_list = myMT5Pro.get_main_symbol_name_list()
choo_para.total_folder = "F:\\工作---策略研究\\公开的海龟策略\\_海龟反转研究"
choo_para.core_num = -1
#%%
from MyPackage.MyProjects.向量化策略测试.Direct_Filter import Direct_Filter_BackTest
rf_bt = Direct_Filter_BackTest()
myplt.set_backend("agg") # agg 后台输出图片,不占pycharm内存
#%%
rf_bt.symbol_list = choo_para.symbol_list
rf_bt.total_folder = choo_para.total_folder
rf_bt.core_num = -1
#%% ******修改函数******
# 策略的当期信号(不用平移):para_list策略参数,默认-1为lag_trade,-2为holding。
def stratgy_signal(dataframe, para_list=list or tuple):
return myBTV.stra.turtle_reverse(dataframe, para_list[0], price_arug= ["High", "Low", "Close"])
rf_bt.stratgy_signal = stratgy_signal
#%%
# ---多进程必须要在这里执行
if __name__ == '__main__':
# ---
print("开始方向过滤参数自动选择:")
choo_para.main_func()
print("开始方向过滤策略回测:")
rf_bt.main_func()
| [
"39754824+MuSaCN@users.noreply.github.com"
] | 39754824+MuSaCN@users.noreply.github.com |
45e74d9dfdecb336974b4cf3cdb41aa55ebedeb6 | ffcb2fdd33dfb0a3271dbfb01ddcfdb61793828f | /vgg19/deploy.py | a83bc95c67226f91bfb48eeb502eba6336ed362a | [] | no_license | Richardych/challengerAI | c148f91bdbd6b15c660998f2e880edbdd98f9c4d | af622a2dd780a457f13fa037498928b208d93ee0 | refs/heads/master | 2021-08-24T15:34:10.452118 | 2017-12-10T05:28:29 | 2017-12-10T05:28:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | # coding=utf-8
import os
import sys
root='/home/yuchaohui/ych/caffe_ych/' # Caffe installation root directory
sys.path.insert(0,root+'build/python')  # make the locally built pycaffe importable
import caffe
import numpy as np
deploy=root + 'models/vgg/vgg19/VGG_ILSVRC_19_layers_deploy.prototxt'    # network definition (deploy prototxt)
caffe_model=root + 'models/vgg/vgg19/caffe_vgg_train_iter_60000.caffemodel'   # trained caffemodel weights
labels_filename = root +'models/vgg/vgg19/scene_classes.csv'  # class-name file mapping numeric labels back to names
#mean_file = 'mean.npy'   #root + 'models/vgg/VGG_mean.binaryproto'
import os  # NOTE(review): duplicate of the import above; harmless
# NOTE(review): 'dir' shadows the built-in dir(); rename if this module grows.
dir = root+'scene_test_a_images_20170922'
filelist=[]
filenames = os.listdir(dir)
# Collect the full path of every image in the test-set directory.
for fn in filenames:
    fullfilename = os.path.join(dir,fn)
    filelist.append(fullfilename)
#img=root+'testaip/20e7c0534d98b626b7faa9c41df00e2bc65e6cc5.jpg' # a randomly chosen test image
def Test(img):
caffe.set_mode_gpu()
caffe.set_device(0)
net = caffe.Net(deploy,caffe_model,caffe.TEST) #加载model和network
#图片预处理设置
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) #设定图片的shape格式(1,3,28,28)
transformer.set_transpose('data', (2,0,1)) #改变维度的顺序,由原始图片(28,28,3)变为(3,28,28)
#transformer.set_mean('data', np.load(mean_file).mean(1).mean(1)) #减去均值,前面训练模型时没有减均值,这儿就不用
transformer.set_raw_scale('data', 255) # 缩放到【0,255】之间
transformer.set_channel_swap('data', (2,1,0)) #交换通道,将图片由RGB变为BGR
im=caffe.io.load_image(img) #加载图片
net.blobs['data'].data[...] = transformer.preprocess('data',im) #执行上面设置的图片预处理操作,并将图片载入到blob中
#执行测试
out = net.forward()
labels = np.loadtxt(labels_filename, str, delimiter='\t') #读取类别名称文件
prob= net.blobs['prob'].data[0].flatten() #取出最后一层(prob)属于某个类别的概率值,并打印,'prob'为最后一层的名称
#print 'prob:>>>>',prob,'\n'
print len(prob)
order=prob.argsort()[-3:] #[4] #将概率值排序,取出最大值所在的序号 ,9指的是分为0-9十类
print 'order:>>>>>>>>',order,'\n'
#print len(order)
#argsort()函数是从小到大排列
#print 'the class is:',labels[order] #将该序号转换成对应的类别名称,并打印
f=file(root+"models/vgg/vgg19/resultlabel.txt","a+")
f.writelines(img+' '+str(order)+'\n')
# Classify the remaining images; the start index 2196 resumes a run that
# was interrupted part-way through the file list.
for i in range(2196, len(filelist)):
    img= filelist[i]
    print img
    Test(img)
# Disabled reference code: converts a mean.binaryproto image-mean file to
# .npy format.  Kept inside a string literal so it never executes.
"""
def meanprotoTonpy(meanprotofile):
    import numpy as np
    MEAN_PROTO_PATH = meanprotofile               # 待转换的pb格式图像均值文件路径
    MEAN_NPY_PATH = 'mean.npy'                      # 转换后的numpy格式图像均值文件路径
    blob = caffe.proto.caffe_pb2.BlobProto()           # 创建protobuf blob
    data = open(MEAN_PROTO_PATH, 'rb' ).read()         # 读入mean.binaryproto文件内容
    blob.ParseFromString(data)                         # 解析文件内容到blob
    array = np.array(caffe.io.blobproto_to_array(blob))# 将blob中的均值转换成numpy格式,array的shape (mean_number,channel, hight, width)
    mean_npy = array[0]                                # 一个array中可以有多组均值存在,故需要通过下标选择其中一组均值
    np.save(MEAN_NPY_PATH ,mean_npy)
"""
| [
"yuchaohui1234@gmail.com"
] | yuchaohui1234@gmail.com |
7c5b13fc736557163c95d289141ff4870117e2e0 | b5a9469cb779031936bb613719397d7b3c279626 | /backend/apps/privacy/sitemaps.py | 2bc9aa6eae410f25322fcf965d670fd616158b73 | [] | no_license | arsavit/Cidsfg_copy | a34858d63749db0e821cb2f26b1eb31c4565c0f9 | 0145e9f1a397899b03a8d767fb96f1d238ec21f9 | refs/heads/main | 2023-07-08T11:18:10.042595 | 2021-08-11T08:09:27 | 2021-08-11T08:09:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from django.contrib.sitemaps import Sitemap
from .models import Privacy
class PrivacySitemap(Sitemap):
    """Sitemap entry for the PRIVACY POLICY page.

    Publishes a single URL (/privacy/) backed by the most recently
    created Privacy object.
    NOTE(review): Django's Sitemap usually defines location() as a
    method; confirm that a plain string attribute is honoured by the
    Django version in use.
    """
    changefreq = 'weekly'   # crawler hint: content changes roughly weekly
    priority = 0.9          # high priority relative to other site URLs
    location = '/privacy/'  # fixed URL for every item
    def items(self):
        # Only the newest Privacy record (highest id) is published.
        return Privacy.objects.all().order_by('-id')[:1]
    def lastmod(self, obj):
        # Last-modified timestamp reported in the sitemap.
        return obj.updated
| [
"arsavit@gmail.com"
] | arsavit@gmail.com |
699c75b97c7a8afdb70e4ce79f91ad7f94158668 | 95a2bb2ef56ca80ad7cb51d67a42242bf18fa337 | /jump/models.py | b17ca1bff68351cf244316d1a03fec9d36836f23 | [] | no_license | zhangxianbo/soms | ac2183c0a285fe56456461101ecc78ca314c3929 | 0ba1802b0e2e9111e0f1855480723be8e2941bcd | refs/heads/master | 2021-04-09T15:46:14.086425 | 2016-07-14T08:15:21 | 2016-07-14T08:15:21 | 62,615,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | #coding=utf-8
from django.db import models
from datetime import datetime
# Create your models here.
class Host(models.Model):
    """A managed server reachable through the jump gateway."""
    hostid = models.AutoField(primary_key=True)
    idc = models.CharField('机房',max_length=50)  # data centre / machine room
    addr = models.CharField('机架等标识',max_length=50)  # rack or location label
    sn = models.CharField('序列号',max_length=30,blank=True)  # hardware serial number
    ip = models.GenericIPAddressField('ip地址')  # host IP address
    port = models.IntegerField()  # service (SSH) port
    online = models.CharField('在线状态',max_length=10)  # online status
    use = models.CharField('用途',max_length=50,blank=True)  # purpose / role
    switch = models.CharField('交换机',max_length=50,blank=True)  # connected switch
    comment = models.CharField('备注',max_length=100, blank=True, null=True)  # free-form note
    def __unicode__(self):
        # Python 2 display string: the host's IP (leading space kept as-is).
        return u' %s' % (self.ip)
class User(models.Model):
    """An account allowed to log in through the jump gateway."""
    userid = models.AutoField(primary_key=True)
    username = models.CharField('用户名',max_length=20)  # login name
    # NOTE(review): stored as a plain CharField; confirm the password is
    # hashed before it is saved here.
    password = models.CharField('密码',max_length=100,blank=True)
    #ip = models.ManyToManyField(Host)
    name = models.CharField('姓名',max_length=50,blank=True)  # real name
    email = models.EmailField('邮箱',max_length=50,blank=True)  # contact e-mail
    # default is the callable datetime.now, evaluated when a row is created.
    update_time = models.DateTimeField('更新时间',default=datetime.now)
    def __unicode__(self):
        # Python 2 display string: the username.
        return u'%s' % (self.username)
    class Meta:
        ordering = ['username']
class Userhost(models.Model):
    """Join table: which hosts a user may access, with a permission code."""
    #uid = models.OneToOneField(User)
    #hid = models.ManyToManyField(Host)
    uid = models.ForeignKey(User)
    hid = models.ForeignKey(Host)
    permcode = models.CharField('权限位',max_length=10,blank=True)  # permission bits
    def __unicode__(self):
        # Python 2 display string: "<user> <host> <permcode>".
        return u'%s %s %s' % (self.uid,self.hid,self.permcode)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
56872ad5888944ca9236de18d8891bea6993cbc7 | b27b0bcd22973d029688ea3f10c9abfe651800a4 | /abc134/abc_134_c.py | ead15f57bd16c735af615104664a2ecfe8eb99b1 | [] | no_license | KouheiFurukawa/atcoder-back-number | 8f6b7c6704bc6fb32bb8ca9ccc9971650c0fdf8f | 2fe11830614bf2ce64ee7947734043fe47800277 | refs/heads/master | 2020-06-03T13:45:54.531015 | 2020-05-17T05:53:39 | 2020-05-17T05:53:39 | 191,591,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from sys import stdin
# ABC134 C: for every element, print the maximum over all the OTHER
# elements.  One pass tracks the largest value and the best answer when
# that largest value is excluded; a second pass prints per element.
N = int(stdin.readline().rstrip())
A = [int(stdin.readline().rstrip()) for _ in range(N)]

largest, runner_up = 0, -1
for value in A:
    if value > largest:
        largest, runner_up = value, largest
    elif runner_up < value <= largest:
        # Duplicated maxima also land here, so runner_up equals the
        # maximum when it occurs more than once -- exactly what the
        # output pass needs.
        runner_up = value

for value in A:
    print(runner_up if value == largest else largest)
| [
"furukawabashi42@gmail.com"
] | furukawabashi42@gmail.com |
058648be146529a5630cfb86feda2ef00f44c42f | 8be2fac312e89453700e0a216fa05867caa12c5f | /hinge_gradientdescent_adptSetpsize.py | 5089403a60cf057c9759cf38c3d31e3ce7d57cfb | [] | no_license | saumyachoksi/Machine-Learning-Algorithms | 8678070716eb9c0bb1f46ab38ba0ccbc396bdc8f | 141d7861c53aab8b3143e610a85b87af69e3b063 | refs/heads/main | 2023-03-29T21:04:04.573066 | 2021-03-16T23:38:58 | 2021-03-16T23:38:58 | 345,784,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,448 | py | import sys
import math
import random
#datafile = sys.argv[1]
datafile = "ionosphere/ionosphere.data"
#datafile = "data.txt"
f = open(datafile)
data = []
i=0  # NOTE(review): unused after initialisation
l = f.readline()
#** Read Data **
# Each whitespace-separated line becomes one row of floats; a constant 1
# is appended as the bias feature, so cols below includes the bias column.
while(l != ''):
    a = l.split()
    l2 = []
    for j in range(0, len(a), 1):
        l2.append(float(a[j]))
    l2.append(1)
    data.append(l2)
    l = f.readline()
rows = len(data)
cols = len(data[0])
f.close()
#print(rows,cols)
#print (data)
#** Read labels **
#labelfile = sys.argv[2]
labelfile = "ionosphere/ionosphere.trainlabels.0"
#labelfile = "labels.txt"
# Each label line is "<label> <row-index>".  Label 0 is remapped to -1 so
# the hinge loss can use the usual {-1, +1} convention; n counts how many
# rows carry each raw label.  Rows absent from this file are unlabeled
# and get classified at the end of the script.
f = open(labelfile)
trainlabels = {}
n = []
n.append(0)
n.append(0)
l = f.readline()
while(l != ''):
    a = l.split()
    if(int(a[0]) == 0):
        trainlabels[int(a[1])] = -1
    else:
        trainlabels[int(a[1])] = int(a[0])
    l = f.readline()
    n[int(a[0])] += 1
f.close()  # close the label file (the original leaked this handle)
#** Initialize w and dellf**
# Start from small random weights (bias included as the last component)
# and a zeroed gradient accumulator of the same length.
w = [random.uniform(-0.01, 0.01) for _ in range(cols)]
dellf = [0 for _ in range(cols)]
print ("Intial W::",w)
def dot(x, y):
    """Return the dot product of two equal-length numeric sequences."""
    total = 0
    for a, b in zip(x, y):
        total += a * b
    return total
## Gradient descent iteration
#eta = 0.001
# Candidate step sizes tried on every iteration; the one giving the
# lowest hinge loss is then actually applied.
eta_list = [1,0.1,0.01,0.001,0.0001,0.00001,0.000001,0.0000001,0.00000001,0.000000001,0.0000000001,0.00000000001]
theta = 0.001 # Stopping Condition
#theta = 0.001 # Stopping Condition
preverror = float ('inf')
error = 0
bestobj = float ('inf')
# Hinge loss of the initial random weights over the labeled rows.
for i in range(0, rows, 1):
    if(trainlabels.get(i)!= None):
        #error += ((trainlabels[i] - dot(w,data[i]))**2)
        error += max(0, 1- (trainlabels[i]*dot(w,data[i])))
#print (error)
# Iterate until the hinge loss changes by no more than theta.
while (abs(preverror - error) > theta):
    preverror = error
    # Zero the gradient accumulator.
    for j in range(0, cols, 1):
        dellf[j] = 0
    # Hinge-loss subgradient: rows inside the margin (y * w.x < 1)
    # contribute -y * x; all other rows contribute nothing.
    for i in range(0, rows, 1):
        if(trainlabels.get(i) != None):
            dp = dot(w,data[i])
            for j in range(0, cols, 1):
                if ((trainlabels[i] * dp)<1):
                    dellf[j] += -(data[i][j]*trainlabels[i])
                else:
                    dellf[j] += 0
    #print ("dellf:",dellf)
    # Adaptive step size: provisionally apply each candidate eta, measure
    # the resulting loss, then undo the step before trying the next one.
    for k in range(0,len(eta_list),1):
        eta = eta_list[k]
        for j in range(0, cols, 1):
            w[j] = w[j] - eta * (dellf[j])
        error = 0
        for i in range(0, rows, 1):
            if(trainlabels.get(i)!= None):
                #error += ((trainlabels[i] - dot(w,data[i]))**2)
                error += max(0, 1- (trainlabels[i]*dot(w,data[i])))
        print ("error",error)
        if (bestobj>error):
            bestobj = error
            best_eta = eta
        # Undo the provisional step.
        for j in range(0, cols, 1):
            w[j] = w[j] + eta * (dellf[j])
    #print("ETA:", best_eta)
    # NOTE(review): bestobj is never reset between outer iterations, so
    # best_eta only changes when the all-time best loss improves --
    # confirm this is the intended behaviour.
    # Apply the best step size for real and recompute the loss.
    eta = best_eta
    for j in range(0, cols, 1):
        w[j] = w[j] - eta * (dellf[j])
    error = 0
    for i in range(0, rows, 1):
        if(trainlabels.get(i)!= None):
            #error += ((trainlabels[i] - dot(w,data[i]))**2)
            error += max(0, 1- (trainlabels[i]*dot(w,data[i])))
    print ("error",error)
    #print ("diff",preverror - error)
print ("Final W::",w)
# ||w|| over the feature weights only; the last component is the bias
# term appended when the data was read, so it is excluded here.
normw = 0
for j in range(0, cols-1, 1):
    normw += w[j]**2
normw = math.sqrt(normw)
print ("||w||=", normw)
# Distance from the origin to the separating hyperplane: |bias| / ||w||.
d_origin = w[len(w)-1]/normw
print ("dist to orgin=", abs(d_origin))
# Classify every unlabeled row by the sign of w.x.
for i in range(0, rows, 1):
    if(trainlabels.get(i)==None):
        dp = dot(w,data[i])
        if(dp>0):
            print ("1", i)
        else:
            print ("0", i)
| [
"noreply@github.com"
] | noreply@github.com |
c77c63579a804de62fd29378f19a9657fa5df454 | 9ba5a98aaabbb851e5f44148f78d42f48cf24224 | /app.py | 18c80735e4e9a7da5718b55f56314ab710273bc1 | [] | no_license | PrettyWood/laputa-graphql | cc695409b79d3b7eb13b0334baf84141fef90737 | 070c7476be93af09e2653fdbb5dad46420348ded | refs/heads/master | 2020-03-18T19:36:26.697149 | 2018-05-29T07:51:46 | 2018-05-29T08:19:36 | 135,164,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | from flask import Flask, redirect
from flask_graphql import GraphQLView
from database import db_session, init_db
from schema import schema
# Application setup: one GraphQL endpoint with the GraphiQL UI enabled.
app = Flask(__name__)
app.debug = True  # NOTE(review): debug mode must be disabled in production
app.add_url_rule('/graphql', view_func=GraphQLView.as_view('graphql', schema=schema, graphiql=True))
@app.route('/')
def index():
    """Redirect the site root to the GraphQL/GraphiQL endpoint."""
    return redirect('/graphql')
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Dispose of the request's database session when the app context tears down."""
    db_session.remove()
if __name__ == '__main__':
    # Initialise the database (see database.init_db) and start Flask's
    # development server.
    init_db()
    app.run()
| [
"eric.jolibois@toucantoco.com"
] | eric.jolibois@toucantoco.com |
a109967e5b7a97237246fba43de3aa5bc7166fa1 | fa3096939beca2bc877824ef7bd7d7826c73aa6c | /ztq_core/test/test_redis_wrap.py | 552685ffbcb343775b89b3dc01f0e092eeae8932 | [
"MIT"
] | permissive | everydo/ztq | b043c18288125d6391caa14f0de6b629c7a8083b | 0237239626c9662ffa47879b590a534b2287c5d1 | refs/heads/master | 2021-06-08T09:21:09.265011 | 2016-02-23T09:01:23 | 2016-02-23T09:01:23 | 6,314,401 | 31 | 16 | MIT | 2018-05-21T01:40:35 | 2012-10-20T22:21:45 | Python | UTF-8 | Python | false | false | 4,097 | py | #coding:utf-8
'''
Test notes:
    These tests exercise the redis_wrap library's automatic JSON encoding.
Observed result:
    Ran 5 tests in 0.036s
    FAILED (failures=1)
    The failure happens when remove(value) is called on a list: redis's
    LREM cannot delete the serialized object, while the set type removes
    serialized objects correctly.
@author: Zay
'''
import unittest
from ztq_core import get_redis, get_list, get_hash, get_set, get_dict, setup_redis, \
     get_key, set_key, get_queue
class TestRediswrap(unittest.TestCase):
    """Exercises redis_wrap's automatic JSON (de)serialisation across every
    wrapped container type: key/value, dict, hash, list, set and queue."""
    def setUp(self):
        """Connect to redis and clear every key used by the tests.
        """
        setup_redis('default', '192.168.209.128', 6379, socket_timeout=2)
        get_redis(system='default').delete('list')
        get_redis(system='default').delete('set')
        get_redis(system='default').delete('hash')
        get_redis(system='default').delete('dict')
        get_redis(system='default').delete('kv')
        get_redis(system='default').delete('queue')
        self.message = {"hello":"grizzly"}
    def test_getset(self):
        """Basic get/set operations on a plain redis key.
        """
        Test_key = get_key('kv',serialized_type='json')
        self.assertEqual(Test_key,None)
        set_key('kv',self.message)
        Test_key = get_key('kv',serialized_type='json')
        self.assertEqual(Test_key,self.message)
    def test_dict(self):
        """Operations on redis_wrap's dict type.
        """
        Test_dict = get_dict('dict',serialized_type='json')
        Test_dict['id'] = self.message
        self.assertEqual(self.message, Test_dict['id'])
        for k,v in Test_dict.items():
            self.assertEqual(k, 'id')
            self.assertEqual(v, self.message)
        del Test_dict['id']
        self.assertNotEqual(self.message,Test_dict.get('id'))
    def test_hash(self):
        """Operations on redis_wrap's hash type.
        """
        Test_dict = get_hash('hash',serialized_type='json')
        Test_dict['id'] = self.message
        self.assertEqual(self.message, Test_dict['id'])
        del Test_dict['id']
        self.assertNotEqual(self.message,Test_dict.get('id'))
    def test_list(self):
        """Basic operations on redis_wrap's list type.
        """
        Test_list = get_list('list',serialized_type='json')
        Test_list.append(self.message)
        self.assertEqual( len(Test_list),1)
        for item in Test_list:
            self.assertEqual(self.message, item)
        # Known failure: redis's LREM cannot match the serialized object,
        # so this remove() does not actually delete the element.
        Test_list.remove(self.message)
        self.assertEqual( len(Test_list),0)
    def test_set(self):
        """Basic operations on redis_wrap's set type.
        """
        Test_set = get_set('set',serialized_type='json')
        Test_set.add(self.message)
        for item in Test_set:
            self.assertEqual( item,self.message)
        Test_set.remove(self.message)
        self.assertEqual( len(Test_set),0)
    def test_queue(self):
        """Basic operations on redis_wrap's queue type.
        """
        Test_queue = get_queue('queue',serialized_type='json')
        Test_queue.push(self.message)
        self.assertEqual( len(Test_queue),1)
        for item in Test_queue:
            self.assertEqual(self.message, item)
        # Known failure: redis's LREM cannot delete the serialized data.
        Test_queue.remove(self.message)
        self.assertEqual( len(Test_queue),0)
        #===========================================================================
        # 
        # message = Test_queue.pop(timeout= 1)
        # self.assertEqual(self.message, message)
        # self.assertEqual(len(Test_queue),0)
        #===========================================================================
if __name__ == '__main__':
    # Run the whole suite when executed directly.
    unittest.main()
| [
"xutaozhe@gmail.com"
] | xutaozhe@gmail.com |
a2082d583b1204cb5acc0a39b4dbc0bee07e32ba | 78c98220d9b922ce82caec7152caf25044c4fbe2 | /MikolovJoulinChopraEtAl2015/python/base_rnn_graph.py | 87348cc3e9d96cba945df430c3d98b9187fbc3b8 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | AI-Stuff/DeepLearningCertificate | f72df5407c873653828c50bb5c7e39bb5f8186c4 | 48b9a315c1636997dad6024c7e4974edf95d5334 | refs/heads/master | 2020-03-21T22:50:37.036146 | 2017-08-28T15:29:21 | 2017-08-28T15:29:21 | 139,150,736 | 1 | 0 | null | 2018-06-29T13:07:52 | 2018-06-29T13:07:52 | null | UTF-8 | Python | false | false | 7,915 | py | # Structurally Constrained Recurrent Network (SCRN) Model
#
# This gives an implementation of the SCRN model given in Mikolov et al. 2015, arXiv:1412.7753 [cs.NE],
# https://arxiv.org/abs/1412.7753 using Python and Tensorflow.
#
# This model is superceded by the Delta-RNN model given in Ororbia et al. 2017, arXiv:1703.08864 [cs.CL],
# https://arxiv.org/abs/1703.08864 implemented in this repository using Python and Tensorflow.
#
# This code fails to implement hierarchical softmax at this time as Tensorflow does not appear to include an
# implementation. Hierarchical softmax can be included at a future date when hierarchical softmax is available
# for Tensorflow.
#
# Stuart Hagler, 2017
# Imports
import math
import numpy as np
import tensorflow as tf
# Local imports
from batch_generator import batch_generator
from log_prob import log_prob
# Define base RNN TensorFlow graph class
class base_rnn_graph(object):
# Place holder for graph constructor
def __init__(self):
print('TensorFlow graph not defined')
# Placeholder function for cell definition
def _cell(self):
print('Cell not defined')
# Placeholder function to set up cell parameters
def _setup_cell_parameters(self):
print('Cell parameters not defined')
# Placeholder function to implement a tower to run part of a batch of training data on a GPU
def _training_tower(self, i, tower, gpu):
print('Training tower not defined')
# Placeholder function to implement a tower to run part of a batch of validation data on a GPU
def _validation_tower(self, tower, gpu):
print('Validation tower not defined')
# Train model parameters
def train(self, learning_rate, learning_decay, momentum, clip_norm, num_epochs, summary_frequency, training_text,
validation_text,logdir):
# Generate training batches
print('Training Batch Generator:')
training_batches = []
for tower in range(self._num_towers):
training_batches.append(batch_generator(tower, training_text[tower], self._training_batch_size,
self._num_training_unfoldings, self._vocabulary_size))
# Generate validation batches
print('Validation Batch Generator:')
validation_batches = []
tower = 0
for tower in range(self._num_towers):
validation_batches.append(batch_generator(tower, validation_text[tower], self._validation_batch_size,
self._num_validation_unfoldings, self._vocabulary_size))
# Training loop
batch_ctr = 0
epoch_ctr = 0
training_feed_dict = dict()
validation_feed_dict = dict()
with tf.Session(graph=self._graph, config=tf.ConfigProto(log_device_placement=True)) as session:
# Create summary writers
training_writer = tf.summary.FileWriter(logdir + 'training/', graph=tf.get_default_graph())
validation_writer = tf.summary.FileWriter(logdir + 'validation/', graph=tf.get_default_graph())
# Initialize
session.run(self._initialization)
print('Initialized')
# Iterate over fixed number of training epochs
for epoch in range(num_epochs):
# Display the learning rate for this epoch
print('Epoch: %d Learning Rate: %.2f' % (epoch+1, learning_rate))
# Training Step:
# Iterate over training batches
for tower in range(self._num_towers):
training_batches[tower].reset_token_idx()
session.run(self._reset_training_state)
for batch in range(training_batches[0].num_batches()):
# Get next training batch
training_batches_next = []
tower = 0
for tower in range(self._num_towers):
training_batches_next.append([])
training_batches_next[tower] = training_batches[tower].next()
batch_ctr += 1
# Optimization
training_feed_dict[self._clip_norm] = clip_norm
training_feed_dict[self._learning_rate] = learning_rate
training_feed_dict[self._momentum] = momentum
for tower in range(self._num_towers):
for i in range(self._num_training_unfoldings + 1):
training_feed_dict[self._training_data[tower][i]] = training_batches_next[tower][i]
_, summary = session.run([self._optimize, self._training_summary], feed_dict=training_feed_dict)
# Summarize current performance
training_writer.add_summary(summary, epoch * training_batches[0].num_batches() + batch)
if (batch+1) % summary_frequency == 0:
cst = session.run(self._cost, feed_dict=training_feed_dict)
print(' Total Batches: %d Current Batch: %d Cost: %.2f' %
(batch_ctr, batch+1, cst))
# Validation Step:
# Iterate over validation batches
for tower in range(self._num_towers):
validation_batches[tower].reset_token_idx()
session.run(self._reset_validation_state)
validation_log_prob_sum = 0
for _ in range(validation_batches[0].num_batches()):
# Get next validation batch
validation_batches_next = []
tower = 0
for tower in range(self._num_towers):
validation_batches_next.append([])
validation_batches_next[tower] = validation_batches[tower].next()
# Validation
validation_batches_next_label = []
for tower in range(self._num_towers):
validation_batches_next_label_tmp = []
for i in range(self._num_validation_unfoldings):
validation_feed_dict[self._validation_input[tower][i]] = validation_batches_next[tower][i]
validation_batches_next_label_tmp.append(validation_batches_next[tower][i+1])
validation_batches_next_label.append(validation_batches_next_label_tmp)
validation_prediction = session.run(self._validation_prediction, feed_dict=validation_feed_dict)
# Summarize current performance
for tower in range(self._num_towers):
for i in range(self._num_validation_unfoldings):
for j in range(self._validation_batch_size):
validation_log_prob_sum = validation_log_prob_sum + \
log_prob(validation_prediction[tower][i][j], validation_batches_next_label[tower][i][j])
# Calculation validation perplexity
N = self._num_towers * self._num_validation_unfoldings * \
validation_batches[0].num_batches() * self._validation_batch_size
perplexity = float(2 ** (-validation_log_prob_sum / N))
print('Epoch: %d Validation Set Perplexity: %.2f' % (epoch+1, perplexity))
# Update learning rate
if epoch > 0 and perplexity > perplexity_last_epoch:
learning_rate *= learning_decay
perplexity_last_epoch = perplexity | [
"noreply@github.com"
] | noreply@github.com |
79800ad69194b3606c66406778ab10c02b8d53a4 | 75523e326e407498c7f741ae276ff09730ee86b7 | /view/reviewer_view.py | 6cd6bb7295123d070a9530b0604d99bef8503c53 | [] | no_license | 1hs9/ConferenceManagementSystem | eb374f5d18f8f0b7386dd908140829a8ecd3efa7 | 8e485bbe0d2af2bfb335501ae1d5cdb902c0412d | refs/heads/main | 2023-06-11T21:04:56.094568 | 2021-07-06T05:43:50 | 2021-07-06T05:43:50 | 383,347,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | import sys
sys.path.append('/Users/harshitsharma/PycharmProjects/ConferenceManagementSystem/controller')
sys.path.append('/Users/harshitsharma/PycharmProjects/ConferenceManagementSystem/model')
class ReviewerView:
def reviewer_portal1(self, first_name, last_name):
print("***************************************\n"
' * Welcome {} {} *\n'
'***************************************\n'
'\nPress 1 to Specify Topics:\nPress 2 to View Allocated Papers\nPress 0 to log-out\n'
.format(first_name, last_name))
def user_name(self, first_name, last_name):
print("\n***************************************\n"
" * {} {} *\n"
"***************************************\n".format(first_name, last_name))
def paper_management(self, first_name, last_name):
print("\n***************************************\n"
" * {} {} *\n"
"***************************************\n"
"\nPress 1 to Download Paper\nPress 2 to Review Paper".format(first_name, last_name))
| [
"noreply@github.com"
] | noreply@github.com |
b005d6ca3c2a8b6170c8d74997aaea94fb3cdb6a | 006f440b387a88ecfb48607cd979b9216f0a21e7 | /mysite/WebLibrary/models.py | 10d75a8707d8f1074d25f6d2436982344f114ef1 | [] | no_license | moluszysdominika/PSInt | a71be4c9af7349c9eb2d9d3ff4dc4ded139c1859 | 91ec9cc550a09650c08407acc5ba124bc6d29837 | refs/heads/master | 2020-08-09T21:20:31.471978 | 2020-02-02T22:34:47 | 2020-02-02T22:34:47 | 214,176,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,544 | py | from django.db import models
# Create your models here.
class Reader(models.Model):
login = models.CharField(max_length=45)
password = models.CharField(max_length=45)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
address = models.CharField(max_length=200)
city = models.CharField(max_length=45)
region = models.CharField(max_length=100)
telephone = models.CharField(max_length=45)
postcode = models.CharField(max_length=45)
email = models.EmailField()
class Librarian(models.Model):
login = models.CharField(max_length=45)
password = models.CharField(max_length=45)
class Category(models.Model):
name = models.CharField(max_length=200)
class Book(models.Model):
isbn = models.CharField(max_length=13)
title = models.CharField(max_length=200)
author = models.CharField(max_length=200)
pages = models.IntegerField(max_length=5)
publisher = models.CharField(max_length=100)
year = models.IntegerField(max_length=4)
description = models.TextField()
category = models.ForeignKey(Category, on_delete=models.CASCADE)
class Order(models.Model):
order_date = models.DateTimeField
pickup_date = models.DateTimeField
return_date = models.DateTimeField
reader = models.ForeignKey(Reader, on_delete=models.CASCADE)
book = models.ForeignKey(Book, on_delete=models.CASCADE)
class Administrator(models.Model):
login = models.CharField(max_length=45)
password = models.CharField(max_length=45) | [
"cienista97@gmail.com"
] | cienista97@gmail.com |
f98699dbba275aa7c9012f686bb3d2cdeb277ce5 | 49c2418c977aa6091183283b1d71be806868522d | /main_app/migrations/0008_alter_airline_rating.py | f7cdd20e29c7820b05f64941eae833ba994ad432 | [] | no_license | Kir-Cicegi/voyageur | 5e1a6390523935f817ad7ed38ead09f8eb3f63a9 | e55eb7afc75875fcbe949216052562e6870cb73d | refs/heads/main | 2023-06-12T14:18:38.034019 | 2021-07-09T03:13:04 | 2021-07-09T03:13:04 | 382,433,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | # Generated by Django 3.2.4 on 2021-07-07 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0007_rename_name_airline_airline'),
]
operations = [
migrations.AlterField(
model_name='airline',
name='rating',
field=models.IntegerField(choices=[('1', '$'), ('2', '$$'), ('3', '$$$'), ('4', '$$$$'), ('5', '$$$$$')], default='1', max_length=1),
),
]
| [
"84298571+Kir-Cicegi@users.noreply.github.com"
] | 84298571+Kir-Cicegi@users.noreply.github.com |
627bcc579421c9e68946a4001c3726b2fc02b966 | e7b665624c1134f7a6b3ab7c043cfa5ec83227bb | /CycleGAN/__init__.py | c73deef2fd0626ca2afa3252d2b902d7958b1f51 | [] | no_license | zhijie-ai/GAN | 46f896909d1f5caedb7725cf44d328e24f4ad699 | 5e64b416209058721c582c3b71a1e9ca25cf169d | refs/heads/master | 2022-10-26T10:28:08.279901 | 2019-08-26T14:09:15 | 2019-08-26T14:09:15 | 204,423,289 | 1 | 3 | null | 2022-10-07T00:52:36 | 2019-08-26T07:45:08 | Python | UTF-8 | Python | false | false | 622 | py | #----------------------------------------------
# -*- encoding=utf-8 -*- #
# __author__:'xiaojie' #
# CreateTime: #
# 2019/7/5 22:13 #
# #
# 天下风云出我辈, #
# 一入江湖岁月催。 #
# 皇图霸业谈笑中, #
# 不胜人生一场醉。 #
#----------------------------------------------
# CycleGAN的分别用keras和tf的2种实现方式 | [
"15311484394@189.cn"
] | 15311484394@189.cn |
9d9a806f2ec508f3d202103ff17d592e98259b7b | 26f23588e80acc2b28d4cc70a8fbcf78c5b33a20 | /PythonSkills/decorator/basic02.py | ac7856a1b7b8a973b0e4280108fd34948670b37e | [] | no_license | Timehsw/PythonCouldbeEverything | aa31b3e32bf68b49fe8e96b971637353a8ef644f | 85d4f1a2c93c7b1edc34ceb9e8bb3c8d7beb30e9 | refs/heads/master | 2021-01-01T15:38:25.253094 | 2018-01-22T06:49:05 | 2018-01-22T06:49:05 | 97,661,530 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
'''
Created by hushiwei on 2018/1/7.
学习装饰器
闭包
函数里面可以定义函数
函数可以被传递赋值
函数可以被返回
那么装饰器就是,在函数之前额外做些事情
'''
'''
装饰器
有参函数
'''
def a_new_decorator(a_func):
def wrapTheFunction(*args,**kwargs):
print "I am doing some boring work before execution a_func()"
a_func(*args,**kwargs)
print "I am doing some boring work after execution a_func()"
return wrapTheFunction
@a_new_decorator
def a_function_requiring_decoration(name="hushiwei"):
print "I am %s"%name
a_function_requiring_decoration("Mike")
| [
"hsw.time@gmail.com"
] | hsw.time@gmail.com |
f8db1a24c726745ec3776f50cf977141e2e76ff5 | 7e170d0bf83a0753c2d524810e4764ac94223efe | /TF-Page/website/migrations/0003_auto_20170813_2019.py | c18030b2c7de2b001ee292ada822d50af70033ee | [] | no_license | tyronfonseca/TF-page-django | 2613291efca9800b9736b732897106316320c4b0 | 3ef1e9988d950aa7d83f15cb40d7f09035c35c7c | refs/heads/master | 2023-08-29T20:41:27.734635 | 2018-08-30T14:47:18 | 2018-08-30T14:47:18 | 424,658,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 02:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0002_auto_20170813_2006'),
]
operations = [
migrations.RemoveField(
model_name='habilidades',
name='tipo',
),
migrations.AlterField(
model_name='habilidades',
name='imagen',
field=models.ImageField(upload_to='images/habilidades'),
),
]
| [
"tyron.fonseca@hotmail.com"
] | tyron.fonseca@hotmail.com |
1abbb24193c9915113b11e312474efed18a77c9a | 7bd1c899b2306dd96442b4d716317bac78c8a0b8 | /auto_operations/config_operations.py | 3c48ceedff264a10478cfe47cba67d77c0e37af8 | [] | no_license | panarahc/automation | 24fbde4cb2ab5bca361f1fe1e03a2044ef6f83d2 | 041fee604185cb8294d3ad9515be8b7740a39138 | refs/heads/master | 2021-01-21T21:10:37.780213 | 2017-05-21T07:22:38 | 2017-05-21T07:22:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | #!/usr/bin/python
from operation_registry import OperationRegistry
import re
registry = OperationRegistry()
@registry.device_operation('apply_config',family='ios,iosxe')
def apply_config(context,target,commands):
"""
Returns:
Error if any encountered or True if no errors are seen.
"""
with context.get_connection("cli") as cli, cli.authorize():
result = cli.configure(commands)
return result
@registry.device_operation('config_replace',family='ios')
def config_replace(context,target,filename):
"""
IOS: configure replace <filename> force revert trigger error
Returns:
True if no errors are seen.
"""
commands = [ "configure replace {} force revert trigger error".format(filename) ]
with context.get_connection("cli") as cli:
result = cli.execute(commands)
return True
| [
"scet.amit@gmail.com"
] | scet.amit@gmail.com |
dcb95199ae8b2d00c2e425403a3da419cc0d1c69 | c8a41e7b2caa015903dc5aff2d8e34a5cbd34b8d | /python/itertools/compress-the-string.py | 0208eec6cbcbd5eed48a9fa26c1a73150292dc0a | [] | no_license | mauricioabreu/hacker-rank | bad197fec3582979df148a8212d330097191c2b7 | 3d2aaae53f438e4ef8d9382cc0c22003248c6787 | refs/heads/master | 2021-01-10T07:25:23.869714 | 2018-06-16T23:17:51 | 2018-06-16T23:17:51 | 46,177,986 | 6 | 1 | null | 2016-08-27T16:18:36 | 2015-11-14T14:38:13 | Python | UTF-8 | Python | false | false | 212 | py | from itertools import groupby
chars = raw_input().strip()
groups = []
for key, value in groupby(chars):
groups.append((len(list(value)), int(key)))
print ' '.join(['(%s, %s)' % (k, v) for k, v in groups])
| [
"mauricio.abreua@gmail.com"
] | mauricio.abreua@gmail.com |
102b5869f49685e43d6c2ce258585056568e87d3 | bd5708c70d54e1798f6a11bfb7c98f5899dc1526 | /packedbits.py | 1e576cabd73946b400284a4c60fae51e5f48bfb1 | [
"MIT"
] | permissive | patrickmacarthur/ssrando | 56e8b931e498b3c9bbca7d37ca1e1b966a4d2b70 | 39e8fbc43433b063641474f037d9b3be134e1522 | refs/heads/master | 2023-08-07T07:54:51.349196 | 2021-10-11T05:20:31 | 2021-10-11T05:20:31 | 415,785,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | # from: https://github.com/LagoLunatic/wwrando/blob/master/wwr_ui/packedbits.py
import base64
class PackedBitsWriter:
def __init__(self):
self.bits_left_in_byte = 8
self.current_byte = 0
self.bytes = bytearray()
def write(self, value, length):
while length:
if length >= self.bits_left_in_byte:
bits_to_read = self.bits_left_in_byte
else:
bits_to_read = length
mask = (1 << bits_to_read) - 1
self.current_byte |= (value & mask) << (8 - self.bits_left_in_byte)
self.bits_left_in_byte -= bits_to_read
length -= bits_to_read
value >>= bits_to_read
if self.bits_left_in_byte:
continue
self.flush()
def flush(self):
self.bytes.append(self.current_byte)
self.current_byte = 0
self.bits_left_in_byte = 8
def to_base64(self):
return base64.b64encode(self.bytes).decode('ascii')
class PackedBitsReader:
def __init__(self, bytes):
self.bytes = bytes
self.current_byte_index = 0
self.current_bit_index = 0
@staticmethod
def from_base64(data):
return PackedBitsReader(base64.b64decode(data.encode('ascii')))
def read(self, length):
bits_read = 0
value = 0
bits_left_to_read = length
while bits_read != length:
if bits_left_to_read > 8:
bits_to_read = 8
else:
bits_to_read = bits_left_to_read
if (bits_to_read + self.current_bit_index > 8):
bits_to_read = 8 - self.current_bit_index
mask = ((1 << bits_to_read) - 1) << self.current_bit_index
current_byte = self.bytes[self.current_byte_index]
value = ((current_byte & mask) >> self.current_bit_index) << bits_read | value
self.current_bit_index += bits_to_read
self.current_byte_index += self.current_bit_index >> 3
self.current_bit_index %= 8
bits_left_to_read -= bits_to_read
bits_read += bits_to_read
return value
| [
"lepelog4@gmail.com"
] | lepelog4@gmail.com |
0db8f21a975d0dc646839485352d0b6cf4df4064 | 4d66852253aaff5ee93ab73f41a531dd4a0b615d | /baseline_midsf.py | e37424d670ee6649c54e0d648617910e620d2ad2 | [] | no_license | zsc19/CaBERT-SLU | ed030ed01809f0ca6f93505d483a9d6750a48442 | ee1a46cf7c69ab5662a47ce9e65735cf877b1ea9 | refs/heads/main | 2023-08-10T19:30:21.793823 | 2021-09-17T14:18:25 | 2021-09-17T14:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,880 | py | """For model training and inference
Data input should be a single sentence.
"""
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from transformers import BertTokenizer, BertModel, BertConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from collections import Counter, defaultdict
from model import MULTI
from all_data_slot import get_dataloader
from config import opt
from utils import *
def train(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path_with_tokens)
print('Data Type: ', opt.datatype)
print('Use pretrained weights: ', opt.retrain)
# dataset
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
if opt.datatype == "mixatis" or opt.datatype == "mixsnips":
# ATIS Dataset
X_train, y_train, _ = zip(*train_data)
X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
train_loader = get_dataloader(train, len(dic), len(slot_dic), opt)
val_loader = get_dataloader(test, len(dic), len(slot_dic), opt)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
else:
print("Train from scratch...")
model = model.to(device)
# optimizer, criterion
# param_optimizer = list(model.named_parameters())
# no_decay = ['bias', 'gamma', 'beta']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.0}
# ]
# optimizer = BertAdam(optimizer_grouped_parameters,lr=opt.learning_rate_bert, warmup=.1)
optimizer = Adam(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_classifier)
if opt.data_mode == 'single':
criterion = nn.CrossEntropyLoss().to(device)
else:
criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device)
criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device)
best_loss = 100
best_accuracy = 0
best_f1 = 0
# Start training
for epoch in range(opt.epochs):
print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs))
# Training Phase
total_train_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.train()
ccounter = 0
for (captions_t, masks, labels, slot_labels) in tqdm(train_loader):
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
optimizer.zero_grad()
encoder_logits, decoder_logits, slot_logits = model(captions_t)
train_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
train_loss += criterion(decoder_logits, slabels)
train_loss += criterion2(slot_logits, slot_labels)
train_loss.backward()
optimizer.step()
total_train_loss += train_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/train_loader.dataset.num_data)
# Validation Phase
total_val_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.eval()
ccounter = 0
stats = defaultdict(Counter)
for (captions_t, masks, labels, slot_labels) in val_loader:
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
with torch.no_grad():
encoder_logits, decoder_logits, slot_logits = model(captions_t)
val_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
val_loss += criterion(decoder_logits, slabels)
total_val_loss += val_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(slot_logits, k=1, dim=-1)
evaluate_iob(index, slot_labels, slot_dic, stats)
print('========= Validation =========')
print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/val_loader.dataset.num_data)
val_acc = total_acc/val_loader.dataset.num_data
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print('========= Slot =========')
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
# for label in stats:
# if label != 'total':
# p, r, f1 = prf(stats[label])
# print(f'{label:4s}: P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}')
if f1 > best_f1:
print('saving with loss of {}'.format(total_val_loss),
'improved over previous {}'.format(best_loss))
best_loss = total_val_loss
best_accuracy = val_acc
best_f1 = f1
best_stats = copy.deepcopy(stats)
torch.save(model.state_dict(), 'checkpoints/best_{}_{}_baseline.pth'.format(opt.datatype, opt.data_mode))
print()
print('Best total val loss: {:.4f}'.format(total_val_loss))
print('Best Test Accuracy: {:.4f}'.format(best_accuracy))
print('Best F1 Score: {:.4f}'.format(best_f1))
p_slot, r_slot, f1_slot = prf(best_stats['total'])
print('Final evaluation on slot filling of the validation set:')
print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
#####################################################################
def test(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path)
# dataset
with open(opt.dic_path, 'rb') as f:
dic = pickle.load(f)
reverse_dic = {v: k for k,v in dic.items()}
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
if opt.test_path:
with open(opt.test_path, 'rb') as f:
test_data = pickle.load(f)
if opt.datatype == "atis":
# ATIS Dataset
X_train, y_train, _ = zip(*train_data)
X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
X_train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
X_test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
X_train, mask_train = load_data(X_train)
X_test, mask_test = load_data(X_test)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
model = model.to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Store embeddings
if opt.test_mode == "embedding":
train_loader = get_dataloader(X_train, y_train, mask_train, opt)
results = collections.defaultdict(list)
model.eval()
for i, (captions_t, labels, masks) in enumerate(train_loader):
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
hidden_states, pooled_output, outputs = model(captions_t, masks)
print("Saving Data: %d" % i)
for ii in range(len(labels)):
key = labels[ii].data.cpu().item()
embedding = pooled_output[ii].data.cpu().numpy().reshape(-1)
word_embeddings = hidden_states[-1][ii].data.cpu().numpy()
tokens = tokenizer.convert_ids_to_tokens(captions_t[ii].data.cpu().numpy())
tokens = [token for token in tokens if token != "[CLS]" and token != "[SEP]" and token != "[PAD]"]
original_sentence = " ".join(tokens)
results[key].append((original_sentence, embedding, word_embeddings))
torch.save(results, embedding_path)
# Run test classification
elif opt.test_mode == "data":
# Single instance
# index = np.random.randint(0, len(X_test), 1)[0]
# input_ids = X_test[index]
# attention_masks = mask_test[index]
# print(" ".join(tokenizer.convert_ids_to_tokens(input_ids)))
# captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
# mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
# with torch.no_grad():
# pooled_output, outputs = model(captions_t, mask)
# print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
# print("Real label: ", reverse_dic[y_test[index]])
# Validation Phase
test_loader = get_dataloader(X_test, y_test, mask_test, len(dic), opt)
error_ids = []
pred_labels = []
real_labels = []
test_corrects = 0
totals = 0
model.eval()
for i, (captions_t, labels, masks) in enumerate(test_loader):
print('predict batches: ', i)
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
_, pooled_output, outputs = model(captions_t, masks)
co, to = calc_score(outputs, labels)
test_corrects += co
totals += to
if opt.data_mode == 'single':
idx = torch.max(outputs, 1)[1] != labels
wrong_ids = [tokenizer.convert_ids_to_tokens(caption, skip_special_tokens=True) for caption in captions_t[idx]]
error_ids += wrong_ids
pred_labels += [reverse_dic[label.item()] for label in torch.max(outputs, 1)[1][idx]]
real_labels += [reverse_dic[label.item()] for label in labels[idx]]
else:
for i, logits in enumerate(outputs):
log = torch.sigmoid(logits)
correct = (labels[i][torch.where(log>0.5)[0]]).sum()
total = len(torch.where(labels[i]==1)[0])
if correct != total:
wrong_caption = tokenizer.convert_ids_to_tokens(captions_t[i], skip_special_tokens=True)
error_ids.append(wrong_caption)
pred_ls = [reverse_dic[p] for p in torch.where(log>0.5)[0].detach().cpu().numpy()]
real_ls = [reverse_dic[i] for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1]
pred_labels.append(pred_ls)
real_labels.append(real_ls)
with open('error_analysis/{}_{}.txt'.format(opt.datatype, opt.data_mode), 'w') as f:
f.write('----------- Wrong Examples ------------\n')
for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)):
f.write(str(i)+'\n')
f.write(' '.join(caption)+'\n')
f.write('Predicted label: {}\n'.format(pred))
f.write('Real label: {}\n'.format(real))
f.write('------\n')
test_acc = test_corrects.double() / test_loader.dataset.num_data if opt.data_mode == 'single' else test_corrects.double() / totals
print('Test accuracy: {:.4f}'.format(test_acc))
# User defined
elif opt.test_mode == "user":
while True:
print("Please input the sentence: ")
text = input()
print("\n======== Predicted Results ========")
print(text)
text = "[CLS] " + text + " [SEP]"
tokenized_text = tokenizer.tokenize(text)
tokenized_ids = np.array(tokenizer.convert_tokens_to_ids(tokenized_text))[np.newaxis,:]
input_ids = pad_sequences(tokenized_ids, maxlen=opt.maxlen, dtype="long", truncating="post", padding="post").squeeze(0)
attention_masks = [float(i>0) for i in input_ids]
captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
with torch.no_grad():
pooled_output, outputs = model(captions_t, mask)
print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
print("=================================")
if __name__ == '__main__':
import fire
fire.Fire()
| [
"waynewu6250@gmail.com"
] | waynewu6250@gmail.com |
ec3c3e9a1609b3241c9287dcf01219c6d607eeb7 | d12b53101c289a1d752862e20ffc079e3ab4e057 | /2.0/overturn.py | a15642ab7a89395e9a8230990f94277a71dc0b9f | [] | no_license | UCAS007/adavanced-aritificial-intelligence | 13708985b65fe0d27ed1fe93e05eb54ddef9949d | d88fcc8f5a59f290a866a04db6bcbe133bdc3ba3 | refs/heads/master | 2021-01-10T15:21:07.819354 | 2016-05-03T14:03:00 | 2016-05-03T14:03:00 | 45,598,387 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,038 | py | """
Fmax=0.995512 Perceptron
"""
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier,Perceptron
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import metrics
import jieba
import mydataset
import os
import csv,codecs
import preprocessing as pp
import time
def train():
    """Train a CountVectorizer -> TfidfTransformer -> Perceptron pipeline.

    Loads the cached, pre-segmented training corpus from ``train.pkl`` when
    it exists; otherwise fetches the raw texts from ``mydataset``, segments
    them (the jieba cut happens inside ``mydataset``) and caches the result.
    Fits the pipeline on the full training set, caches it to
    ``pipeline.pkl``, then evaluates precision/recall/F1 of the positive
    class (label ``u'1'``) on the *training* data.

    Returns:
        tuple: ``(F, pipeline)`` — the training-set F1 score of the
        positive class and the fitted sklearn pipeline.
    """
    trainFileName = 'train.pkl'
    pipelineFileName = 'pipeline.pkl'

    # --- Load (or build and cache) the segmented training corpus. ---
    if os.path.exists(trainFileName):
        # FIX: pickle files must be opened in binary mode ('rb'/'wb');
        # the original used text mode, which breaks on Python 3 and on
        # Windows.  Context managers guarantee the handles are closed.
        with open(trainFileName, 'rb') as fin:
            trainData = pickle.load(fin)
            trainClass = pickle.load(fin)
    else:
        trainText = mydataset.getAllTrainTextList()
        total = len(trainText)
        trainData = []
        trainClass = []
        for i, (tag, text) in enumerate(trainText, 1):
            if i % 5000 == 0:
                # Progress report every 5000 samples.
                print('i=%08d finished %5.5f%% using jieba to cut the text\n' % (i, i * 100.0 / total))
            trainData.append(text)
            trainClass.append(tag)
        with open(trainFileName, 'wb') as fout:
            pickle.dump(trainData, fout)
            pickle.dump(trainClass, fout)

    # --- Fit a fresh pipeline.  (The cached-pipeline reload branch was
    # permanently disabled in the original via ``if(False)`` and has been
    # removed as dead code.) ---
    pipeline = Pipeline([
        ('vect', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', Perceptron()),
    ])
    pipeline.set_params(vect__max_df=0.6, tfidf__norm='l1',
                        tfidf__use_idf=True, vect__ngram_range=(1, 2))
    pipeline.fit(trainData, trainClass)
    with open(pipelineFileName, 'wb') as fout:
        pickle.dump(pipeline, fout)

    # --- Evaluate on the training data itself. ---
    prec = pipeline.predict(trainData)
    expected = trainClass
    TP = 0.0
    TN = 0.0
    FP = 0.0
    FN = 0.0
    for predicted, actual in zip(prec, expected):
        if predicted == actual:
            if predicted == u'1':
                TP += 1
            else:
                TN += 1
        else:
            if predicted == u'1':
                FP += 1
            else:
                FN += 1
    # FIX: guard against ZeroDivisionError when the classifier never (or
    # always) predicts the positive class; the original would crash here.
    P = TP / (TP + FP) if (TP + FP) else 0.0
    R = TP / (TP + FN) if (TP + FN) else 0.0
    F = 2 * P * R / (P + R) if (P + R) else 0.0
    return F, pipeline
############################################# output test result
if __name__ == '__main__':
    trainFileName = 'train.pkl'
    testFileName = 'test.pkl'
    pipelineFileName = 'pipeline.pkl'
    bestPipelineFileName = 'bestPipeline.pkl'

    # Retrain several times and keep the pipeline with the best training F1
    # (Perceptron fitting is order/seed sensitive, so runs differ).
    Fmax = 0
    bestPipeline = None
    for i in range(1, 10):
        print('i=%d \n' % (i))
        t1 = time.time()
        F, pipeline = train()
        t2 = time.time()
        # FIX: the original used the Python-2-only form
        # ``print (t2-t1).__str__()+'s'``, which is a TypeError on Python 3
        # and inconsistent with the call-style prints used elsewhere.
        print(str(t2 - t1) + 's')
        if F > Fmax:
            Fmax = F
            bestPipeline = pipeline
        print('Fmax=%f \n' % (Fmax))

    if bestPipeline is None:
        # FIX: if every run scored F == 0 the original raised a NameError
        # on ``bestPipeline``; fall back to the last trained model instead.
        bestPipeline = pipeline
    # FIX: binary mode + context manager for the pickle dump.
    with open(bestPipelineFileName, 'wb') as fout:
        pickle.dump(bestPipeline, fout)
    print('Fmax=%f \n' % (Fmax))

    # Load (or build and cache) the test texts.
    if os.path.exists(testFileName):
        with open(testFileName, 'rb') as fin:
            testText = pickle.load(fin)
    else:
        testText = mydataset.getTestTextList()
        with open(testFileName, 'wb') as fout:
            pickle.dump(testText, fout)

    # Write one "id,label" line per test sample; ids start at 800001.
    outputFileName = '../output/upload.csv'
    # FIX: predict with the best pipeline that was tracked and saved above;
    # the original predicted with the *last* pipeline from the loop, which
    # defeats the whole best-of-N selection.
    prec = bestPipeline.predict(testText)
    with codecs.open(outputFileName, 'w', 'utf-8') as fileOutput:
        for sample_id, label in enumerate(prec, 800001):
            fileOutput.write(str(sample_id) + ',' + label + '\n')
    # Audible notification that the run is finished.
    os.system("mplayer ~/music/alert.mp3")
"youdaoyzbx@163.com"
] | youdaoyzbx@163.com |
7e639fe5d4f22819ad092582b315f0b876db46d0 | 621f6e38ddcb24217acabd717022c73195188d6e | /ui/play_display.py | 6e52c8d3b517e4844479e0edf9f421c9aac108e2 | [] | no_license | antoninmsl/threes_game | e86f6d5ca664cbbaaffe9900e59bf1cede28c227 | e7e2e5ca72c9a94adc9c9b4e3c06f14090f910a4 | refs/heads/main | 2023-04-22T01:16:15.003089 | 2021-05-13T11:39:39 | 2021-05-13T11:39:39 | 367,000,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | from termcolor import *
import sys
sys.path.append("../")
from tiles.tiles_acces import get_value
# -------- Fonction de la partie 1 --------
def full_display(plateau):
    """
    Print a colour rendering of the board ``plateau`` and return the
    rendered string (the return value is used by the unit tests).

    Each cell is drawn 3 terminal rows high and 5 columns wide, with grey
    separator lines/columns between cells.  Colour scheme: value 0 is
    solid blue (value hidden), 1 is white-on-blue, 2 is white-on-red, any
    other value is black-on-white.

    NOTE(review): ``plateau`` is assumed to be a dict with key 'n' (board
    side length) plus whatever ``get_value`` reads — confirm against the
    ``tiles`` package.
    """
    # Separator string printed between rows of cells (grey background).
    line_separator = ' ' * (plateau['n'] + 1) + ' ' * plateau['n']
    # ``message`` accumulates the whole formatted board; it is built up
    # piece by piece and printed once at the end.
    message = '\n'
    for i in range(0, plateau['n']*3):
        # Walk the board screen-row by screen-row; each board row spans
        # 3 screen rows (hence the factor 3).
        if i % 3 == 0:  # grey separator line before each new board row
            message += colored(line_separator, None, 'on_grey') + '\n'
        for j in range(0, plateau['n']):  # walk the row cell by cell
            message += colored(' ', None, 'on_grey')
            # Grey separator column before each cell.
            val = get_value(plateau, i//3, j)  # cell value (i//3 = board row)
            if val == 0:
                # Value 0: blue text on blue background so the value is
                # invisible (empty cell shown as a solid blue square).
                color = 'blue'
                on_color = 'on_blue'
            elif val == 1:
                # Value 1: white text on blue background.
                color = 'white'
                on_color = 'on_blue'
            elif val == 2:
                # Value 2: white text on red background.
                color = 'white'
                on_color = 'on_red'
            else:
                # Any other value: default (black) text on white
                # background (None means termcolor's default colour).
                color = None
                on_color = 'on_white'
            if i%3 == 1:  # middle screen row of the cell: show the value
                message += colored(str.center(str(val), 5), color, on_color, attrs=['bold'])
            else:  # top/bottom screen rows of the cell: background only
                message += colored(' ', None, on_color)
        message += colored(' ', None, 'on_grey') + '\n'
        # Grey separator column closing each screen row.
    message += colored(line_separator, None, 'on_grey') + '\n'  # closing separator line
    print(message)  # display the formatted board
    return message  # returned for the unit tests
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.