text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
from unittest import TestCase
from ... import SpannerModelRegistry
from .helper import TestModelA, TestModelB, TestModelC, TestModelD
class SpannerModelTests(TestCase):
    """Tests for SpannerModelRegistry: model ordering, DDL generation, drops."""

    def test_get_registered_models_in_correct_order(self):
        # The registry must yield parents before interleaved children:
        # A first, then its children B and D, then C.
        sorted_models = list(SpannerModelRegistry.get_registered_models_in_correct_order())
        self.assertEqual(len(sorted_models), 4)
        self.assertEqual(sorted_models[0], TestModelA)
        self.assertEqual(sorted_models[1], TestModelB)
        self.assertEqual(sorted_models[2], TestModelD)
        self.assertEqual(sorted_models[3], TestModelC)

    def test_stmt_create(self):
        # Ten DDL statements are expected across the four helper models.
        ddl_statements = SpannerModelRegistry.create_table_statements()
        self.assertEqual(len(ddl_statements), 10)
        # ModelA
        self.assertEqual(ddl_statements[0], """CREATE TABLE `model_a` (
`id_a` INT64 NOT NULL,
`field_int_not_null` INT64 NOT NULL,
`field_int_null` INT64 NULL,
`field_string_not_null` INT64 NOT NULL,
`field_string_null` STRING(200) NULL
) PRIMARY KEY (`id_a` );""")
        # interleave index test
        # fixme: move to own test
        self.assertEqual(ddl_statements[3], "CREATE INDEX `interleaved` ON `model_b` (`id_a` , `idb_b` DESC, `value_field_x` , `value_field_y` ), INTERLEAVE IN `model_a`;")
        # ModelD
        self.assertEqual(ddl_statements[6], """CREATE TABLE `model_d` (
`id_a` INT64 NOT NULL,
`id_b` INT64 NOT NULL,
`value_field_x` INT64 NULL,
`value_field_y` INT64 NULL,
`value_field_z` STRING(5) NULL,
`id_d` INT64 NOT NULL
) PRIMARY KEY (`id_a` , `id_b` ) INTERLEAVE IN `model_a ` ON DELETE CASCADE;""")

    def test_prio_dict(self):
        # Invert the prio dict (prio -> [models]) into model -> prio for
        # easier assertions; priorities 0..9 are scanned.
        prio_dict = SpannerModelRegistry.get_registered_models_prio_dict()
        prio_dict_lookup = {}
        for i in range(0, 10):
            for cls in prio_dict[i]:
                prio_dict_lookup[cls] = i
        self.assertEqual(prio_dict_lookup[TestModelA], 0)
        self.assertEqual(prio_dict_lookup[TestModelB], 1)
        self.assertEqual(prio_dict_lookup[TestModelD], 1)
        self.assertEqual(prio_dict_lookup[TestModelC], 2)

    def test_stmt_delete(self):
        # Drops are emitted in the same parent-first order as creation.
        ddl_statements = SpannerModelRegistry.delete_table_statements()
        self.assertEqual(len(ddl_statements), 4)
        self.assertEqual(ddl_statements[0], 'DROP TABLE `model_a`')
        self.assertEqual(ddl_statements[1], 'DROP TABLE `model_b`')
        self.assertEqual(ddl_statements[2], 'DROP TABLE `model_d`')
        self.assertEqual(ddl_statements[3], 'DROP TABLE `model_c`')
|
from appJar import gui
import gensim
from langdetect import detect
# Load both word2vec models (English and Portuguese) into memory.
# model = gensim.models.KeyedVectors.load_word2vec_format('model_trained_en.bin', binary = 1)
# model_pt = gensim.models.KeyedVectors.load_word2vec_format('model_trained_pt.txt')
model_pt = gensim.models.Word2Vec.load('mymodel_pt')
model = gensim.models.Word2Vec.load('mymodel')
# Module-level state read/written by testEvent().
text = []
result = []
related = []
similaritytuple = ()
# Callback invoked roughly once per second while the GUI runs.
def testEvent():
    """Poll the text area and refresh the four suggestion list boxes.

    Splits the current text, detects its language (Portuguese uses the PT
    model, anything else the English model), and fills the boxes with
    predicted next words, related words, a similarity score and the
    worst-fitting word.  Clears all boxes when there is no text.
    """
    similarity = []
    # BUG FIX: `worst` was only assigned when len(text) > 2, so the
    # updateListBox("worstfit", worst) call raised NameError otherwise.
    worst = []
    # Pre-processing: tokenize the text area contents.
    text = app.getTextArea("t1").split()
    if not text:
        # No text: clear every box and skip the model queries entirely.
        # BUG FIX: the original fell through with `lang` undefined and
        # crashed with NameError inside the try block.
        app.updateListBox("list", [])
        app.updateListBox("relatedwords", [])
        app.updateListBox("similarityranking", [])
        app.updateListBox("worstfit", [])
        return
    # Detect the language of the whole text area.
    lang = detect(app.getTextArea("t1"))
    try:
        # Select the model matching the detected language.
        if lang == "pt":
            print("PT")
            mdl = model_pt
        else:
            print("EN")
            mdl = model
        # Fill working variables with model output.
        result = mdl.predict_output_word(text, topn=5)
        related = mdl.most_similar(positive=text[:-1], topn=5)
        if len(text) > 2:
            # Similarity between the two words preceding the last one.
            similarity = [mdl.similarity(text[-2], text[-3])]
            worst = [mdl.doesnt_match(text)]
        # Post-processing: keep only the word from each (word, score) pair.
        divide = [r[0] for r in result] if result is not None else []
        divide2 = [rel[0] for rel in related] if related is not None else []
        # Push the results into the GUI list boxes.
        app.updateListBox("list", divide)
        app.updateListBox("relatedwords", divide2)
        app.updateListBox("similarityranking", similarity)
        app.updateListBox("worstfit", worst)
    except KeyError:
        # The model does not know one of the words.
        print("Word Not Found.")
app = gui()
# Layout: column 0 holds the input text area, columns 1-4 the output boxes.
app.addLabel("Word Predictor", "Word Predictor")
app.addLabel("Text", "Text",0, 0)
app.addTextArea("t1",1,0)
app.addLabel("Suggestions", "Suggestions",0,1)
app.addListBox("list",[],1,1)
app.addLabel("Related Words", "Related Words",0,2)
app.addListBox("relatedwords",[],1,2)
app.addLabel("Similarity Ranking", "Similarity Ranking",0,3)
app.addListBox("similarityranking",[],1,3)
app.addLabel("Worst Fit", "Worst Fitting Word",0,4)
app.addListBox("worstfit",[],1,4)
# Poll testEvent periodically while the GUI runs.
app.registerEvent(testEvent)
# Start the GUI main loop (blocks).
app.go()
from django.test import TestCase# 单元测试类django.test.TestCase 集成unittest.TestCsse
from sign.models import Event,Guest
import time
from django.contrib.auth.models import User
# Create your tests here.
#首先创建测试类
class ModelTest(TestCase):
    """Model-layer tests for Event and Guest."""

    # Fixture: create one Event and one Guest attached to it.
    def setUp(self):
        Event.objects.create(name="oneplus 3 event", status=True, limit=2000,address='shenzhen', start_time='2016-08-31 02:18:22')
        result = Event.objects.get(name='oneplus 3 event')
        Guest.objects.create(event=result, real_name='alen',phone='13711001101', email='alen@mail.com', sign=False)

    # 1. Fetch the inserted event by name and verify address and status.
    def test_event_models(self):
        result = Event.objects.get(name="oneplus 3 event")
        self.assertEqual(result.address, "shenzhen")
        self.assertTrue(result.status)

    # 2. Fetch the inserted guest by phone and verify name and sign flag.
    def test_guest_models(self):
        result = Guest.objects.get(phone='13711001101')
        self.assertEqual(result.real_name, "alen")
        self.assertFalse(result.sign)
#写完测试用例后,执行测试用例。这里与unittest的运行方法也不一样
class LoginAction(TestCase):
    """Tests for the login view.

    BUG FIX: the three login tests all shared the method name
    `test_log_action_username_password_null`, so Python kept only the last
    definition and two of the three tests never ran.  Each case now has a
    distinct name.
    """

    def setUp(self):
        # Create the admin user the login tests authenticate against.
        User.objects.create_user('admin', 'admin@email.com', 'admin123456')

    def test_add_admin(self):
        """The created user has the expected username and email."""
        user = User.objects.get(username="admin")
        print("----------------" + user.username)
        self.assertEqual(user.username, "admin")
        self.assertEqual(user.email, "admin@email.com")

    def test_log_action_username_password_null(self):
        """Empty username/password is rejected with an error message."""
        test_data = {'username': '', 'password': ''}
        response = self.client.post('/login_action/', data=test_data)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b"username or password error!", response.content)

    def test_log_action_username_password_error(self):
        """Wrong credentials are rejected with an error message."""
        test_data = {'username': 'abc', 'password': 'a123'}
        response = self.client.post('/login_action/', data=test_data)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b"username or password error!", response.content)

    def test_log_action_success(self):
        """Valid credentials redirect (302) after login."""
        test_data = {'username': 'admin', 'password': 'admin123456'}
        response = self.client.post('/login_action/', data=test_data)
        self.assertEqual(response.status_code, 302)
class EventManageTest(TestCase):
    """Tests for the event-management views."""

    login_user = {}

    # BUG FIX: this was spelled `setUP`, so unittest never called it and
    # every test ran against an empty database with no login user.
    def setUp(self):
        User.objects.create_user('admin', 'admin@email.com', 'admin123456')
        Event.objects.create(name="oneplus 3 event", status=True, limit=2000,
                             address='shenzhen', start_time='2016-08-31 02:18:22')
        self.login_user = {'username': 'admin', 'password': 'admin123456'}

    def test_event_manage_success(self):
        """The event list page shows the seeded event after login."""
        response = self.client.post('/login_action/', data=self.login_user)
        # BUG FIX: `"..." + response` raised TypeError (str + HttpResponse);
        # log the status code instead.
        print("++++++++++++++++++", response.status_code)
        response = self.client.post('/event_manage/')
        print("================", response.status_code)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'oneplus 3 event', response.content)
        self.assertIn(b'shenzhen', response.content)

    def test_event_manage_search_success(self):
        """Searching by name returns the matching event."""
        response = self.client.post('/login_action/', data=self.login_user)
        response = self.client.post('/search_name/', {"name": "oneplus"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'oneplus 3 event', response.content)
        # BUG FIX: the seeded event's address is 'shenzhen'; asserting
        # b'beijing' could never pass against this fixture.
        self.assertIn(b'shenzhen', response.content)
|
import torch
import numpy as np
from core.config import config
from core.build_network import build_network
from core.compute_score import calc_psnr, calc_ssim
from core.sr import generate_sr
from util.util import *
from os.path import join
## image_name, blur kernel index, upscale factor
im_name = 'baby.bmp'
factor = config.network.factor
ker_id = config.test.ker_id
## read image and convert to ycbcr
im = load_img(join(config.test.data_path, config.test.data_set, im_name), factor)
# NOTE(review): `sc` is not defined by the visible imports -- presumably a
# color module (e.g. skimage.color) pulled in via `from util.util import *`;
# confirm.  `im[..., -1::-1]` reverses the channel axis (BGR -> RGB).
im_ycbcr = sc.rgb2ycbcr(im[..., -1::-1])
## generate blur image (HR ground truth, LR input, bicubic upsample)
im_hr, im_lr, im_bi = generate_lr(im_ycbcr[..., 0], factor, ker_id)
## load models and run SR
net = build_network()
ckpt = torch.load(join(config.test.model_path, 'epoch-x%d.pth'%factor))
net.load_state_dict(ckpt['model_state_dict'])
im_sr = generate_sr(net, im_lr, im_bi, config)
## compute scores on Y channel only (HR is rescaled from 0-255 to 0-1)
psnr = calc_psnr(im_hr/255.0, im_sr, factor, data_range = config.network.input_range, benchmark=True)
ssim = calc_ssim(im_hr/255.0, im_sr, factor, data_range=config.network.input_range, benchmark=True)
print('Img: %s, factor: %d, psnr: %.2f, ssim: %.4f' %(im_name, factor, psnr, ssim))
## save rgb SR results
if config.test.is_save:
    im_sr = np.clip(im_sr, 0, 1.0)*255.0
    ## super-resolve cb, cr channles using bicubic
    _, _, im_cb = generate_lr(im_ycbcr[..., 1], factor, ker_id)
    _, _, im_cr = generate_lr(im_ycbcr[..., 2], factor, ker_id)
    im_sr_rgb = np.stack([im_sr, im_cb, im_cr], axis=-1)
    im_sr_rgb = ycbcr2rgb(im_sr_rgb)
    save_img(im_sr_rgb, im_name.split('.')[0]+'_sr_x%d.png'%factor, config.test.save_path)
|
#!/usr/bin/env python3
import os
import pytz
import hashlib
import stat
import sys
import requests
import tempfile
import zipfile
import time
import configparser
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
def getMinValue(now):
    """Return the number of minutes elapsed since midnight for *now*."""
    hours_as_minutes = now.hour * 60
    return hours_as_minutes + now.minute
print("PyJoinMeet v0.0.1, A Simple Python Script to Join a Google Meet")
print("Copyright (C) Antony Jr.")
print("")
# Two positional arguments are required: google username and meet URL.
if len(sys.argv) < 3:
    print("Usage: ./PyJoinMeet.py [Google Username] [Google Meet URL]")
    sys.exit()
actual_user = sys.argv[1]
# The username is md5-hashed (hex) to name the per-user Chrome profile dir.
google_user = hashlib.md5(bytes(sys.argv[1], 'utf-8')).digest().hex()
google_meet_url = sys.argv[2]
data_dir = '{}/PyJoinMeet'.format(os.path.expanduser('~'))
chrome_driver_path = '{}/chromedriver'.format(data_dir)
if not os.path.exists(data_dir):
    try:
        os.mkdir(data_dir)
    # NOTE(review): bare except hides the actual OSError -- narrow it.
    except:
        print("ERROR: cannot create data dir")
        sys.exit(-1)
# A previously imported Chrome profile and ini config must exist for the user.
if not os.path.exists('{}/{}'.format(data_dir, google_user)):
    print("ERROR: Please Import User Data for {}".format(actual_user))
    sys.exit()
if not os.path.exists('{}/{}/pyjoinmeet.ini'.format(data_dir, google_user)):
    print("ERROR: Invalid Import for {}, cannot find pyjoinmeet.ini".format(actual_user))
    sys.exit()
# Check the driver binary (comment said "gecko" but the path is chromedriver).
if not os.path.exists(chrome_driver_path):
    print("Please download and put the chromedriver at {}.".format(data_dir))
    sys.exit(-1)
# Headless Chrome configured to look like a regular session and to
# auto-allow mic/camera/notification prompts.
options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--user-data-dir={}/{}".format(data_dir, google_user))
options.add_argument("start-maximized")
options.add_argument("--disable-infobars")
options.add_argument("--disable-extensions")
options.add_argument("--disable-gpu")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_experimental_option("excludeSwitches" , ["enable-automation","load-extension", "test-type"])
options.add_experimental_option("prefs", { \
    "profile.default_content_setting_values.media_stream_mic": 1,
    "profile.default_content_setting_values.media_stream_camera": 1,
    "profile.default_content_setting_values.notifications": 1
})
config = configparser.ConfigParser()
config.read('{}/{}/pyjoinmeet.ini'.format(data_dir, google_user))
# Weekday name -> datetime.weekday() index.
# NOTE(review): "sunday" (6) is missing -- a Sunday schedule is rejected
# as an invalid day range; confirm whether that is intended.
days = {
    "monday" : 0,
    "mon" : 0,
    "tuesday" : 1,
    "tue": 1,
    "wednesday" : 2,
    "wed" : 2,
    "thu" : 3,
    "thursday": 3,
    "friday" : 4,
    "fri" : 4,
    "saturday": 5,
    "sat": 5,
}
# Resolve the configured timezone by substring match against pytz's list.
timezone = None
for tz_name in pytz.all_timezones:
    if config['DEFAULT']['timezone'].lower() in tz_name.lower():
        timezone = pytz.timezone(tz_name)
        break
if timezone is None:
    print("ERROR: Invalid Timezone")
    # BUG FIX: `driver.close()` was called here, but the webdriver is not
    # created until much later -- this path raised NameError instead of
    # exiting cleanly.  Exit non-zero to signal the configuration error.
    sys.exit(-1)
start_day = config['DEFAULT']['startday'].lower()
end_day = config['DEFAULT']['endday'].lower()
start_time = config['DEFAULT']['starttime'].lower()
end_time = config['DEFAULT']['endtime'].lower()
# NOTE(review): the indentation below is reconstructed from a
# whitespace-mangled source -- verify the nesting against the original.
while True:
    if start_day not in days or end_day not in days:
        print("WARNING: Invalid day range, abort.")
        break;
    now = datetime.now(timezone)
    # Only attempt to join on the configured weekdays.
    if now.weekday() >= days[start_day] and now.weekday() <= days[end_day]:
        print("INFO: Wating for the right time to join class")
        start = start_time.split(':')
        end = end_time.split(':')
        # Convert everything to minutes.
        startMinValue = (int(start[0]) * 60) + int(start[1])
        endMinValue = (int(end[0]) * 60) + int(end[1])
        now = datetime.now(timezone) # Get the required current timee
        nowMinValue = getMinValue(now)
        if startMinValue <= nowMinValue and endMinValue >= nowMinValue:
            # Join the class now
            print("INFO: Joining Meet {} as {}.".format(google_meet_url, actual_user))
            # Start the driver
            driver = webdriver.Chrome(options=options, executable_path=chrome_driver_path)
            joined = False
            driver.get(google_meet_url)
            time.sleep(10)
            # Click "Join now", falling back to "Ask to join".
            try:
                driver.find_element_by_xpath("//span[contains(text(), 'Join now' )]").click()
                joined = True
            except NoSuchElementException:
                try:
                    driver.find_element_by_xpath("//span[contains(text(), 'Ask to join')]").click()
                    joined = True
                except NoSuchElementException:
                    print("WARNING: Cannot JOIN")
                    joined = False
            # Stay in the meeting (or keep warning) until the end time passes.
            while True:
                if not joined:
                    print("WARNING: Cannot JOIN")
                now = datetime.now(timezone) # Get the required current time.
                nowMinValue = getMinValue(now)
                if nowMinValue > endMinValue:
                    break
                time.sleep(30)
            # End Class
            if joined:
                print("Now Hour: {}, Now Minute: {}".format(now.hour, now.minute))
                print("INFO: Leaving Meet {} as {}.".format(google_meet_url, actual_user))
                driver.close()
                driver.quit()
            else:
                print("FATAL: Did not attend session because could not join")
            time.sleep(5);
        else:
            # Not yet inside the time window: poll again shortly.
            time.sleep(30);
sys.exit()
|
from django.contrib import admin
from members.models import Member, Sub
class MemberAdmin(admin.ModelAdmin):
    """Default admin options for Member."""
    pass


class SubAdmin(admin.ModelAdmin):
    """Default admin options for Sub."""
    pass


# Expose both models in the Django admin with their (default) options.
admin.site.register(Member, MemberAdmin)
admin.site.register(Sub, SubAdmin)
|
#import headers
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from pyspark.sql.types import StringType
from pyspark.sql import Row
if __name__ == "__main__":
    # Create Spark context with spark configuration
    conf = SparkConf().setAppName("CSV USED UNION OPERATION")
    sc = SparkContext(conf=conf)
    sqlcon = SQLContext(sc)
    # Read Uni.csv with a header row; all columns load as strings.
    df = sqlcon.read.format("com.databricks.spark.csv")\
        .option("header", "true")\
        .load("Uni.csv")
    df.createOrReplaceTempView("df")
    df.show()
    # df1 = all columns except C.
    df1 =df.drop("C")
    df1.createOrReplaceTempView("df1")
    df1.show()
    # df2 = all columns except B.
    df2 =df.drop("B")
    df2.createOrReplaceTempView("df2")
    df2.show()
    # NOTE(review): DataFrame.union pairs columns by POSITION, not by name,
    # so df2's remaining columns line up under df1's headers -- confirm
    # this positional union is the intended demonstration.
    df3 = df1.union(df2)
    df3.createOrReplaceTempView("df3")
    df3.show()
    sc.stop()
import requests
import pickle
# Mexican states (plus Mexico City) used as geocoding queries below.
regions = [
    "Aguascalientes",
    "Baja California Norte",
    "Baja California Sur",
    "Campeche",
    "Chiapas",
    "Chihuahua",
    "Coahuila",
    "Colima",
    "Ciudad de México",
    "Durango",
    "Estado de México",
    "Guanajuato",
    "Guerrero",
    "Hidalgo",
    "Jalisco",
    "Michoacán",
    "Morelos",
    "Nayarit",
    "Nuevo León",
    "Oaxaca",
    "Puebla",
    "Querétaro",
    "Quintana Roo",
    "San Luis Potosí",
    "Sinaloa",
    "Sonora",
    "Tabasco",
    "Tamaulipas",
    "Tlaxcala",
    "Veracruz",
    "Yucatán",
    "Zacatecas"
]
def getRegionLatLon(regions):
    """Geocode each region name via the Google Maps Geocoding API.

    Returns a dict mapping region name -> {'lat': ..., 'lng': ...}.
    Propagates requests exceptions on network failure and
    KeyError/IndexError when the API returns no results for a region.
    """
    # SECURITY: the API key below is hard-coded in source and therefore
    # exposed to anyone who can read this file -- move it to an environment
    # variable or config file and rotate the key.
    latlon = {}
    for region in regions:
        # Skip regions already resolved (dedupes repeated input names).
        if region not in latlon:
            r = requests.get(
                'https://maps.googleapis.com/maps/api/geocode/json'
                '?key=AIzaSyAH3VYuagoPFD67-SNs0zMJqXq14ty3BcY&address={}'.format(region),
                timeout=30,  # avoid hanging forever on a stalled connection
            )
            data = r.json()
            latlon[region] = data['results'][0]['geometry']['location']
    return latlon
# Geocode every region and cache the results to disk for later runs.
latlon = getRegionLatLon(regions)
print(latlon)
# Use a context manager so the file is closed even if pickling fails
# (the original opened/closed manually and leaked the handle on error).
with open('latlon.data', 'wb') as file:
    pickle.dump(latlon, file)
|
#Jessica Marshall
#ECE414 Machine Learning
#Conjugate Priors Programming Assignment
#MSE plots - Gaussian mu
##########################################
#import libraries
import math
import numpy as np
import matplotlib.pyplot as plt
##########################################
#generate normally distributed observations with awgn
#here we assume the variance is known, the mean is unknown parameter
#this is the likelihood function in Baye's rule
# True (unknown) mean and known variance of the Gaussian likelihood.
mu = 0
sigma = 10
variance = sigma**2
precision = 1/variance
N = 150 #number of observations
# Additive white Gaussian noise parameters on top of the signal.
mu_noise = 0
sigma_noise = sigma/2
variance_noise = sigma_noise**2
##########################################
#mean squared error of maximum likelihood
numIter = 10 #times we run the estimator (requires new data)
ML = np.zeros((numIter, N)) #hold max likelihood values of each observation update for each estimator run
data = np.zeros((numIter, N))
for i in range(0, numIter): #using the new mu and sigma, run estimator multiple times by generating list of observations multiple times
    X_ML = np.random.normal(mu+ mu_noise, math.sqrt(variance + variance_noise), N) #generate N observations
    # Keep the draws so the conjugate-prior section below reuses them.
    data[i] = X_ML
    for j in range(0, N):
        # ML estimate of the mean = running sample mean over the first j+1 points.
        ML[i, j] = (1/(j+1))*(X_ML[:j+1].sum()) #store ML estimate for this observation index
#for each observation "set" calculate the MSE of each ML estimate
SE = (((ML - mu)**2))
MSE_ML = np.mean(SE, axis=0)
#plot mean squared error of max likelihood estimate at each observation
fig2 = plt.figure()
x = np.linspace(1, N, N)
ax21 = fig2.add_subplot(1, 1, 1)
ax21.plot(x, MSE_ML, 'b', label='MSE of Max Likelihood Estimate')
ax21.set_title('MSE of Max Likelihood Estimate and Conjugate Prior - Mean of Gaussian with Known Variance', fontweight='bold')
##########################################
#update equations for Gaussian
#the conjugate prior of the Gaussian with known variance is a Gaussian
#define hyperparameters of initial prior
mu_0 = [-5, 2, 6] #choose 3 different hyperparameter mus & sigmas
sigma_0 = [10, 20, 2] #make this very broad
precision_0 = np.ones(len(sigma_0))/(np.power(sigma_0, 2))
color = ['y','r', 'c']
SE_conjprior= np.zeros((numIter, N))
for l in range(0, len(mu_0)):
    #do this for multiple different hyperparameters
    # NOTE(review): these two initial values are overwritten inside the
    # inner loop before they are ever read -- they appear to be dead code.
    update_mu = mu_0[l]
    update_sigma = sigma_0[l]
    for i in range(0, numIter):
        X_ML = data[i] #use same observations as max likelihood for each trial to ensure comparability
        for j in range(0, N): #N is the obseration in question, one index off
            n_update = j + 1
            sum_xn = sum(X_ML[0:n_update])
            # Posterior mean/variance update for a Gaussian prior on mu.
            update_mu = ((mu_0[l]*precision_0[l]) + (sum_xn*precision))/((precision_0[l])+(n_update/variance)) #where sum is the sum of observations up to xn
            update_variance = 1/(precision_0[l] + n_update*precision)
            update_sigma = math.sqrt(update_variance)
            #mean squared error of update parameters
            # NOTE(review): unlike the ML branch, the squared error here is
            # additionally scaled by 1/(j+1) -- confirm this weighting is
            # intentional and not a copy-paste slip.
            SE_conjprior[i, j] = (1/(j+1))*((mu-update_mu)**2)
    #plot MSE of conjugate prior update at each obsercation
    MSE_conjprior = np.mean(SE_conjprior, axis=0)
    ax22 = fig2.add_subplot(1, 1, 1)
    ax22.plot(x, MSE_conjprior, color[l], label='MSE of Conjugate Prior: mu = ' + str(mu_0[l]) + ', sigma = ' + str(sigma_0[l]))
handles, labels = ax21.get_legend_handles_labels()
ax22.legend(handles, labels)
ax22.set_xlabel('Observations')
ax22.set_ylabel('Mean Squared Error')
|
from models.cards.audio_card import AudioCard
import pytest
xfail = pytest.mark.xfail
@xfail
def test_audio_site(cards_table, db_conn):
    """
    Expect an audio card to require site.
    """
    # Insert without 'site' -> exactly one validation error expected.
    card, errors = AudioCard.insert(db_conn, {
        'unit_id': 'RUF531',
        'name': 'What is?',
        'audio_id': 'AJkl78',
    })
    assert len(errors) == 1
    # Supplying the site clears the error.
    card, errors = card.update(db_conn, {'site': 'soundcloud'})
    assert len(errors) == 0
@xfail
def test_audio_audio_id(cards_table, db_conn):
    """
    Expect an audio card to require audio_id.
    """
    # Insert without 'audio_id' -> exactly one validation error expected.
    card, errors = AudioCard.insert(db_conn, {
        'unit_id': 'RUF531',
        'name': 'What is?',
        'site': 'soundcloud',
    })
    assert len(errors) == 1
    # Supplying the audio_id clears the error.
    card, errors = card.update(db_conn, {'audio_id': 'JKfoej89'})
    assert len(errors) == 0
@xfail
def test_validate_response(db_conn, cards_table):
    """
    Expect to check if a given response is valid for the card kind.
    """
    # Placeholder: intentionally failing until response validation exists.
    assert False
|
class QuickFind:
    """Quick-find union-find over elements 0..n-1.

    parent[i] holds the component id of element i; two elements are
    connected iff they share the same id.  union() costs O(n); the
    connectivity check is O(1).
    """

    def __init__(self, n):
        self.n = n
        self.count = n  # number of connected components
        # BUG FIX: `parent` was a mutable CLASS attribute, so every
        # QuickFind instance shared (and kept appending to) the same list.
        # Make it a fresh per-instance list instead.
        self.parent = list(range(n))

    def union(self, p, q):
        """Merge the components containing p and q (no-op if already joined)."""
        pid = self.parent[p]
        qid = self.parent[q]
        if pid == qid:
            return
        # Relabel every member of p's component with q's component id.
        for i in range(self.n):
            if self.parent[i] == pid:
                self.parent[i] = qid
        self.count -= 1

    def connected(self, p, q):
        """Print diagnostics and return whether p and q share a component."""
        print("Connected Components:" , self.count)
        print("Parent Array: ", self.parent)
        is_connected = self.parent[p] == self.parent[q]
        print("Is", str(p), "and", str(q), "connected: ", is_connected)
        # Returning the result is backward-compatible (was implicitly None).
        return is_connected
|
from django import forms
from .models import TripPersonalInfo, TripBooking, CustomTrip, Subscription, Review
class TripPersonalInfoForm(forms.ModelForm):
    """Form for the traveller's personal details on a trip booking."""

    # Fields that must be filled in even if the model allows blanks.
    _required_fields = ('title', 'first_name', 'last_name', 'email',
                        'phone_number', 'group_of_people')

    def __init__(self, *args, **kwargs):
        super(TripPersonalInfoForm, self).__init__(*args, **kwargs)
        for field_name in self._required_fields:
            self.fields[field_name].required = True

    class Meta:
        model = TripPersonalInfo
        fields = ('title', 'first_name', 'middle_name', 'last_name', 'email', 'phone_number',
                  'passport_number', 'place_of_issue', 'issue_date', 'expire_date',
                  'emergency_contact_number', 'are_children_included', 'group_of_people',
                  'people_above_60_age')
class TripBookForm(forms.ModelForm):
    """Trip booking form; tags the trip selector with a CSS class."""

    def __init__(self, *args, **kwargs):
        super(TripBookForm, self).__init__(*args, **kwargs)
        # All three booking fields are mandatory.
        for field_name in ('trip_name', 'nationality', 'start_date'):
            self.fields[field_name].required = True
        self.fields['trip_name'].widget.attrs.update({'class' : 'book_trip'})

    class Meta:
        model = TripBooking
        fields = ('nationality', 'trip_name', 'start_date')
class CustomTripForm(forms.ModelForm):
    """Enquiry form for a custom-built trip."""

    def __init__(self, *args, **kwargs):
        super(CustomTripForm, self).__init__(*args, **kwargs)
        # Contact details and trip outline are mandatory.
        for field_name in ('trip_name', 'duration', 'price_range',
                           'full_name', 'email', 'contact'):
            self.fields[field_name].required = True

    class Meta:
        model = CustomTrip
        fields = ('trip_name', 'duration', 'price_range', 'full_name', 'email', 'country', 'contact', 'message')
class SubscriptionForm(forms.ModelForm):
    """Newsletter subscription form."""

    def __init__(self, *args, **kwargs):
        super(SubscriptionForm, self).__init__(*args, **kwargs)
        # Name and email are mandatory for a subscription.
        for field_name in ('first_name', 'last_name', 'email'):
            self.fields[field_name].required = True

    class Meta:
        model = Subscription
        fields = '__all__'
class ReviewForm(forms.ModelForm):
    """Customer review submission form."""

    def __init__(self, *args, **kwargs):
        super(ReviewForm, self).__init__(*args, **kwargs)
        # Every identifying and content field is mandatory (rating is not).
        for field_name in ('full_name', 'email', 'address', 'review',
                           'content', 'contact'):
            self.fields[field_name].required = True

    class Meta:
        model = Review
        fields = ('full_name', 'email', 'address', 'review', 'content', 'contact', 'rating')
|
from django.conf.urls import patterns, url
# NOTE(review): `patterns()` with dotted-string view names was deprecated in
# Django 1.8 and removed in 1.10 -- this urlconf targets an older Django.
urlpatterns = patterns('question.views',
    url(r'^leaderboard/$', 'leaderboard', name='leaderboard'),
    url(r'^$', 'question_home', name='home'),
    # Question pages take the question number from the URL.
    url(r'^(?P<qno>\d+)/$', 'question', name='question'),
    url(r'^(?P<qno>\d+)/details/$', 'question_details', name='question_details'),
    url(r'^language/(?P<lno>\d+)/details/$', 'language_details', name='language_details'),
    # NOTE(review): no trailing `$` -- matches any path under detail_list/.
    url(r'^detail_list/', 'detail_list', name='detail_list'),
    url(r'^attempt/(?P<att>\d+)/$', 'attempt', name='attempt'),
)
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# 书籍基本信息模型
class BookInfo(models.Model):
    """Basic information about a book: title, cover, genre, counters."""

    # Genre choices, stored as small ints.
    # NOTE(review): (5, "军事") and (9, "军事") are duplicate labels --
    # probably one of them should be a different genre; confirm.
    BOOK_TYPE_CHOICES = (
        (1, "仙剑"),
        (2, "玄幻"),
        (3, "悬疑"),
        (4, "奇幻"),
        (5, "军事"),
        (6, "历史"),
        (7, "竞技"),
        (8, "科幻"),
        (9, "军事"),
        (10, "校园"),
        (11, "社会"),
        (12, "其它"),
    )
    # Serialization state: 0 = ongoing, 1 = finished.
    BOOK_STATE_CHOICE = (
        (0, "未完结"),
        (1, "已完成"),
    )
    # Title.
    name = models.CharField("书名", max_length=20)
    # Cover image.
    cover = models.ImageField("书籍封面")
    # Short description.
    describe = models.TextField("书籍概要简介")
    # BUG FIX: the default must be a choice VALUE (int), not its label --
    # it was the string "仙剑", which fails integer field validation.
    type = models.SmallIntegerField("书籍类型", default=1, choices=BOOK_TYPE_CHOICES)
    # Word count.
    word_number = models.IntegerField("字数", default=0)
    # BUG FIX: default was the label string "未完成"; use 0 (未完结).
    state = models.SmallIntegerField("更新状态", default=0, choices=BOOK_STATE_CHOICE)
    # Number of chapters published so far.
    chapters_number = models.SmallIntegerField("更新章节数", default=0)
    # NOTE(review): auto_now_add only sets this at creation; for a "last
    # updated" timestamp auto_now=True is the usual choice -- confirm intent.
    update_time = models.DateTimeField("更新时间", auto_now_add=True)
    # Click / subscription / follow counters.
    clicks_number = models.IntegerField("点击量", default=0)
    subscribers_number = models.IntegerField("订阅量", default=0)
    chase_books_number = models.IntegerField("追书量", default=0)

    def add_clicks_number(self):
        # Increments in memory only; the caller is responsible for .save().
        self.clicks_number += 1

    def add_subscriber_number(self):
        self.subscribers_number += 1

    def add_chase_number(self):
        self.chase_books_number += 1

    def __unicode__(self):
        return u'%s %s %s %s %s %s %s %s %s' %(self.name, self.type, self.word_number, self.state, self.chapters_number, self.update_time, self.clicks_number, self.subscribers_number, self.chase_books_number)

    class Meta:
        db_table = "book_info"
# 书籍内容模型
class BooksContent(models.Model):
    """One chapter of a book's content."""

    # Owning book.
    book_id =models.ForeignKey(BookInfo, verbose_name="书本ID")
    # Chapter number within the book.
    chapters_id = models.SmallIntegerField("章节数")
    # BUG FIX: CharField requires max_length; it was missing, which makes
    # Django's model checks / makemigrations fail for this field.
    chapters_name = models.CharField("章节名称", max_length=50)
    # Chapter body text.
    chapters_content = models.TextField("章节内容")
    # Set once when the chapter row is created (auto_now_add).
    update_time = models.DateTimeField("章节更新时间", auto_now_add=True)

    class Meta:
        db_table = "book_content"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author:rachpt
from random import random
from threading import Thread
from functools import partial
from datetime import date, timedelta
from time import sleep, strftime, localtime
from tkinter import (
StringVar,
E,
W,
Label,
Button,
Radiobutton,
Frame,
IntVar,
LabelFrame,
DISABLED,
ACTIVE,
NORMAL,
messagebox,
)
from . import backend
from . import mymessage
class RunPage(Frame):
    def __init__(self, parent, controller):
        """Build the booking page: date/time pickers, status panel, court grid."""
        Frame.__init__(self, parent)
        self.controller = controller
        # --- reactive state shared with the background monitor thread ---
        self.reserve_time = StringVar()
        self.reserve_date = StringVar()
        self.success = StringVar()  # "Yes" once a booking has succeeded
        self.success.set("No")
        self.counter = IntVar()  # refresh counter displayed in the UI
        self.counter.set(0)
        self.run_flag = IntVar()  # 1 while the monitor thread should keep running
        self.run_flag.set(0)
        self.T = {}  # worker threads keyed by a random id (see start_job)
        self.message_count_down = False
        self.show_notice = True
        self.successed_info = []
        self.Config_Path = self.controller.Config_Path
        self.Cookie_Path = self.controller.Cookie_Path
        # --- frame 1: radio buttons for the next 7 days and 7 time slots ---
        self.frame_1 = LabelFrame(
            self, text="选择预定日期与开始时间(点击自动选择并查询)", height=100, width=630
        )
        self.day = 0
        self.days = {}
        self.choose_days = {}
        for i in range(7):
            _today = date.today()
            self.days[i] = StringVar()
            _day = _today + timedelta(days=i)
            _day = _day.strftime("%Y-%m-%d")
            self.days[i].set(_day)
            self.choose_days[i] = Radiobutton(
                self.frame_1,
                text=self.days[i].get(),
                variable=self.reserve_date,
                value=self.days[i].get(),
                command=partial(self.set_reserve_date, i),
            )
        self.times = {}
        self.choose_times = {}
        for i in range(7):
            # Two-hour slots starting every two hours from 08:00.
            self.times[i] = StringVar()
            _time = "{0:02d}:00:00".format(8 + 2 * i)
            self.times[i].set(_time)
            self.choose_times[i] = Radiobutton(
                self.frame_1,
                text=self.times[i].get(),
                variable=self.reserve_time,
                value=self.times[i].get(),
                command=self.set_reserve_time,
            )
        # --- frame 2: selection summary, counters, start/stop buttons ---
        self.frame_2 = LabelFrame(self, height=150, width=630)
        self.label_date_1 = Label(self.frame_2, text="预定日期:", anchor=E)
        self.label_date_2 = Label(
            self.frame_2, textvariable=self.reserve_date, anchor=W
        )
        self.label_time_1 = Label(self.frame_2, text="预定时间段(2小时):", anchor=E)
        self.label_time_2 = Label(
            self.frame_2, textvariable=self.reserve_time, anchor=W
        )
        self.label_couner = Label(self.frame_2, text="刷新次数:", anchor=E)
        self.couner_num = Label(self.frame_2, textvariable=self.counter)
        self.label_sucessed = Label(self.frame_2, text="是否预定成功?:", anchor=E)
        self.is_sucessed = Label(self.frame_2, bg="Red", textvariable=self.success)
        self.button_start = Button(
            self.frame_2, text="开始监控", bg="SpringGreen", command=self.start_job
        )
        self.button_stop = Button(
            self.frame_2,
            text="结束",
            state=DISABLED,
            bg="LightGray",
            command=self.stop_job,
        )
        self.label_notice = Label(self.frame_2, text="显示警告与提示?", anchor=E)
        self.button_notice = Button(
            self.frame_2, text="是", bg="Pink", command=self.turn_on_notice
        )
        self.label_sucessed_place = Label(self.frame_2, text="预定成功的场地:", anchor=E)
        self.label_successed_place_info = Label(self.frame_2)
        # --- frame 3: one button per court showing its live status ---
        self.frame_3 = LabelFrame(self, text="场地状态(点击刷新)", height=600, width=630)
        self.courts = {}
        self.show_courts = {}
        for i in range(8):
            self.courts[i] = IntVar()
            # NOTE(review): setting an IntVar to "" -- confirm intended.
            self.courts[i].set("")
            self.show_courts[i] = Button(
                self.frame_3,
                font=("Helvetica 10"),
                text="{}号场地".format(i + 1),
                command=self.get_status,
            )
        self.create_page()
    def create_page(self):
        """Lay out all widgets using absolute place() geometry."""
        # Geometry constants (pixels).
        f_x = 56
        height = 28
        space = 20
        f1_width = 98
        f3_width = 120
        self.frame_1.place(
            x=f_x - 30, y=space, width=700, height=height * 2 + space * 3
        )
        # Day buttons on the first row, time buttons on the second.
        for i in range(7):
            self.choose_days[i].place(
                x=5 + f1_width * i, y=10, width=f1_width, height=height
            )
            self.choose_times[i].place(
                x=5 + f1_width * i, y=20 + height, width=f1_width, height=height
            )
        if not self.reserve_date.get():
            # Default selection: the date two days out.
            self.choose_days[2].select()
            self.day = 2
        if not self.reserve_time.get():
            # Default selection: the 20:00 slot.
            self.choose_times[6].select()
        self.frame_2.place(
            x=f_x,
            y=space + height * 4 + space,
            width=630,
            height=height * 3 + space * 4,
        )
        self.label_date_1.place(x=space, y=space, width=120, height=height)
        self.label_date_2.place(x=space + 120, y=space, width=80, height=height)
        self.label_time_1.place(x=space, y=space * 2 + height, width=120, height=height)
        self.label_time_2.place(
            x=space + 120, y=space * 2 + height, width=80, height=height
        )
        self.button_start.place(x=space + 100 + 100, y=space, width=180, height=height)
        self.button_stop.place(
            x=space + 120 + 80, y=space * 2 + height, width=180, height=height
        )
        self.label_couner.place(
            x=space * 2 + 100 + 100 + 180, y=space, width=100, height=height
        )
        self.couner_num.place(
            x=space * 2 + 100 + 100 + 180 + 100, y=space, width=80, height=height
        )
        self.label_sucessed.place(
            x=space * 2 + 100 + 100 + 180,
            y=space * 2 + height,
            width=100,
            height=height,
        )
        self.is_sucessed.place(
            x=space * 2 + 100 + 100 + 180 + 100,
            y=space * 2 + height,
            width=80,
            height=height,
        )
        self.label_notice.place(
            x=space, y=space * 3 + height * 2, width=120, height=height
        )
        self.button_notice.place(
            x=space + 120, y=space * 3 + height * 2, width=50, height=height
        )
        # --- court status grid: 2 rows x 4 columns of buttons ---
        self.frame_3.place(x=f_x, y=150 + 100 + space * 4, width=630, height=height * 6)
        for i in range(8):
            self.show_courts[i].place(
                x=10 + (f3_width + 40) * (i % 4),
                y=10 + (height * 2 + space) * (i // 4),
                width=f3_width,
                height=height * 2,
            )
            self.show_courts[i].configure(
                background="LightGray", highlightbackground="Gold", foreground="Black"
            )
    def job(self):
        """Worker loop: poll court status and attempt booking during open hours."""
        _st = "07:59:30"  # booking window opens
        _end = "22:00:00"  # booking window closes
        i = 1  # refresh counter shown in the UI
        infos = backend.load_config(self.Config_Path)
        while True:
            if self.run_flag.get() == 0:
                # stop_job() cleared the flag -- exit the thread.
                break
            elif _st <= strftime("%H:%M:%S", localtime()) < _end:
                # Inside the booking window: poll faster around release time.
                if backend.judge_time():
                    dt = 2
                else:
                    dt = 20
                self.update_status(True, infos, dt)
            else:
                # Outside the window: just refresh the display slowly.
                dt = 40
                self.update_status(False, infos, dt)
            sleep(dt)
            self.counter.set(i)
            self.couner_num.configure(textvariable=self.counter)
            i += 1
    def start_job(self):
        """Start the monitor thread unless already running or already booked."""
        if self.run_flag.get() == 0 and self.success.get() == "No":
            self.run_flag.set(1)
            # Freeze date/time choices while the monitor is running.
            for i in range(7):
                self.choose_days[i].config(state=DISABLED)
                self.choose_times[i].config(state=DISABLED)
            self.button_start.configure(
                bg="LightGray", state=ACTIVE, text="正在运行 ...", fg="Green"
            )
            self.button_stop.configure(bg="Tomato", state=NORMAL, text="结束", fg="Black")
            # sort_place_order(self.controller)
            # Launch a daemon thread keyed by a random id so it can be tracked.
            ct = int(random() * 10000)
            self.T[ct] = Thread(target=self.job, args=())
            self.T[ct].daemon = True
            self.T[ct].start()
        elif self.success.get() == "Yes":
            messagebox.showinfo("提示", " =_=已经预定到啦=_= \n\n 请网页上查看! \n")
        else:
            messagebox.showinfo("提示", " =_=已经在运行啦=_= \n\n 不要重复点击! \n")
def stop_job(self):
    """Stop the background monitoring job (stop-button handler)."""
    if self.run_flag.get() == 1:
        self.run_flag.set(0)  # job() checks this flag and exits its loop
        # unlock the day/time selectors again
        for i in range(7):
            self.choose_days[i].config(state=NORMAL)
            self.choose_times[i].config(state=NORMAL)
        self.button_stop.configure(bg="Gray", state=ACTIVE, text="已经停止", fg="White")
        self.button_start.configure(
            bg="SpringGreen", state=NORMAL, text="开始监控", fg="Black"
        )
    else:
        # nothing is running -> inform the user
        messagebox.showinfo("提示", " =_=当前没有后台监控任务=_= \n\n 不要重复点击! \n ")
def update_status(self, doit=False, infos=None, dt=0, mark=True):
    """Refresh every court tile and optionally try to book a free court.

    doit : when True, attempt to book any court reported as bookable.
    infos: companion/config info loaded from the config file.
    dt   : current polling interval in seconds (shown in retry messages).
    mark : recursion guard — mark_successed_place() calls back into this
           method with mark=False.
    """
    _date = self.reserve_date.get()
    _time = self.reserve_time.get()
    if _date and _time:
        res = {}
        court = backend.pian_status
        res, _ = backend.get_status(
            self.Config_Path, self.Cookie_Path, (_date, _time)
        )
        # honor the preferred court order from the config when present
        if infos and infos["place_sort"]:
            sorted_keys = sort_place_order(court, infos["place_sort"])
        else:
            sorted_keys = res.keys()
        for key in sorted_keys:
            # status codes: 2 booked; 4 closed; 1 bookable; 3 in use;
            # 5 booking in progress; '' not bookable
            ii = int(court[key])
            res_status = res[key][0]
            res_note = res[key][1]
            if res_status == 1:
                self.try_to_reverse(doit, infos, key, ii, _date, _time, dt)
            elif res_status == 2:
                self.show_courts[ii - 1].configure(
                    text="{}号场地\n已被预约".format(ii),
                    background="Black",
                    highlightbackground="Gold",
                    foreground="Gold",
                    font=("Helvetica 10"),
                )
            elif res_status == 3:
                self.show_courts[ii - 1].configure(
                    text="{}号场地\n使用中".format(ii),
                    background="Yellow",
                    highlightbackground="Gold",
                    foreground="Gold",
                    font=("Helvetica 10"),
                )
            elif res_status == 4:
                self.show_courts[ii - 1].configure(
                    text="{}号场地\n不开放".format(ii),
                    background="Gray",
                    highlightbackground="Gold",
                    foreground="White",
                    font=("Helvetica 10"),
                )
                # append the server's note; shrink the font for long notes
                if res_note:
                    if len(res_note) >= 10:
                        self.show_courts[ii - 1].configure(
                            text="{}号场地(不开放)\n{}".format(ii, res_note),
                            font=("Helvetica 8"),
                        )
                    else:
                        self.show_courts[ii - 1].configure(
                            text="{}号场地(不开放)\n{}".format(ii, res_note)
                        )
            elif res_status == 5:
                self.show_courts[ii - 1].configure(
                    text="{}号场地\n预约中".format(ii),
                    background="Green",
                    highlightbackground="Gold",
                    foreground="Cyan",
                    font=("Helvetica 10"),
                )
            else:
                self.show_courts[ii - 1].configure(
                    text="{}号场地\n不可预约".format(ii),
                    background="LightGray",
                    highlightbackground="Gold",
                    foreground="Gold",
                    font=("Helvetica 10"),
                )
        if mark:
            self.mark_successed_place(court, _date, _time)
        if doit and infos:
            # no court is bookable or mid-booking: stop trying altogether
            if res and (1, "") not in res.values() and (5, "") not in res.values():
                self.stop_job()  # leave the worker thread
                messagebox.showinfo(
                    "提示",
                    "-" * 20
                    + "\n =_=没有可预约的场地=_= \n\n 请选择其他时间和日期的场地预约! \n ",
                )
def try_to_reverse(self, doit, infos, key, ii, _date, _time, dt):
    """Attempt to book one bookable court, then color its tile.

    doit/infos gate whether a booking request is actually sent; key is the
    backend court key, ii the 1-based court number, dt the retry interval.
    """
    _text = "{}号场地\n可预约".format(ii)
    if doit and infos and self.success.get() != "Yes" and self.run_flag.get() == 1:
        is_ok = False
        try:
            is_ok = backend.appointment(
                self.Config_Path,
                self.Cookie_Path,
                key,
                _date,
                _time,
                infos,
                self.day,
            )
        except UserWarning as UW:
            # transient failure: optionally show a countdown box, retry later
            msg = (
                "-" * 20 + "\n{}\n".format(UW) + "-" * 20 + "\n{}秒后重试".format(dt),
            )
            # NOTE(review): the trailing comma makes msg a 1-tuple — confirm
            # CountDownMessageBox expects a tuple rather than a string.
            if not self.message_count_down and self.show_notice:
                mymessage.CountDownMessageBox(self, msg)
        except Warning as War:
            # hard failure: stop monitoring and surface the error
            _text = "{}号场地\n尝试预约,已失败".format(ii)
            self.stop_job()  # leave the worker thread
            msg = "-" * 20 + "\n错误信息:\n{}\n".format(War) + "-" * 20
            if self.show_notice:
                messagebox.showerror("发生错误", msg)
        if is_ok:
            # booking succeeded: remember what was booked and stop the job
            self.success.set("Yes")
            self.successed_info = [key, _date, _time]
            self.stop_job()  # leave the worker thread
    self.color_target_court(ii, _text)
def color_target_court(self, ii, _text):
    """Color court ``ii``'s tile green with the given label text."""
    self.show_courts[ii - 1].configure(
        text=_text,
        background="Green",
        highlightbackground="Gold",
        foreground="Gold",
        font=("Helvetica 10"),
    )
def mark_successed_place(self, court, _date, _time):
    """Highlight the court this program booked, if the booking still holds."""
    if (
        self.successed_info
        and _date == self.successed_info[1]
        and _time == self.successed_info[2]
    ):
        # Refresh first (mark=False prevents infinite recursion); if the
        # court now shows as booked (status 2), our reservation succeeded.
        self.update_status(mark=False)
        res, _ = backend.get_status(
            self.Config_Path, self.Cookie_Path, (_date, _time)
        )
        key = self.successed_info[0]
        if res[key][0] == 2:
            ii = int(court[key])
            # highlight the success flag ("Yes")
            self.is_sucessed.configure(
                textvariable=self.success, bg="LightGray", fg="Magenta"
            )
            # show the booked court / date / time
            success_text = str(ii) + "号 " + _date + " " + _time
            self.label_successed_place_info.configure(
                fg="Magenta", text=success_text
            )
            self.label_sucessed_place.place(
                x=20 + 200, y=20 * 3 + 28 * 2, width=180, height=28
            )
            self.label_successed_place_info.place(
                x=20 + 380, y=20 * 3 + 28 * 2, width=200, height=28
            )
            self.show_courts[ii - 1].configure(
                text="{}号场地\n程序预约了该场地".format(ii),
                background="Magenta",
                highlightbackground="Green",
                foreground="White",
                font=("Helvetica 10"),
            )
        else:
            # the booking is no longer visible — clear the success state
            self.successed_info = []
            self.success.set("No")
def set_reserve_date(self, day):
    """Date-selection callback: refresh the display, then store the day.

    NOTE(review): update_status() runs before self.day is assigned, so the
    refresh still uses the previous day's value — confirm this is intended.
    """
    self.update_status()
    self.day = day
def set_reserve_time(self):
    """Time-selection callback: refresh the status display."""
    self.update_status()
def get_status(self):
    """Manual refresh; skipped while the background job is running."""
    if self.run_flag.get() != 1:
        self.update_status()
def turn_on_notice(self):
    """Toggle pop-up notifications and update the toggle button's look."""
    enabled = not self.show_notice
    self.show_notice = enabled
    if enabled:
        self.button_notice.configure(text="是", bg="Pink")
    else:
        self.button_notice.configure(text="否", bg="LightGray")
def sort_place_order(place_dict, order_str):
    """Order court keys by a user-given preference string.

    place_dict : mapping of backend court key -> court number string ('1'-'8').
    order_str  : whitespace-separated court numbers, most preferred first
                 (may be empty/None).

    Returns a list of place_dict keys: preferred courts first, then the
    remaining courts 1-8 in numeric order.  When order_str is falsy the
    dict's own key view is returned unchanged (original behavior).

    Fixes over the original: tokens are validated against the actual courts
    in place_dict (the old lexicographic check ``"0" <= i <= "8"`` admitted
    multi-character tokens such as "10" and then raised KeyError), and the
    completion loop no longer assumes all eight courts exist.
    """
    if not order_str:
        return place_dict.keys()
    # invert to court-number -> key for O(1) lookups
    reversed_place_dict = {v: k for k, v in place_dict.items()}
    order_str_list = order_str.split()
    ret_list = []
    for token in order_str_list:
        # accept only numbers that name a real court; ignore everything else
        if token.isdigit() and token in reversed_place_dict:
            ret_list.append(reversed_place_dict[token])
    # complete with any courts 1-8 that were not explicitly ordered
    for i in range(1, 9):
        num = str(i)
        if num not in order_str_list and num in reversed_place_dict:
            ret_list.append(reversed_place_dict[num])
    return ret_list
|
#!/usr/bin/env python3
"""
- Create functions to get a reference genome and a dataset of aligned genomes.
- Search where in the reference genome this dataset starts.
- use regex to do it.
- Also get a reference polyprotein record, associated with the reference genome.
- Make a mapping between a position in the genome
and an aminoacid in the polyprotein.
- Make a dict to keep {gen_pos_range : protein}
- ALL POSITIONS START AT ZERO!!! 000000000000
Duplicate line: Cmd + Shift + D
Move the current line Up or Down: Cmd + Ctrl + Up
Select the next matching characters: Cmd + D
Unselect Cmd + U
Select all matching characters Cmd + Ctrl + G
Toggle comments Cmd + /
"""
import Bio
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.Alphabet import IUPAC
import pandas as pd
import numpy as np
import re
import datetime
"""
#######################################################################
#######################################################################
"""
def read_data(ref_genome_file, ref_polyprot_file, querry_seq_file):
    """Load the reference genome, reference polyprotein and query sequence.

    The two references are single GenBank records; the query file is a
    multi-FASTA from which only the first record's sequence is kept.
    Returns (genome_record, polyprot_record, querry_seq).
    """
    genome_record = SeqIO.read(ref_genome_file, "genbank")
    polyprot_record = SeqIO.read(ref_polyprot_file, "genbank")
    fasta_records = SeqIO.parse(querry_seq_file, "fasta")
    querry_seq = next(fasta_records).seq
    return (genome_record, polyprot_record, querry_seq)
def find_align_start(seq, ref_genome, search_size):
    """Locate where the dataset alignment starts in the reference genome.

    seq         : dataset sequence (str or Bio.Seq) — its first
                  ``search_size`` characters are used as the probe.
    ref_genome  : SeqRecord-like object exposing a ``.seq`` attribute.
    search_size : number of leading nucleotides included in the search.

    Returns the 0-based start position of the probe in the reference.
    Raises ValueError when the probe does not occur (the original code
    crashed with AttributeError on ``None.start()`` in that case).
    """
    ref = str(ref_genome.seq)
    probe = str(seq[:search_size])
    # re.escape guards against non-nucleotide characters (gaps, ambiguity
    # codes) being interpreted as regex metacharacters.
    match = re.search(re.escape(probe), ref)
    if match is None:
        raise ValueError(
            "alignment start not found in reference (probe: %r)" % probe
        )
    return match.start()
def read_polyprotein(ref_polyprot):
    """Map protein region names to their polyprotein locations.

    Walks the record's features and, for each feature carrying a
    'region_name' qualifier, keeps {region_name: feature.location}
    (later duplicates overwrite earlier ones, as before).
    """
    return {
        feature.qualifiers['region_name'][0]: feature.location
        for feature in ref_polyprot.features
        if 'region_name' in feature.qualifiers
    }
def pos_aminoacid(nn_pos, seq_rel_start, ref_genome, ref_polyprot):
    """Translate a dataset nucleotide position into polyprotein coordinates.

    nn_pos        : nucleotide position in the dataset sequence (0-based).
    seq_rel_start : position where the dataset starts in the reference genome.
    ref_genome    : SeqRecord-like reference genome with a CDS feature.
    ref_polyprot  : SeqRecord-like reference polyprotein.

    Returns (aa_pos, aa, codon, codon_pos):
      aa_pos    : aminoacid index in the translated polyprotein.
      aa        : the translated amino acid at that index.
      codon     : the three-letter codon taken from the reference genome.
      codon_pos : position of nn_pos within its codon (0, 1, 2).

    Raises ValueError when the reference genome carries no CDS feature
    (the original left ``cds_start`` unbound and died with NameError).
    """
    cds_start = None
    for feature in ref_genome.features:
        if feature.type == "CDS":
            cds_start = int(feature.location.start)
            break
    if cds_start is None:
        raise ValueError("reference genome has no CDS feature")
    # position in the reference genome CDS, relative to its CDS start
    cds_pos = (nn_pos + seq_rel_start) - cds_start
    # aminoacid position in the polyprotein
    aa_pos = cds_pos // 3
    # nucleotide position inside the codon (0, 1, 2)
    codon_pos = cds_pos % 3
    # translated aminoacid
    aa = ref_polyprot.seq[aa_pos]
    # codon starting position in the dataset sequence
    codon_start = nn_pos - codon_pos
    # codon start position in the reference genome
    ref_codon_start_pos = codon_start + seq_rel_start
    # three-letter codon
    codon = ref_genome.seq[ref_codon_start_pos:ref_codon_start_pos + 3]
    return (aa_pos, aa, codon, codon_pos)
def seq_snv_info(nn_pos, seq, ref_genome, ref_polyprot, search_size=20):
    """Describe the codon context of one position in a dataset sequence.

    nn_pos       : 0-based nucleotide position in the dataset sequence.
    seq          : the dataset (query) sequence; must support slicing and
                   ``.translate()`` (i.e. a Bio.Seq, not a plain str).
    ref_genome   : reference genome SeqRecord with a CDS feature.
    ref_polyprot : reference polyprotein SeqRecord.
    search_size  : probe length forwarded to find_align_start().

    Returns (codon_seq, aa_seq, ref_pos, codon_ref, aa_ref, codon_pos):
    the query codon and its translation, the position and codon in the
    reference, the reference aminoacid, and the in-codon offset.
    """
    seq_rel_start = find_align_start(seq, ref_genome, search_size)
    # find the CDS start in the reference genome
    # NOTE(review): if the record has no CDS feature, cds_start is never
    # bound and the arithmetic below raises NameError.
    for feature in ref_genome.features:
        if feature.type == "CDS":
            cds_start = int(feature.location.start)
            break
    # position in the reference genome relative to its start.
    ref_pos = (nn_pos + seq_rel_start)
    # position in the reference genome CDS, relative to its CDS start.
    cds_pos = (nn_pos + seq_rel_start) - cds_start
    # aminoacid position in the polyprotein
    aa_pos = (cds_pos) // 3
    # nucleotide position inside codon (0, 1, 2)
    codon_pos = (cds_pos) % 3
    # codon starting position in the dataset sequence
    codon_start = nn_pos - codon_pos
    # three-letter codon taken from the dataset sequence
    codon_seq = seq[codon_start:codon_start+3]
    aa_seq = codon_seq.translate()  # Bio.Seq translation of the query codon
    ref_codon_start_pos = codon_start + seq_rel_start
    # three-letter codon taken from the reference genome
    codon_ref = ref_genome.seq[ref_codon_start_pos:ref_codon_start_pos+3]
    # translated aminoacid from the reference polyprotein
    aa_ref = ref_polyprot.seq[aa_pos]
    return (codon_seq, aa_seq, ref_pos, codon_ref, aa_ref, codon_pos)
def which_protein(nn_snv, aa_pos, dic_prot, case):
    """Report which protein of the polyprotein contains aminoacid aa_pos.

    nn_snv   : nucleotide position of the SNV (used for reporting only).
    aa_pos   : aminoacid position in the reference polyprotein.
    dic_prot : {protein_name: location} as built by read_polyprotein().
    case     : label used to name the log file under ./OUTPUT/.

    Returns the protein name, or None when no protein contains aa_pos.

    NOTE(review): this function reads ``aa_ref`` and ``ref_polyprot`` from
    module globals (assigned in the __main__ section); calling it before
    those exist raises NameError.  Consider passing them as parameters.
    """
    print("SNV at position {} on genomes under analysis\n".format(nn_snv))
    log_file = './OUTPUT/log_{}_SNV_INFO.txt'.format(case)
    for prot in dic_prot:
        if aa_pos in dic_prot[prot]:
            print(prot)
            start = int(dic_prot[prot].start)
            print('Protein start in reference polyprotein: {}'.format(start))
            print('Residue "{}" position in reference polyprotein: {}'.format(aa_ref, aa_pos))
            pos_in_protein = aa_pos - start
            print('Residue "{}" position in {} protein: {}'.format(aa_ref, prot, pos_in_protein))
            # five residues of context on either side of the SNV
            neighbours = ref_polyprot.seq[aa_pos-5 : aa_pos +6]
            print('Neighbouring residues ("{}" is in the middle): {}'.format(aa_ref, neighbours))
            # append the same report to the per-case log file
            with open(log_file, 'a') as log:
                x = datetime.datetime.now()
                log.write('SNV info:\n{0}\n\n'.format(x))
                log.write("SNV at position {} on genomes under analysis.\n".format(nn_snv))
                log.write('Protein: {}.\n'.format(prot))
                log.write('Protein start in reference polyprotein: {}.\n'.format(start))
                log.write('Residue "{}" position in reference polyprotein: {}.\n'.format(aa_ref, aa_pos))
                log.write('Residue "{}" position in {} protein: {}.\n'.format(aa_ref, prot, pos_in_protein))
                log.write('Neighbouring residues ("{}" is in the middle): {}.\n'.format(aa_ref, neighbours))
                log.write('////////////////////////////////////////////////\n\n')
            return prot
            break  # NOTE(review): unreachable — the return above exits first
"""
#######################################################################
MAIN
#######################################################################
"""
if __name__ == "__main__":
#%%
# NOVO
# First I read the sequences I'll be working with.
# Even though both references are in GB format, which contains a lot of
# metadata info, SeqIO.read knows to separate it neatly so I can get
# the nucleotide or aminoacid sequence just by calling its "seq" attribute.
# The querry sequence must be one of the sequences I'm actually analizing.
# It must be the one that starts first and, preferably, with no gaps in
# its first 10 to 20 positions.
# IDEA for automatically finding a suitable querry inside a dataset:
# for seq in fasta:
# for position in seq:
# if nucleotide is not gap:
# lowest_position = position
# if lowest_position < best_lowest_position:
# if nucleotides from lowest_position to lowest_position+10 are not gap:
# best_lowest_position = lowest_position
# best_seq = seq
case = "HUMAN"
ref_genome_file = '../DATA/Reference_GENBANK_YFV/YFV_BeH655417_JF912190.gb'
ref_polyprot_file = '../DATA/Reference_GENBANK_YFV/YFV_BeH655417_JF912190_polyprot.gb'
querry_seq_file = '../DATA/Human_Analisys/DATA/querry_seq_marielton.fas'
ref_genome = SeqIO.read(ref_genome_file, "genbank")
ref_polyprot = SeqIO.read(ref_polyprot_file, "genbank")
querry = SeqIO.parse(querry_seq_file, "fasta")
first_record = next(querry)
querry_seq = first_record.seq
#%%
# (ref_genome, ref_polyprot, querry_seq) = read_data(ref_genome_file, ref_polyprot_file, querry_seq_file)
seq_rel_start = find_align_start(querry_seq, ref_genome, 10)
print(seq_rel_start)
s1 = querry_seq[:20]
s2 = ref_genome[seq_rel_start:seq_rel_start+20].seq
s1 == s2
dic_prot = read_polyprotein(ref_polyprot)
nn_snv = 1000
(aa_pos, aa, codon, codon_pos) = pos_aminoacid(nn_snv, seq_rel_start, ref_genome, ref_polyprot)
print(aa_pos)
print(aa)
print(codon)
print(codon_pos)
prot = which_protein(nn_snv, aa_pos, dic_prot, case)
print(prot)
(codon_seq, aa_seq, ref_pos, codon_ref, aa_ref, codon_pos) = seq_snv_info(1000, querry_seq, ref_genome, ref_polyprot)
codon_seq
aa_seq
codon_ref
aa_ref
codon_pos
ref_pos
ref_polyprot.seq[aa_pos]
"""
#######################################################################
#######################################################################
"""
|
"""opencads URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path,re_path
from event import views
app_name = 'event'  # URL namespace: reverse as 'event:<route name>'

urlpatterns = [
    # NOTE(review): the index route uses QuestionListView although this is
    # the event app — confirm the view name is intentional.
    re_path(r"^$", views.QuestionListView.as_view(), name="index_all"),
    # /event-detail/<pk>/ — detail page for a single event
    re_path(
        r"^event-detail/(?P<pk>\d+)/$",
        views.EventDetailView.as_view(),
        name="event_detail",
    ),
    # /post_event/ — create a new event
    re_path(r"^post_event/$", views.CreateEventView.as_view(), name="post_event"),
    # /event/vote/ — voting endpoint (function-based view)
    re_path(r"^event/vote/$", views.event_vote, name="event_vote"),
]
|
def multiplydigit(s1, digit, shift):
    """Multiply a little-endian digit string by one decimal digit.

    s1    : number with digits in little-endian order (string).
    digit : single character '0'-'9'.
    shift : number of zeros to prepend, i.e. multiply by 10**shift
            (ignored for digit '0', which short-circuits to [0]).

    Returns the partial product as a list of decimal digits (0-9) in
    little-endian order.  Fix over the original: the old code appended
    ``carry + val`` without renormalizing, so entries could reach 17 and
    the result was only a valid digit list after addResToAns() cleaned it
    up; here the carry is propagated properly.
    """
    if digit == '0':
        return [0]
    d = int(digit)
    res = []
    carry = 0
    for ch in s1:
        total = carry + d * int(ch)
        res.append(total % 10)
        carry = total // 10
    if carry > 0:
        res.append(carry)
    if shift > 0:
        res = [0] * shift + res
    return res
def addResToAns(ans, res):
    """Add the little-endian digit list ``res`` into ``ans`` in place.

    Entries of ``res`` may exceed 9; they are renormalized while being
    folded in.  ``ans`` grows as needed and a final carry is appended.
    """
    carry = 0
    for pos, digit in enumerate(res):
        if pos < len(ans):
            total = digit + ans[pos] + carry
            ans[pos] = total % 10
        else:
            total = digit + carry
            ans.append(total % 10)
        carry = total // 10
    if carry > 0:
        ans.append(carry)
def mul(s1, s2):
    """Multiply two non-negative decimal strings by schoolbook long
    multiplication and return the product as a string."""
    little1 = s1[::-1]
    little2 = s2[::-1]
    ans = []
    # one shifted partial product per digit of s2, accumulated into ans
    for shift, digit in enumerate(little2):
        partial = multiplydigit(little1, digit, shift)
        addResToAns(ans, partial)
    # ans holds the product little-endian; render each digit and flip
    return "".join(str(d) for d in ans)[::-1]
# Smoke test: 99 * 9 = 891
if __name__ == '__main__':
    print(mul("99", "9"))
|
import socket
import json
class LVconnection():
    """Thin TCP client for exchanging terminated text messages with a
    LabVIEW server on localhost:8089.

    Usage: connect() -> send_data()/recv_data() -> close().  self.conn is
    the live socket, or None when no connection is open.
    """
    def __init__(self):
        # active socket; None whenever no connection is open
        self.conn = None

    def recv_data(self):
        """Read until the message terminator and return the stripped text."""
        assert(self.conn!=None)
        buffer = ''
        while True:
            try:
                #read from the connection until CRLF
                #self.conn.settimeout(0.5) #set the timeout to 500 ms
                rtrn_msg = self.conn.recv(1024).decode("utf-8")
                buffer+=rtrn_msg
                # NOTE(review): r'\r\n' is the four literal characters
                # backslash-r-backslash-n, not an actual CRLF; this only
                # works if the server sends the escaped text — confirm the
                # wire protocol (send_data below emits a real CRLF).
                if r'\r\n' in buffer:
                    buffer = buffer.replace(r'\r\n','')
                    break
            except Exception as e:
                # best-effort: bail out on any socket error and return what
                # has been received so far ("Warming" is a typo for Warning)
                print('Warming: message not received, Error:{}'.format(e))
                break
        return buffer

    def send_data(self,data):
        """Send ``data`` followed by a CRLF terminator."""
        assert(self.conn!=None)
        self.conn.sendall(bytes(data+'\r\n','utf-8'))

    def connect(self): #connect to server
        # refuse to connect twice without an intervening close()
        assert(self.conn==None)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = ('localhost', 8089)
        client.connect(server_address)
        #client.setblocking(0) #this will allow the recv method to timeout
        self.conn = client

    def close(self): #close connection to server
        assert(self.conn!=None)
        #self.conn.shutdown('SHUT_RDWR')
        self.conn.close()
        self.conn = None
class LVfunctions():
    """RPC-style wrappers around LVconnection.

    Each method opens a fresh connection, sends {'Command': ..., 'Data': ...}
    as JSON, reads one JSON reply and closes the connection.  Masses travel
    over the wire in kg while the public interface uses Daltons
    (1 Da = 1.6605390666e-27 kg), hence the conversions on both sides.
    """
    def __init__(self):
        # one reusable connection helper; connections are opened per call
        self.LVconn = LVconnection()

    def testing(self):
        """Round-trip sanity check against the LabVIEW server."""
        data = {
            'Command':'Testing',
            'Data':'This is a test'
        }
        msg = json.dumps(data)
        self.LVconn.connect()
        self.LVconn.send_data(msg)
        recv_msg = self.LVconn.recv_data()
        print(recv_msg)
        recv_data = json.loads(recv_msg)
        print(recv_data['Data'])
        self.LVconn.close()

    def get_mass(self,scan_info_in,data_in):
        """Ask the server to compute mass/charge traces for a Q-step run.

        scan_info_in / data_in are dict-like (e.g. DataFrame columns);
        values are coerced to plain lists so they JSON-serialize.
        Returns a dict with times, masses (Da), charges and mass/charge.
        """
        scan_info = {
            'High Frequency (Hz)':list(scan_info_in['High Frequency (Hz)']),
            'Low Frequency (Hz)':list(scan_info_in['Low Frequency (Hz)']),
            'Peak Width':list(scan_info_in['Peak Width']),
            'Peak Amplitude':list(scan_info_in['Peak Amplitude']),
            'Fit Position (Hz)':list(data_in['Fit Position (Hz)'])
        }
        data={
            'Time (s)':list(data_in['Time (s)']),
            'Fit Position (Hz)':list(data_in['Fit Position (Hz)']),
            'Mass To Charge (kg/e)':[x*1.6605390666E-27 for x in data_in['Mass To Charge (Da/e)']],#need to convert from Da to kg
            'Q Step Threshold (Hz)':data_in['Q Step Threshold (Hz)'],
            'Q step procedure Start Time (s)':data_in['Q step procedure Start Time (s)'],
            'Q step procedure End Time (s)':data_in['Q step procedure End Time (s)'],
            'Scan Information':scan_info
        }
        msg_in={
            'Command':'get_mass',
            'Data':json.dumps(data)
        }
        self.LVconn.connect()
        self.LVconn.send_data(json.dumps(msg_in))
        msg_out = self.LVconn.recv_data()
        data_out = json.loads(msg_out)
        self.LVconn.close()
        # convert kg back to Da for the caller
        data_out = {
            'Time (s)': data_out['Time (s)'],
            'Mass (Da)': [x/1.6605390666E-27 for x in data_out['Mass (kg)']],
            'Charge (e)': data_out['Charge (e)'],
            'Mass/Charge (Da/e)': [x/1.6605390666E-27 for x in data_out['Mass/Charge (kg/e)']]
        }
        return data_out

    def filter_mass_data(self,data_in):
        '''
        Filter a mass dataset on the server and return the kept points.

        parameters
        ----------
        data_in : Dictionary of arrays and floats
            keys:
                High Frequency (Hz): ``list(float)``:
                Low Frequency (Hz): ``list(float)``:
                Peak Width: ``list(float)``:
                Peak Amplitude: ``list(float)``:
                UV Lamp: ``list(float)``:
                Time (s): ``list(float)``:
                Fit Position (Hz): ``list(float):
                Mass To Charge (Da/e): ``list(float):
                Charge (e): ``list(float):
                Window Size: ``float``: (note: read from key 'Window_Size')
                Max Sweep Range: ``float``:
        '''
        data = {
            'High Frequency (Hz)':list(data_in['High Frequency (Hz)']),
            'Low Frequency (Hz)':list(data_in['Low Frequency (Hz)']),
            'Peak Width':list(data_in['Peak Width']),
            'Peak Amplitude':list(data_in['Peak Amplitude']),
            'UV Lamp':list(data_in['UV Lamp']),
            'Time (s)':list(data_in['Time (s)']),
            'Fit Position (Hz)':list(data_in['Fit Position (Hz)']),
            'Mass To Charge (kg/e)':[x*1.6605390666E-27 for x in list(data_in['Mass To Charge (Da/e)'])],
            'Charge (e)':list(data_in['Charge (e)']),
            'Window Size':data_in['Window_Size'],
            'Max Sweep Range':data_in['Max Sweep Range']
        }
        msg_in={
            'Command':'filter_mass_data',
            'Data':json.dumps(data)
        }
        self.LVconn.connect()
        self.LVconn.send_data(json.dumps(msg_in))
        msg_out = self.LVconn.recv_data()
        data_out = json.loads(msg_out)
        self.LVconn.close()
        # reply comes back in kg; convert to Da for the caller
        data_out = {
            'Time (s)':list(data_out['Time (s)']),
            'Fit Position (Hz)':list(data_out['Fit Position (Hz)']),
            'Mass/Charge (Da/e)':[x/1.6605390666E-27 for x in list(data_out['Mass/Charge (kg/e)'])],
            'Index':list(data_out['Index'])
        }
        return data_out

    def get_charge(self,data_in):
        """Ask the server for charge/mass assignments of a Q-step trace."""
        data={
            'Q Step Threshold (Hz)':data_in['Q Step Threshold (Hz)'],
            'Time (s)':list(data_in['Time (s)']),
            'Fit Position (Hz)':list(data_in['Fit Position (Hz)']),
            'Mass To Charge (kg/e)': [x*1.6605390666E-27 for x in list(data_in['Mass To Charge (Da/e)'])]
        }
        msg_in={
            'Command':'get_charge',
            'Data':json.dumps(data)
        }
        self.LVconn.connect()
        self.LVconn.send_data(json.dumps(msg_in))
        msg_out = self.LVconn.recv_data()
        data_out = json.loads(msg_out)
        self.LVconn.close()
        # convert masses back to Da for the caller
        data_out = {
            'Charge (e)':list(data_out['Charge (e)']),
            'Mass (Da)':[x/1.6605390666E-27 for x in list(data_out['Mass (kg)'])],
            'Ending Charge (e)':data_out['Ending Charge (e)']
        }
        return data_out

    def closest_mass(self,data_in):
        '''
        Ask the server for the closest-mass assignment.

        Parameters
        ----------
        data_in : Dictionary of arrays or float values
            keys:
                Time (s):``list(float):
                Start Q (e):``float``:
                Mass To Charge (Da/e):``list(float):

        Returns
        -------
        data_out : Dictionary of arrays or float values
            keys: 'Charge (e)' and 'Mass (Da)'.
        '''
        data={
            'Time (s)':list(data_in['Time (s)']),
            'Start Q':data_in['Start Q (e)'],
            'Mass To Charge (kg/e)':[x*1.6605390666E-27 for x in list(data_in['Mass To Charge (Da/e)'])]
        }
        msg_in={
            'Command':'closest_mass',
            'Data':json.dumps(data)
        }
        self.LVconn.connect()
        self.LVconn.send_data(json.dumps(msg_in))
        msg_out = self.LVconn.recv_data()
        data_out = json.loads(msg_out)
        self.LVconn.close()
        # convert masses back to Da for the caller
        data_out = {
            'Charge (e)': data_out['Charge (e)'],
            'Mass (Da)':[x/1.6605390666E-27 for x in list(data_out['Mass (kg)'])]
        }
        return data_out
|
# Created by ******** chomri01 at 12/18/2019
# Feature Name :: --
# To Do ::-
class BasePage:
    """Common base for page objects: holds the browser handle and a
    default wait timeout (seconds) shared by derived pages."""

    def __init__(self, Browser):
        # default timeout used by explicit waits in subclasses
        self.time_out = 30
        self.Browser = Browser
|
from flask import request
from flask_restful import Resource
from ..models.menuitem import MenuItem, MenuItemTypeEnum
from ..db import db
# Maps the lowercase meal-type string from request payloads to the matching
# MenuItemTypeEnum member; unknown types yield None via mapping.get().
mapping = {
    'breakfast': MenuItemTypeEnum.BREAKFAST,
    'lunch': MenuItemTypeEnum.LUNCH,
    'dinner': MenuItemTypeEnum.DINNER
}
class FoodItems(Resource):
    """REST resource exposing the menu-items collection."""

    def get(self):
        """Return every menu item as JSON, or 404 when the table is empty."""
        found_items = MenuItem.query.all() # select * from menuitems
        if found_items:
            return [item.to_json() for item in found_items], 200
        return {'message': 'Item not found'}, 404

    def post(self):
        """Create a menu item from the JSON body and return it with 201.

        NOTE(review): a missing 'name'/'type'/'image_url' key raises
        KeyError (-> 500), and an unknown 'type' silently becomes None via
        mapping.get() — confirm whether validation is wanted here.
        """
        data = request.get_json()
        new_item = MenuItem(data['name'], mapping.get(data['type']), data['image_url'])
        db.session.add(new_item)
        db.session.commit()
        return new_item.to_json(), 201
|
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from serialClient import serialClient
import re
def displaymatch(match):
    """Format a regex match object for debugging; None passes through."""
    if match is None:
        return None
    template = '<Match: %r, groups=%r>'
    return template % (match.group(), match.groups())
class tcpServer(LineReceiver):
    """Line-oriented TCP command protocol (Python 2 / Twisted).

    Understands QUIT, HELP, PRINT and PULSEON/PULSEOFF/SAMPLE <digit>
    commands and relays the device commands to serialClient.
    """
    def __init__(self):
        # pre-compiled command patterns, matched case-insensitively;
        # regex3 captures (command, single-digit device number)
        self.regex0 = re.compile(r"^(QUIT)$", re.IGNORECASE)
        self.regex1 = re.compile(r"^(HELP)$", re.IGNORECASE)
        self.regex2 = re.compile(r"^(PRINT)$", re.IGNORECASE)
        self.regex3 = re.compile(r"^(PULSEON|PULSEOFF|SAMPLE)\s+(\d)$", re.IGNORECASE)

    def connectionMade(self):
        # log the peer address of each new client
        ip = self.transport.getPeer()
        print "New connection from ", ip.host

    def sendLine(self, cmd):
        # normalize the outgoing text and append a CRLF terminator
        buf = cmd.strip() + "\r\n"
        self.transport.write(buf)

    # Called by Twisted whenever a complete line has been received
    def lineReceived(self, line):
        ip = self.transport.getPeer()
        print "Data: ", repr(line), " received from ", ip.host
        # Parse & verify
        # Execute and return response
        #
        # Allowed commands:
        # ---------------
        # print      : prints a list of managed devices
        # sample #   : returns a single reading from the device #
        # pulseon #  : sends a 2 second ON pulse to device #
        # pulseoff # : sends a 2 second OFF pulse to device #
        cmd = line.strip().upper()
        if cmd:
            res = self.regex0.match(cmd)
            # QUIT command received
            if res is not None:
                self.transport.loseConnection()
                return
            res = self.regex1.match(cmd)
            # HELP command received
            if res is not None:
                self.sendLine("HELP: short help text")
                self.sendLine("QUIT: close connection")
                self.sendLine("PRINT: list connected machines")
                self.sendLine("PULSEON [N]: send on pulse to machine N")
                self.sendLine("PULSEOFF [N]: send off pulse to machine N")
                self.sendLine("SAMPLE [N]: sample data from machine N")
                return
            res = self.regex2.match(cmd)
            # PRINT command received
            if res is not None:
                serialClient.listAll(self)
            else:
                # device commands: group 0 is the verb, group 1 the device id
                res = self.regex3.match(cmd)
                if res is not None:
                    serialClient.notify(self, int(res.groups()[1]), res.groups()[0])
                else:
                    self.sendLine("Invalid command")

    def connectionLost(self, reason):
        ip = self.transport.getPeer()
        print "Connection lost from ", ip.host
class tcpServerFactory(Factory):
    # Twisted factory producing one tcpServer protocol per connection.
    protocol = tcpServer

    def __init__(self):
        # NOTE(review): this no-op __init__ also skips Factory.__init__ —
        # confirm that is intentional.
        pass
|
from layer_utils import *
import numpy as np
class TwoLayerNet(object):
    """Two-layer fully-connected net: affine -> ReLU -> affine -> softmax."""
    def __init__(self, input_dim = 3 * 32 * 32, hidden_dim = 100,
                 num_classes = 10, weight_scale = 1e-3, reg = 0.0):
        '''
        :param input_dim: dimensionality of the input
        :param hidden_dim: number of neurons in the hidden layer
        :param num_classes: number of classes for the final classification
        :param weight_scale: scale of the small random weight initialization
        :param reg: L2 regularization strength
        :return:
        '''
        self.params = {}
        self.reg = reg
        # layer connections: w1/b1 input->hidden, w2/b2 hidden->output
        self.params['w1'] = weight_scale * np.random.randn(input_dim, hidden_dim)
        self.params['b1'] = np.zeros((1, hidden_dim))
        self.params['w2'] = weight_scale * np.random.randn(hidden_dim, num_classes)
        self.params['b2'] = np.zeros((1, num_classes))

    # loss function; y holds the ground-truth labels
    def loss(self, X, y = None):
        """Compute scores for X; with labels y, also return (loss, grads)."""
        scores = None
        N = X.shape[0]
        w1, b1 = self.params['w1'], self.params['b1']
        w2, b2 = self.params['w2'], self.params['b2']
        # hidden layer includes a ReLU nonlinearity
        h1, cache1 = affine_relu_forward(X, w1, b1)
        # output layer has no ReLU: plain forward pass
        out, cache2 = affine_forward(h1, w2, b2)
        scores = out
        # y is None means inference mode: just return the class scores
        if y is None:
            return scores
        # grads stores the gradients for each parameter
        loss, grads = 0, {}
        # softmax classifier loss and gradient on the scores
        data_loss, dscores = softmax_loss(scores, y)
        # L2 regularization penalty
        reg_loss = 0.5 * self.reg * np.sum(w1 * w1) + 0.5 * self.reg * np.sum(w2 * w2)
        # total loss = data loss + regularization loss
        loss = data_loss + reg_loss
        # backpropagate through the output layer, then the hidden layer
        dh1, dw2, db2 = affine_backward(dscores, cache2)
        dx, dw1, db1 = affine_relu_backward(dh1, cache1)
        # add the regularization contribution to the weight gradients
        dw2 += self.reg * w2
        dw1 += self.reg * w1
        grads['w1'] = dw1
        grads['b1'] = db1
        grads['w2'] = dw2
        grads['b2'] = db2
        return loss, grads
|
from src.Utils.Util import calcuateOffset,CenterModes
def test_calcuate_offset():
    """Pin calcuateOffset's results for two known layouts."""
    # item size 10, container 100, index 1 of 4 (default center mode)
    offset = calcuateOffset(10,100,1,4)
    assert offset == 37
    # same layout, index 3 of 4, anchored with CenterModes.DOWN
    offset = calcuateOffset(10,100,3,4,CenterModes.DOWN)
    assert offset == 82
|
#!/usr/bin/env python
import pymongo
import matplotlib.pyplot as plt
import os
from collections import namedtuple, defaultdict, deque
from datetime import date
from dateutil.relativedelta import relativedelta, MO, TU, WE, TH, FR, SA, SU
from graphviz import Graph, Digraph
from itertools import groupby, product
from operator import itemgetter
from utils import _sanitize_username, _sanitize_question
# This is needed when obtaining the date for last Monday, Tuesday, etc.
# (dateutil relativedelta weekday specifiers, -1 = most recent past one).
WEEKDAYS = {'Mon': MO(-1), 'Tue': TU(-1), 'Wed': WE(-1), 'Thu': TH(-1),
            'Fri': FR(-1), 'Sat': SA(-1), 'Sun': SU(-1)}
# Ordering months by their calendar index.
MONTHS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
          'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
# Getting current year
CURR_YEAR = date.today().year
# Convenience data structure for holding a question's date.
# NOTE(review): namedtuple's `verbose` parameter is Python-2 era and was
# removed in Python 3.7 — this module appears to target Python 2
# (print statements below); confirm before porting.
Date = namedtuple('Date', ['day', 'month', 'year'], verbose=False)
def create_date_histogram(quora_data, questions_without_answers_only=False, filename='date_histogram.png'):
    """
    Plot a bar chart of how many questions were last asked in each month.

    :param quora_data: Collection of documents (list) related to questions
    :param questions_without_answers_only: if True then we collect information only about questions w/o answers
    :param filename: output image filename passed to _plot_bar
    :return: None
    """
    # List of dates (when questions were last asked)
    dates = []
    for document in quora_data:
        question = _get_question(document)
        #print question
        answer_count = document[question]['answer_count']
        # This code should be skipped precisely when question has some answers
        # and we are considering questions with answers.
        if not questions_without_answers_only or answer_count == 0:
            last_asked = document[question]['last_asked']
            #print last_asked
            date = _parse_date(last_asked)
            #print date
            dates.append(date)
    # chronological order so the month/year buckets come out ordered
    dates = sorted(dates, key=lambda x: (x.year, x.month, x.day))
    #print dates
    dates_without_days = ['%02d/%d' % (x.month, x.year) for x in dates]
    #print dates_without_days
    # Removing consecutive duplicates while preserving order !
    distinct_dates_without_days = [x[0] for x in groupby(dates_without_days)]
    #print distinct_dates_without_days
    distinct_dates_without_days_counts = [dates_without_days.count(x) for x in distinct_dates_without_days]
    #print distinct_dates_without_days_counts
    title = 'Last asked without answers histogram' if questions_without_answers_only else 'Last asked histogram'
    _plot_bar(
        x=distinct_dates_without_days,
        y=distinct_dates_without_days_counts,
        filename=filename,
        title=title,
        xlabel='Last asked',
        ylabel='Frequency'
    )
def create_answer_histogram(quora_data):
    """Plot a histogram of answer counts across all questions."""
    # List of answer counts (counters counting number of answers for particular question)
    answer_counts = []
    for document in quora_data:
        question = _get_question(document)
        answer_count = document[question]['answer_count']
        #print answer_count
        answer_counts.append(answer_count)
    #print answer_counts
    _plot_histogram(
        x=answer_counts,
        filename='answer_count_histogram.png',
        title='Answer count histogram',
        xlabel='Answer count',
        ylabel='Frequency',
        bins=10
    )
# Analyzing topics / tags
def analyze_topics(quora_data):
    """Print all distinct topics in alphabetical order (Python 2)."""
    all_topics = set([])
    for document in quora_data:
        question = _get_question(document)
        topics = document[question]['topics']
        for topic in topics:
            all_topics.add(topic)
    # Sorting topics/tags and printing them out
    sorted_topics = sorted(list(all_topics))
    print '-------------------------------------'
    print 'List of topics in alphabetical order:'
    for topic in sorted_topics:
        print topic
# Analyzing which topics are occurring the most frequently
def analyze_topics_frequency(quora_data):
    """Print each topic with its question count, most frequent first."""
    topics_frequencies = defaultdict(int)
    for document in quora_data:
        question = _get_question(document)
        topics = document[question]['topics']
        for topic in topics:
            topics_frequencies[topic] += 1
    # descending by frequency
    for topic in sorted(topics_frequencies, key=topics_frequencies.get, reverse=True):
        print topic, topics_frequencies[topic]
# Sorting users by a specific attribute
def users_by_attribute(quora_data, attribute):
    """Write users ranked by the given attribute to results/users_<attr>.txt."""
    count_by_attribute = {}
    for document in quora_data:
        username = _get_username(document)
        count_by_attribute[username] = document[username][attribute]
    # descending by attribute value; the 'results' directory must exist
    with open(os.path.join('results', 'users_%s.txt' % attribute), 'w') as f:
        for user in sorted(count_by_attribute, key=count_by_attribute.get, reverse=True):
            f.write(user + ': ' + str(count_by_attribute[user]) + '\n')
# Questions by attribute
def questions_by_attribute(quora_data, attribute, f):
    """Write questions ranked by f(attribute value), descending.

    quora_data : iterable of single-question documents.
    attribute  : name of the question attribute to rank by.
    f          : key function applied to the attribute value for sorting.

    Output goes to results/questions_<attribute>.txt (the 'results'
    directory must exist).  Fix over the original: the file handle was
    also named ``f``, shadowing the key-function parameter; it is now
    named ``out`` to keep both usable throughout the function.
    """
    count_by_attribute = {}
    for document in quora_data:
        question = _get_question(document)
        value = document[question][attribute]
        count_by_attribute[question] = (f(value), value)
        #print document[question][attribute]
    with open(os.path.join('results', 'questions_%s.txt' % attribute), 'w') as out:
        # sort by the key-function result (first tuple element), descending
        for question in sorted(count_by_attribute.items(), key=lambda x: x[1][0], reverse=True):
            out.write(str(question) + '\n')
## Starting with root_question e.g. 'what-is-terrorism' graph of questions is explored
## and the goal is to find a path through the graph leading to question with particular
## topic/tag like 'Astronomy'
def explore_questions_by_topic(quora_data, root_question, topic):
    """BFS over the related-questions graph until a question tagged
    ``topic`` is found; prints the path taken (Python 2)."""
    already_explored_questions = set()
    # index every question's data by its (sanitized) name for O(1) lookups
    d = {}
    for document in quora_data:
        question = _get_question(document)
        d[question] = document[question]
        d[question]['related_questions'] = [_sanitize_question(x) for x in d[question]['related_questions']]
    questions_queue = []
    already_explored_questions.add(root_question)
    # the queue holds whole paths, not single nodes, so the winning path
    # is available as soon as the target topic is seen
    questions_queue.append([root_question])
    questions_path = []
    while questions_queue:
        # get the first path from the queue
        questions_path = questions_queue.pop(0)
        # get the last node from the path
        question = questions_path[-1]
        # questions_path found
        print d[question]['topics']
        if topic in d[question]['topics']:
            break
        # enumerate all adjacent nodes, construct a new path and push it into the queue
        for related_question in d[question]['related_questions']:
            if related_question not in already_explored_questions and related_question in d:
                already_explored_questions.add(related_question)
                new_questions_path = list(questions_path)
                new_questions_path.append(related_question)
                questions_queue.append(new_questions_path)
    print 'Path leading to %s : %r' % (topic, questions_path)
# Graphviz styling shared by the visualize_* helpers below; applied to the
# graph, node and edge attribute tables via _apply_styles().
styles = {
    'graph': {
        'label': 'Graph',
        'fontsize': '12',
        'fontcolor': 'white',
        'bgcolor': '#888888',
        'overlap': 'prism',
        'outputorder': 'edgesfirst'
        # 'rankdir': 'BT'
    },
    'nodes': {
        'fontname': 'Helvetica',
        'shape': 'hexagon',
        'fontcolor': 'white',
        'color': 'white',
        'style': 'filled',
        'fillcolor': '#006699',
    },
    'edges': {
        'color': 'black',
        'arrowhead': 'open',
        'fontname': 'Courier',
        'fontsize': '12',
        'fontcolor': 'white',
    }
}
# Visualizing topics using graphviz
# Topics appearing in the same question are linked together
def visualize_topics(quora_data):
    """Render an undirected topic co-occurrence graph to images/topics.gv
    (Python 2: uses xrange)."""
    dot = Graph(comment='Topics graph', engine='sfdp')
    seen_topics = set()
    for document in quora_data:
        question = _get_question(document)
        topics = document[question]['topics']
        # Iterating over topics and adding nodes for topics if necessary
        for topic in topics:
            if topic not in seen_topics:
                dot.node(topic, label=topic)
                seen_topics.add(topic)
        # Iterating over topics and adding edges between topics belonging to the same question
        for i in xrange(len(topics)):
            for j in xrange(i+1, len(topics)):
                dot.edge(topics[i], topics[j])
        # topic1, topic2 in product(topics, topics):
        #     dot.edge(topic1, topic2)
    dot = _apply_styles(dot, styles)
    # print dot.source
    dot.render(os.path.join('images', 'topics.gv'), view=True)
# Visualizing network of users (by using followers/following relationship) using graphviz
def visualize_users(quora_data):
    """Render the directed follower/following graph to images/users.gv.

    First pass adds one node per scraped user; second pass adds an edge
    from follower to followed, only between users that are both present.
    """
    dot = Digraph(comment='Users subgraph', engine='sfdp')
    seen_users = set()
    for document in quora_data:
        username = _get_username(document)
        # Checking if user was already added to the graph
        if username not in seen_users:
            # Adding user to graph as node
            dot.node(username, label=username)
            seen_users.add(username)
    for document in quora_data:
        username = _get_username(document)
        # Traversing over following users and adding edge
        for following in document[username]['following']:
            following_sanitized = _sanitize_username(following)
            if following_sanitized in seen_users:
                dot.edge(username, following_sanitized)
        # Traversing over user's followers
        for follower in document[username]['followers']:
            follower_sanitized = _sanitize_username(follower)
            if follower_sanitized in seen_users:
                dot.edge(follower_sanitized, username)
    dot = _apply_styles(dot, styles)
    # print dot.source
    dot.render(os.path.join('images', 'users.gv'), view=True)
# Visualizing network of users created based on questions and answers i.e.
# There is a link from question author to guy who answered this question
def visualize_questions_and_answers_authors(quora_data):
    """Render a directed graph question-author -> answer-author to
    images/authors.gv, filtered through is_valid_author()."""
    dot = Digraph(comment='Authors subgraph', engine='sfdp')
    seen_authors = set()
    for document in quora_data:
        question = _get_question(document)
        question_author = document[question]['question_author']
        if question_author not in seen_authors:
            if is_valid_author(question_author):
                seen_authors.add(question_author)
                dot.node(question_author, label=question_author)
        answers_authors = document[question]['answers_authors']
        for answer_author in answers_authors:
            if answer_author not in seen_authors:
                if is_valid_author(answer_author):
                    seen_authors.add(answer_author)
                    dot.node(answer_author, label=answer_author)
            if question_author in seen_authors and answer_author in seen_authors:
                dot.edge(question_author, answer_author)
    dot = _apply_styles(dot, styles)
    dot.render(os.path.join('images', 'authors.gv'), view=True)
def is_valid_author(authorname):
    """Decide whether *authorname* should appear in the authors graph.

    Currently every author passes; the commented filter below can be
    enabled to drop anonymous/placeholder accounts.
    """
    return True
    # Uncomment this below to find only authors with normal names
    #return authorname != 'Anonymous' and authorname != '' and authorname != 'User' and authorname !='Quora-User'
def _apply_styles(graph, styles):
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
def _plot_bar(x, y, filename='tmp.png', title='title', xlabel='X', ylabel='Y'):
    """Draw a bar chart of *y* with category labels *x*, save it under
    images/<filename> and display it interactively."""
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Some dummy values that will be replaced by labels when plotting
    x_indexes = range(len(x))
    plt.bar(x_indexes, y, align='center')
    plt.xticks(x_indexes, x, rotation=70)
    plt.gcf().set_size_inches(20, 12)
    plt.savefig(os.path.join('images', filename), dpi=100)
    plt.show()
def _plot_histogram(x, filename='tmp.png', title='title', xlabel='X', ylabel='Y', bins=5):
    """Draw a histogram of *x* with *bins* buckets, save it under
    images/<filename> and display it interactively."""
    plt.hist(x, bins=bins)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.gcf().set_size_inches(20, 12)
    plt.savefig(os.path.join('images', filename), dpi=100)
    plt.show()
def _parse_date(last_asked):
    """
    :param last_asked: Quora funky way of saying you when the question was submitted - format is roughly unknown
    :return: When question was asked (date) in format (DAY, MONTH, YEAR)
    """
    # NOTE(review): this local name shadows the `datetime` module/class name.
    datetime = last_asked.strip().split()
    # Only day of the week is provided like: Mon
    if len(datetime) == 1:
        last_weekday = date.today() + relativedelta(weekday=WEEKDAYS[datetime[0]])
        day = last_weekday.day
        month = last_weekday.month
        year = last_weekday.year
    if len(datetime) >= 2:
        day = int(datetime[0])
        if datetime[1] in MONTHS:
            month = MONTHS[datetime[1]]
            year = CURR_YEAR
        # Expecting this type of date: 12 Jun
        if len(datetime) == 2:
            year = CURR_YEAR
        # Expecting date with year: 3 Mar 1991
        elif len(datetime) == 3:
            year = int(datetime[2])
        # NOTE(review): if datetime[1] is not a key of MONTHS, `month` is
        # never bound and Date(...) below raises NameError -- confirm that
        # inputs always use a known month abbreviation.
    return Date(day, month, year)
def _get_question(document):
    # The question slug is the document's non-'_id' key; see _get().
    return _get(document)


def _get_username(document):
    # The username is the document's non-'_id' key; see _get().
    return _get(document)
def _get(document):
keys = document.keys()
if keys[0] != '_id':
return keys[0]
else:
return keys[1]
def main():
    """Entry point: connect to the local MongoDB 'quora' database, load the
    scraped collections and run one of the analysis/visualization helpers
    (the others are kept commented out for manual experimentation)."""
    connection_str = 'mongodb://localhost:27017/'
    quora_db = 'quora'
    client = pymongo.MongoClient(connection_str)
    db = client[quora_db]
    # Materialize the three scraped collections up front.
    questions_data = list(db.questions.find())
    users_data = list(db.users.find())
    answers_data = list(db.answers.find())
    # create_date_histogram(questions_data)
    # # Considering only questions that have no answers
    # create_date_histogram(
    #     questions_data,
    #     questions_without_answers_only=True,
    #     filename='date_histogram_without_answers_only.png'
    # )
    # create_answer_histogram(quora_data)
    # analyze_topics(questions_data)
    # analyze_topics_frequency(questions_data)
    # visualize_topics(questions_data)
    # visualize_users(users_data)
    # visualize_questions_and_answers_authors(answers_data)
    # users_by_attribute(users_data, 'answers')
    # users_by_attribute(users_data, 'questions')
    # users_by_attribute(users_data, 'edits')
    # users_by_attribute(users_data, 'following_count')
    # users_by_attribute(users_data, 'followers_count')
    # questions_by_attribute(questions_data, 'topics', len)
    #explore_questions_by_topic(questions_data, 'What-is-terrorism', 'Astronomy')
    #explore_questions_by_topic(questions_data, 'What-is-terrorism', 'Communist Party of China')
    explore_questions_by_topic(questions_data, 'What-is-terrorism', 'Google')
    #print quora_data
    #print quora_data[0]


if __name__ == '__main__':
    main()
|
"""
给定一个字符串 s,找到 s 中最长的回文子串。你可以假设 s 的最大长度为 1000。
示例 1:
输入: "babad"
输出: "bab"
注意: "aba" 也是一个有效答案。
示例 2:
输入: "cbbd"
输出: "bb"
"""
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s*.

        Expand-around-centre: a palindrome's centre is either a single
        character or the gap between two adjacent characters, giving
        2n - 1 candidate centres in total. Each centre is expanded
        outwards while the ends match, keeping the first strictly-longest
        match found.
        """
        best = ""
        n = len(s)
        for i in range(n):
            # Odd-length centre (i, i) first, then even-length (i, i + 1),
            # matching the usual 2n - 1 centre enumeration order.
            for lo, hi in ((i, i), (i, i + 1)):
                while lo >= 0 and hi < n and s[lo] == s[hi]:
                    if hi - lo + 1 > len(best):
                        best = s[lo:hi + 1]
                    lo -= 1
                    hi += 1
        return best
# Demo: compute the longest palindrome of a sample string.
s = Solution()
res = s.longestPalindrome("abadbbd")
print(res) |
"""Datetime-related utility functions."""
from datetime import datetime, timedelta, date
import math
from ..timestamp import get_timestamp
from ..datetime import (
utc_time,
datetime_to_dateint,
)
from ..constants import (
WEEKDAYS,
)
def decompose_dateint(dateint):
    """Split the given dateint into its year, month and day components.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    year : int
        The year component of the given dateint.
    month : int
        The month component of the given dateint.
    day : int
        The day component of the given dateint.
    """
    # The decimal layout is YYYYMMDD, so two divmod calls peel off the parts.
    year, month_day = divmod(dateint, 10000)
    month, day = divmod(month_day, 100)
    return year, month, day
def dateint_to_date(dateint):
    """Converts the given integer to a datetime.date object.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    datetime.date
        The corresponding date object.

    Example
    -------
    >>> dateint_to_date(20170223)
    datetime.date(2017, 2, 23)
    """
    # Peel the YYYYMMDD layout apart inline (same math as decompose_dateint).
    year, month_day = divmod(dateint, 10000)
    month, day = divmod(month_day, 100)
    return date(year, month, day)
def tz_aware_dateint_to_timestamp(dateint, timezone_name):
    """Returns the epoch timestamp for the given timezone and dateint.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    timezone_name : str
        The name of the timezone.

    Returns
    -------
    int
        The timestamp corresponding to the start of the given day (so at 0
        hours, 0 minutes, etc...) at the given timezone.
    """
    # Delegates to the package-level get_timestamp helper with the
    # (year, month, day) triple unpacked as positional arguments.
    return get_timestamp(timezone_name, *decompose_dateint(dateint))
def dateint_to_timestamp(dateint):
    """Converts the given dateint to a timestamp, using the local timezone.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    int
        The timestamp corresponding to the start of the given day (so at 0
        hours, 0 minutes, etc...) at the local timezone.
    """
    # dateint_to_datetime yields a naive datetime, so .timestamp()
    # interprets it in the local timezone.
    return int(dateint_to_datetime(dateint).timestamp())
def dateint_to_utc_timestamp(dateint):
    """Converts the given dateint to the corresponding UTC timestamp.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    int
        The UTC timestamp corresponding to the start of the given day (so at 0
        hours, 0 minutes, etc...).
    """
    # Thin wrapper over the timezone-aware variant pinned to UTC.
    return tz_aware_dateint_to_timestamp(dateint, 'UTC')
def dateint_to_datetime(dateint):
    """Converts the given dateint to a datetime object, in local timezone.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    datetime.datetime
        A timezone-unaware datetime object representing the start of the given
        day (so at 0 hours, 0 minutes, etc...) in the local timezone.
    """
    # Validate the decimal width before decomposing.
    if len(str(dateint)) != 8:
        raise ValueError(
            'Dateints must have exactly 8 digits; the first four representing '
            'the year, the next two the months, and the last two the days.')
    year, month_day = divmod(dateint, 10000)
    month, day = divmod(month_day, 100)
    return datetime(year=year, month=month, day=day)
def dateint_to_weekday(dateint, first_day='Monday'):
    """Returns the weekday of the given dateint.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    first_day : str, default 'Monday'
        The first day of the week.

    Returns
    -------
    int
        The weekday of the given dateint, when first day of the week = 0,
        last day of the week = 6.

    Example
    -------
    >>> dateint_to_weekday(20170213)
    0
    >>> dateint_to_weekday(20170212)
    6
    >>> dateint_to_weekday(20170214)
    1
    >>> dateint_to_weekday(20170212, 'Sunday')
    0
    >>> dateint_to_weekday(20170214, 'Sunday')
    2
    """
    # datetime.weekday() is Monday=0; rotate so `first_day` maps to 0.
    weekday_ix = dateint_to_datetime(dateint).weekday()
    return (weekday_ix - WEEKDAYS.index(first_day)) % 7
def dateint_to_weekday_name(dateint):
    """Returns the weekday name of the given dateint.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    str
        The weekday name of the given dateint.

    Example
    -------
    >>> dateint_to_weekday_name(20170213)
    'Monday'
    >>> dateint_to_weekday_name(20170212)
    'Sunday'
    >>> dateint_to_weekday_name(20170214)
    'Tuesday'
    """
    # %A formats the full weekday name (locale-dependent).
    return dateint_to_datetime(dateint).strftime("%A")
def shift_dateint(dateint, day_shift):
    """Shifts the given dateint by the given amount of days.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    day_shift : int
        The number of days to shift the given dateint by. A negative number
        shifts the dateint backwards.

    Returns
    -------
    int
        A dateint corresponding to the given date shifted by the given amount
        of days.

    Example
    -------
    >>> shift_dateint(20170228, 1)
    20170301
    >>> shift_dateint(20170301, -1)
    20170228
    >>> shift_dateint(20170220, 5)
    20170225
    """
    # timedelta supports negative day counts natively, so the original
    # abs()/sign-branching is unnecessary; a single addition covers both
    # directions (and a zero shift).
    return datetime_to_dateint(dateint_to_datetime(dateint) + timedelta(days=day_shift))
def dateint_range(first_dateint, last_dateint):
    """Returns all dateints in the given dateint range.

    Arguments
    ---------
    first_dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    last_dateint : int
        An integer object depicting a specific calendaric day; e.g. 20170108.

    Returns
    -------
    iterable
        An iterable of ints representing all days in the given dateint range.

    Example
    -------
    >>> dateint_range(20170228, 20170301)
    [20170228, 20170301]
    >>> dateint_range(20170225, 20170301)
    [20170225, 20170226, 20170227, 20170228, 20170301]
    """
    first_datetime = dateint_to_datetime(first_dateint)
    last_datetime = dateint_to_datetime(last_dateint)
    delta = last_datetime - first_datetime
    # Step in 24-hour hops and de-duplicate via a set; presumably a guard
    # against irregular-length days -- TODO confirm, since the datetimes
    # involved are naive.
    delta_in_hours = math.ceil(delta.total_seconds() / 3600)
    delta_in_days = math.ceil(delta_in_hours / 24) + 1
    dateint_set = set()
    for delta_i in range(0, delta_in_days * 24, 24):
        datetime_i = first_datetime + timedelta(hours=delta_i)
        dateint_i = datetime_to_dateint(datetime_i)
        if dateint_i <= last_dateint:
            dateint_set.add(dateint_i)
    return sorted(dateint_set)
def today_int():
    """Returns the dateint for today (based on UTC time, per utc_time())."""
    return datetime_to_dateint(utc_time())
def dateint_week_by_dateint(dateint, first_day='Monday'):
    """Return a dateint range of the week the given dateint belongs to.

    Arguments
    ---------
    dateint : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    first_day : str, default 'Monday'
        The first day of the week.

    Returns
    -------
    iterable
        An iterable of dateint representing all days of the week the given
        dateint belongs to.
    """
    # Walk back to the first day of the containing week, then span 7 days.
    weekday_ix = dateint_to_weekday(dateint, first_day)
    first_day_dateint = shift_dateint(dateint, -weekday_ix)
    last_day_dateint = shift_dateint(first_day_dateint, 6)
    return dateint_range(first_day_dateint, last_day_dateint)
def dateint_difference(dateint1, dateint2):
    """Return the difference between two dateints in days.

    Arguments
    ---------
    dateint1 : int
        An integer object depicting a specific calendaric day; e.g. 20161225.
    dateint2 : int
        An integer object depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    int
        The difference between the two given dateints in days (always
        non-negative; argument order does not matter).
    """
    dt1 = dateint_to_datetime(dateint1)
    dt2 = dateint_to_datetime(dateint2)
    delta = dt1 - dt2
    return abs(delta.days)
|
# Generated by Django 2.2.13 on 2020-07-02 17:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the nullable ``cloned_for`` foreign key from LemmatizedText to
    Group (auto-generated by Django; edit with care)."""

    dependencies = [
        ('groups', '0003_auto_20200603_1933'),
        ('lemmatized_text', '0009_lemmatizedtext_original_text'),
    ]

    operations = [
        migrations.AddField(
            model_name='lemmatizedtext',
            name='cloned_for',
            # SET_NULL (with null=True) keeps the text row when its group
            # is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='groups.Group'),
        ),
    ]
|
from __future__ import division
from qrel_chk import Qrel
import threading
import futils
import sys
import getopt
from collections import defaultdict
import math
import numpy as np
from numpy import linalg as LA
from scipy.optimize import minimize
class RBPOpt (threading.Thread):
    """
    Worker thread that optimizes RBP to pooling depth d.

    The RMSE is calculated per run, for the final score, only on documents
    that are judged: the objective minimizes the distance between the
    ``fitted'' (guessed) gain values and the real judged ones, using a
    geometric combination of the per-run background vectors. This variant
    differs from the linear one only in the geometric combination; it is
    kept in a separate file purely for running convenience.
    """
    def __init__(self, p, d, q, qrelname,
                 fitted_vec, rank_dir,
                 method, out_dir, is_binary=True):
        """
        init the opt process
        :param p: RBP persistence value
        :param d: considered pooling depth.
        :param q: qid.
        :param qrelname: qrel file name
        :param fitted_vec: fitted vector for this method
        :param rank_dir: dir of rank matrices
        :param method: method idx (used in the output file names)
        :param out_dir: output dir
        :param is_binary: treat relevance as binary when True
        """
        threading.Thread.__init__(self)
        self._outname = out_dir + "opt-weight-"+str(method)+".txt"
        self._rmse = out_dir + "opt-rmse-" + str(method) + ".txt"
        self._k = d
        self._q = q
        self._qrel = Qrel(qrelname).get_rel_by_qid(q)
        self._p = p
        tmp_rank_mat, self._runnum = futils.read_csv_to_dict(rank_dir + str(q) + "-rank.txt", is_prob=False)
        self._rank_bg = fitted_vec
        # Per-run RBP accumulated from the real judgments.
        self._rbp = np.zeros(self._runnum)
        # (depth, run, run) background vectors for judged documents.
        self._bg_vectors = np.zeros((self._k, self._runnum, self._runnum))
        self._bg_rbp = np.zeros((self._k, self._runnum))
        self._binary = is_binary
        # load the rank matrix
        # NOTE(review): dict.iteritems() makes this Python-2 only.
        for k, v in tmp_rank_mat.iteritems():
            tmp_v = np.array(v)  # convert to np array for processing.
            is_judged = False
            curr_rel = 0
            if k in self._qrel:
                if self._qrel[k] > 0:
                    curr_rel = 1 if self._binary else self._qrel[k]
                is_judged = True
            if min(tmp_v) < self._k and max(tmp_v) > -1:  # this document is retrieved by one of the system
                tmp = self._rank_bg[tmp_v]
                for i in range(0, len(tmp_v)):
                    if 0 <= tmp_v[i] < self._k:
                        self._rbp[i] += curr_rel * math.pow(self._p, tmp_v[i])
                        self._bg_rbp[tmp_v[i], i] = curr_rel * math.pow(self._p, tmp_v[i])
                        if is_judged:
                            self._bg_vectors[tmp_v[i], i, :] = tmp  # set the fitted vector to judged documents

    def _est_func_geo(self, w):
        """
        Objective function for the optimization: L2 distance between the
        RBP fitted with weight vector *w* (geometric / log-linear
        combination across runs) and the RBP from the real judgments.
        :param w: per-run weight vector
        :return: the L2 norm of (fitted RBP - judged RBP)
        """
        fitted_rbp = np.zeros(len(w))
        k = self._bg_vectors.shape[0]
        for i in range(0, k):  # iterating down to the rank
            curr_rank = self._bg_vectors[i, :, :]
            for j in range(0, self._runnum):
                tmp = curr_rank[j, :]
                tmp[tmp == 0] = 10 ** -6  # floor zeros to avoid log(0)
                tmp = np.log(tmp)
                fitted_rbp[j] += math.pow(self._p, i) * np.exp(np.dot(w, tmp))  # get to the real bg of the document
        return LA.norm(fitted_rbp-self._rbp)

    def get_weight(self):
        """Run the constrained SLSQP optimization and append the per-run
        weights and the residual (RMSE) to the two output files."""
        w0 = np.ones(self._runnum)
        w0 /= self._runnum
        # Weights are constrained to sum to one and lie in [0, 1].
        cons = ({'type': 'eq', 'fun': lambda x: 1 - sum(x)})
        # For linear and per score
        bnds = tuple((0,1) for x in w0)
        res = minimize(self._est_func_geo, w0, method='SLSQP', constraints=cons, bounds=bnds)
        # --------------------------------------
        res_str = str(self._q)
        for i in range(0,len(res.x)):
            res_str += ",{:.6f}".format(res.x[i])
        with open(self._outname, 'a') as fout:
            fout.write(res_str.strip() + "\n")
        with open(self._rmse,'a') as fout:
            fout.write(str(self._q) + ", {:.4f}".format(res.fun).strip() + "\n")

    def run(self):
        # Thread entry point: the whole work happens in get_weight().
        self.get_weight()
def get_doc_prob(qid, out_dir, rank_dir, fitted_dir, m = 4):
    """
    output final estimation based on the weighting param
    :param qid: list of query ids
    :param out_dir: output dir, same as the previous used one
    :param rank_dir: rank-mat dir
    :param fitted_dir: fitted vector dir
    :param m: number of methods
    :return:
    """
    runnum = 100  # upper bound on the number of runs; shrunk below to fit the data
    param_mat = np.zeros((m, len(qid), runnum))  # shrink later
    for i in range(0, m):
        curr_mat = np.loadtxt(out_dir + "opt-weight-"+str(i+1)+".txt", delimiter=",", dtype=float)
        if runnum >= curr_mat.shape[1]:
            # First column of the weight file is the qid, hence the -1.
            runnum = curr_mat.shape[1] - 1
            param_mat = param_mat[:, :, 0:runnum]
        param_mat[i, :, :] = curr_mat[:, 1:]
    for q in range(0, len(qid)):
        doc_prob = defaultdict(list)
        rank_mat, runnum = futils.read_csv_to_dict(rank_dir + str(qid[q]) + "-rank.txt", is_prob=False)
        fit_mat = np.loadtxt(fitted_dir + str(qid[q]) + ".txt", delimiter=" ", dtype=float)
        # NOTE(review): iteritems() makes this Python-2 only.
        for doc , rank in rank_mat.iteritems():
            if doc not in doc_prob:
                doc_prob[doc] = [0] * m
            for i in range(0, m):
                curr_gain = fit_mat[:, i + 1]
                tmp_gain = curr_gain[np.array(rank)]
                tmp_gain[tmp_gain == 0] = 10 ** -6  # floor zeros to avoid log(0)
                # Geometric (log-linear) combination of the per-run gains.
                doc_prob[doc][i] = np.exp(np.dot(param_mat[i, q, :], np.log(tmp_gain)))
        with open(out_dir + str(qid[q]) + "-prob.txt","a") as fout:
            for k,v in doc_prob.items():
                curr_str = str(k)
                for p in v:
                    curr_str += ", {:.4f}".format(p)
                fout.write(curr_str.strip() + "\n")
def main(argv):
    """Parse command-line options, start one RBPOpt thread per (query,
    method) pair, wait for them all, then combine the results via
    get_doc_prob()."""
    method = 3
    qrelfile = ""
    depth = 10
    collection = "rob04"
    # pd = 100
    # qid = 651
    try:
        opts, args = getopt.getopt(argv, "j:d:hc:", ["runf", "jfile", "depth"])
    except getopt.GetoptError:
        print('-j <qrelfile> -d <depth> -h help')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('-j <qrelfile> -c <collection> -d <depth> -h help')
            sys.exit()
        elif opt in ("-j", "--jfile"):
            qrelfile = arg
        elif opt in ("-d", "--d"):
            depth = int(arg)
        elif opt in ("-c", "--c"):
            collection = arg
    prefix_dir = "testcase/"
    #
    rank_dir = prefix_dir + collection + "/doc_rank/"
    fit_dir = prefix_dir + collection + "/background_gain/fit/origin/" + str(depth) + "/"
    out_dir = prefix_dir + collection + "/background_gain/opt_geo/" + str(depth) + "/"
    curr_qrel = Qrel(qrelfile)
    qid = curr_qrel.get_qid()
    tlist = []
    p = 0.95  # RBP persistence parameter
    for q in qid:
        fit_mat = np.loadtxt(fit_dir+str(q)+".txt", delimiter=" ", dtype=float)
        # One optimization thread per method; methods are 1-indexed (1..4).
        for i in range(1, 5):
            curr_opt = RBPOpt(p, 1000, q, qrelfile, fit_mat[:, i], rank_dir, i, out_dir)
            curr_opt.start()
            tlist.append(curr_opt)
    for t in tlist:
        t.join()
    get_doc_prob(qid, out_dir, rank_dir, fit_dir)


if __name__ == "__main__":
    main(sys.argv[1:])
|
#!/usr/bin/env python2
#coding:utf-8
from Tkinter import *
from ttk import *
from tkMessageBox import *
from collections import OrderedDict
from Constants import *
import json
import sys
#TODO traceback
class ConfigApp(Frame):
    """Tkinter form that collects bound/interval/mail settings and saves
    them as JSON to config.json."""

    def __init__(self,master=None):
        Frame.__init__(self,master)
        self.DEBUG = True
        # Grid-row counter used by the entry-generation method below.
        self.numEntryRow = 0
        # Frame that holds the input entries.
        self.entryFrame = Frame(root,padding=10)
        self.entryFrame.grid()
        # Ordered dict that stores the input values (key -> StringVar).
        self.inputDict = OrderedDict()
        self.boundKeyList = [
            UPBOUNDCAREFUL,
            UPBOUNDSAFETY,
            LWBOUNDSAFETY,
            LWBOUNDCAREFUL
        ]
        self.intKeyList = [
            INTERVAL
        ]
        self.stringKeyList = [
            FROMADDR,
            PASSWORD,
            TOADDR
        ]
        # Initialize the dict values.
        for k in self.boundKeyList:
            self.inputDict[k] = StringVar()
        for k in self.intKeyList:
            self.inputDict[k] = StringVar()
        for k in self.stringKeyList:
            self.inputDict[k] = StringVar()
        self.inputDict[FROMADDR].set("@gmail.com")
        # Build the GUI input entries.
        for key,value in self.inputDict.items():
            self.genEntry(key,value)
        # Save button.
        saveButton = Button(root,text="SAVE",command=self.save)
        saveButton.grid()
        # Place this frame on the 2-D grid.
        self.grid()

    # Create one labelled input-entry row on the form.
    def genEntry(self,text,varname):
        tmpLabel = Label(self.entryFrame,text = text,padding=5)
        tmpLabel.grid(row=self.numEntryRow,column=0,sticky=E)
        tmpEntry = Entry(self.entryFrame,textvariable=varname,width=40)
        tmpEntry.grid(row=self.numEntryRow,column=1)
        self.numEntryRow = self.numEntryRow + 1

    # Callback invoked when the save button is pressed: validate every
    # field and write the configuration out as JSON.
    def save(self):
        for k,v in self.inputDict.items():
            # Remove all whitespace characters from the value.
            v.set("".join(v.get().split()))
            # Make sure a value was entered.
            if v.get() == "":
                msg = "input \""+k+"\""
                self.dbg(msg)
                showerror("Error",msg)
                return None
        # Check that the bound values can be converted to float.
        try:
            boundVals = []
            for k in self.boundKeyList:
                boundVals.append(float(self.inputDict[k].get()))
        except ValueError:
            msg = "\""+k+"\"" + "can not be conveted to number"
            self.dbg(msg)
            return None
        except :
            #TODO msg
            self.dbg(msg)
            self.dbg(Exception)
            sys.exit()
        # Check that the integer inputs can be converted to int.
        try:
            intVals = []
            for k in self.intKeyList:
                intVals.append(int(self.inputDict[k].get()))
        except ValueError:
            msg = "\""+k+"\"" + "can not be conveted to integer"
            self.dbg(msg)
            return None
        except Exception:
            msg = "An unknown error has occurred"
            self.dbg(msg)
            self.dbg(Exception)
            sys.exit()
        #TODO msg
        # Check the ordering of the bound values (must be strictly decreasing).
        if not(boundVals[0] > boundVals[1]
               and boundVals[1] > boundVals[2]
               and boundVals[2] > boundVals[3] ):
            #TODO msg
            self.dbg("incorrect bound value")
            return None
        try:
            # Serialize to JSON and write it out.
            outputDict = OrderedDict()
            for k,v in zip(self.boundKeyList,boundVals):
                outputDict[k] = v
            for k,v in zip(self.intKeyList,intVals):
                outputDict[k] = v
            for k in self.stringKeyList:
                outputDict[k] = self.inputDict[k].get()
            with open("config.json","w") as f:
                json.dump(outputDict,f,indent=4)
            self.dbg("complete")
            showinfo("message","succeeded to save the configurations!")
        except IOError:
            self.dbg("An IO error has occurred")
            #TODO msg

    # Debug print helper (active only while self.DEBUG is True).
    def dbg(self,something):
        if self.DEBUG == True:
            print(something)
# Root frame: create the Tk window and instantiate the application.
root = Tk()
root.title(u"config")
app = ConfigApp(master=root)
app.mainloop() |
# SQL clause fragments appended to queries; the %s placeholders are filled
# in by the caller via parameter substitution.
WITH_LIMIT = u'''
LIMIT %s
'''

WITH_OFFSET = u'''
OFFSET %s
'''
WITH_DESC = u'''
DESC
''' |
# Federal tax brackets. Per bracket: lowest_income/greatest_income bound the
# bracket (the top bracket has no upper bound); line2 is the bracket base
# amount, line4 the marginal rate, line6 the tax accumulated on lower
# brackets (names follow the paper tax form's line numbers).
tax_brackets = [
    {
        "lowest_income": 0.00,
        "greatest_income": 47630.00,
        "line2": 0.00,
        "line4": 0.15,
        "line6": 0.00,
    },
    {
        "lowest_income": 47630.01,
        "greatest_income": 95259.00,
        "line2": 47630.00,
        "line4": 0.205,
        "line6": 7144.50,
    },
    {
        "lowest_income": 95259.01,
        "greatest_income": 147667.00,
        "line2": 95259.00,
        "line4": 0.26,
        "line6": 16908.445,
    },
    {
        "lowest_income": 147667.01,
        "greatest_income": 210371.00,
        "line2": 147667.00,
        "line4": 0.29,
        "line6": 30534.525,
    },
    {
        "lowest_income": 210371.01,
        "line2": 210371.00,
        "line4": 0.33,
        "line6": 48718.685,
    }
]
def bracket_establisher(income):
    """Return the formatted tax owing for *income*, or an error message.

    Validates that income is a non-negative int or float, then dispatches
    to tax_owing_calc with the line2/line4/line6 values of the bracket the
    income falls into.
    """
    # type() checks deliberately exclude bool (a subclass of int).
    if type(income) != float and type(income) != int or income < 0:
        return f"{income} is not a valid number! Please re-enter."
    income = float(income)
    # Walk the brackets from lowest to highest; the top bracket has no
    # "greatest_income" key. Using >= on the lower edge fixes the original
    # bug where an income of exactly 0.00 matched no bracket and the
    # function silently returned None.
    for bracket in tax_brackets:
        upper = bracket.get("greatest_income")
        if income >= bracket["lowest_income"] and (upper is None or income <= upper):
            return tax_owing_calc(income, bracket["line2"], bracket["line4"], bracket["line6"])
def tax_owing_calc(income, base, rate, tax):
    """Compute the tax owing: the accumulated tax on lower brackets plus
    *rate* applied to the income above the bracket *base*, formatted as a
    dollar message with two decimals."""
    owing = (income - base) * rate + tax
    return f"Your tax owing is ${owing: .2f}."
|
# __eq__ & is vs == | Python Quick Tips
class Dog:
    """Toy class demonstrating __eq__ versus identity (`is`)."""

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        """Dogs are equal when their names match.

        The original implementation fell through and returned None for the
        not-equal and not-a-Dog cases; returning False / NotImplemented
        gives correct boolean results and lets Python try the reflected
        comparison for foreign types.
        """
        if isinstance(other, Dog):
            return other.name == self.name
        return NotImplemented
tim = Dog("tim")
joe = Dog("tim")
kim = tim
print(tim == joe)
print(tim is joe)
print(tim is kim)
class Cat:
    # NOTE(review): this print executes once at class-definition time (it
    # sits in the class body, not in a method) and refers to the
    # module-level tim/joe above -- presumably a leftover paste; confirm
    # before removing, as deleting it changes the printed output.
    print(tim is joe)

    def __init__(self, name):
        self.name = name

# Cat defines no __eq__, so == falls back to identity and prints False.
jim = Cat("jim")
tom = Cat("jim")
print(jim == tom)
# Two separate list objects: equal contents (==) but distinct identities.
x = [1, 2]
y = [1, 2]
print(x == y)
print(x is y)
print(id(x))
print(id(y))
# Aliasing the same list: both names share one object and one id.
x = [1, 2]
y = x
print(x is y)
print(id(x))
print(id(y))
|
from tkinter import *
class Message:
    """Tk window that displays one of *messages* on a canvas and rotates
    to the next message on every left mouse click."""

    def __init__(self, messages):
        window = Tk()
        window.title("Rotating Message")
        self.messages = messages
        self.canvas = Canvas(window, height = 250, width = 300, bg = "white")
        self.canvas.pack(fill = Y)
        self.index = 0  # index of the message currently displayed
        self.canvas.create_text(150, 125, text = self.messages[self.index], tags = "words")
        self.canvas.bind("<Button-1>", self.processMouseEvent)
        # NOTE: mainloop() blocks, so __init__ only returns when the
        # window is closed.
        window.mainloop()

    def processMouseEvent(self, event):
        """Advance to the next message (wrapping around) and redraw it."""
        self.canvas.delete("words")
        self.index = (self.index + 1) % len(self.messages)
        self.canvas.create_text(150,125, text = self.messages[self.index], tags = "words")


Message(["Programming is fun", "It is fun to program", "I sure like programming"])
|
from typing import List, Optional
from ray.data.datasource import (
DefaultFileMetadataProvider,
DefaultParquetMetadataProvider,
FastFileMetadataProvider,
)
from ray.data.datasource.image_datasource import _ImageFileMetadataProvider
def get_generic_metadata_provider(file_extensions: Optional[List[str]]):
    # Used by all other file-based `read_*` APIs.
    # NOTE(review): `file_extensions` is accepted for interface uniformity
    # but is not used by the default provider.
    return DefaultFileMetadataProvider()
def get_parquet_metadata_provider():
    # Used by `read_parquet`.
    return DefaultParquetMetadataProvider()
def get_parquet_bulk_metadata_provider():
    # Used by `read_parquet_bulk`.
    return FastFileMetadataProvider()
def get_image_metadata_provider():
    # Used by `read_images`.
    return _ImageFileMetadataProvider()
|
def brute_force(s, idx):
    """Return the first *idx* characters of *s* with every space replaced
    by '%20' (the URL encoding of a space)."""
    prefix = s[:idx]
    return prefix.replace(" ", "%20")
def execute(s, idx):
    # Thin dispatcher kept so alternative implementations can be swapped in.
    return brute_force(s, idx)
if __name__ == "__main__":
print(execute("hello world ", 11))
print(execute("hello world ", 11))
print(execute("hello world kazuki ", 18))
|
"""
Given a string and an integer k, you need to reverse the first k characters for
every 2k characters counting from the start of the string. If there are less than k characters left,
reverse all of them. If there are less than 2k but greater than or equal to k characters, then reverse the
first k characters and left the other as original.
Example:
Input: s = "abcdefg", k = 2
Output: "bacdfeg"
"""
def reverseStr(s, k):
    """Reverse the first k characters of every 2k-character chunk of s.

    :param s: str - the input string
    :param k: int - half the chunk size; must be positive
    :return: str - the transformed string
    """
    pieces = []
    i = 0
    # `<` instead of the original `<=` drops one redundant final iteration
    # over empty slices (the produced string is unchanged); collecting the
    # pieces and joining once avoids quadratic string concatenation.
    while i < len(s):
        pieces.append(s[i:i + k][::-1])    # reverse the first k of the chunk
        pieces.append(s[i + k:i + 2 * k])  # keep the second k as-is
        i += 2 * k
    return "".join(pieces)
# Smoke tests for reverseStr and the slice-reversal idiom it relies on.
print(reverseStr('abcdefg', 2))
print(reverseStr("cam", 3))
l = 'abcd'
print(l[0:2][::-1]) |
# "Bit++"-style counter: read N statements, each containing either '++'
# (increment X) or '--' (decrement X), and print X's final value.
no_of_lines = int(input())
total = 0
for i in range(no_of_lines):
    s = input()
    if '++' in s:
        total += 1
    elif '--' in s:
        total -= 1
print(total)
|
# web2py controller bootstrap: `response`, `URL`, `auth` and `db` are
# framework globals injected by web2py's execution environment.
response.menu = []
inicio = URL('default','index')
# Keys used by the views for flash/error messages and initial state.
errorUsuario = "usuario"
errorEstado = "estado"
estadoInicial = "En-linea"
errorUsuInvalido = "invalido"


def is_session():
    # True when a user is currently logged in.
    return True if auth.is_logged_in() else False


if is_session():
    # Cache frequently-used fields of the logged-in user as module globals;
    # the helpers below read these (they are undefined when logged out).
    idUser = auth.user.id
    nameUser = "%s %s" %(auth.user.first_name,auth.user.last_name)
    emailUser = auth.user.email
    grup = auth.user.tipo
    estado = auth.user.registration_key
    extUser = auth.user.sal_llamada
    sucursalUsuario = auth.user.sucursal
    pass
def nombreEmpleado():
    # First name of the logged-in auth user.
    nombre = auth.user.first_name
    return nombre
def datosEmpleado( idUsuario ):
    """Return the employee name linked to *idUsuario* via the
    usuario_empleado join table, falling back to the auth user's first
    name when no linked employee record exists."""
    dbEmpl = db.empleados
    dbUsuEmpl = db.usuario_empleado
    tmpData = db( dbUsuEmpl.usuario_empleado_usuario == idUsuario ).select( dbUsuEmpl.usuario_empleado_empleado ).last()
    if tmpData:
        tmpNom = db( dbEmpl.id == tmpData.usuario_empleado_empleado ).select( dbEmpl.empleados_nombres ).last()
        if tmpNom:
            dato = tmpNom.empleados_nombres
        else:
            dato = nombreEmpleado()
        pass
    else:
        dato = nombreEmpleado()
        pass
    return dato
def plantilla():
    """Pick the layout template matching the logged-in user's group
    (module-level global `grup`); default template when no group matches."""
    group_templates = (
        (['Coordinador'], 'templateCoorInit.html'),
        (['Director'], 'templateDirInit.html'),
        (['Asesor'], 'templateAseInit.html'),
    )
    for group, template in group_templates:
        if grup == group:
            return template
    return 'templateInit.html'
def estadoServicioAsesor():
    # Service status ('estado_servicio') of the logged-in user's auth row.
    multi_users = db.auth_user
    estadoAsesor = db( multi_users.id == idUser ).select().last()['estado_servicio']
    return estadoAsesor
def codigoPais():
    # Country calling code for the user's company.
    # NOTE(review): `empresaUsuario` is not defined anywhere in the visible
    # scope (unlike sucursalUsuario above) -- confirm it is set elsewhere,
    # otherwise this raises NameError at call time.
    paisId = db( db.empresas.id == empresaUsuario ).select( db.empresas.empresas_pais ).last()['empresas_pais']
    codigoPais = db( db.paises.id == paisId ).select().last()['paises_codigo']
    return codigoPais
|
# Generate a list containing factors of num.
# The number whose factors we print.
num = 34
# Try every candidate from 1 to num inclusive; factors divide evenly.
for i in range(1, num+1):
    if num % i == 0:
        print(i, end=' ')
|
# -*- coding: utf-8 -*-
# Demo of common ways to walk a dict (Python 2 print statements):
stuff={'name':'Zed','age':'36','height':'6*12+2'}
# 1) unpack (key, value) pairs from items()
for (x,y) in stuff.items():
    print "%s:" %x,y
print "-"*15
# 2) iterate the keys and index back into the dict
for x in stuff:
    print "%s:" %x,stuff[x]
print "-"*15
# 3) dump the whole pair list at once
print stuff.items()
print stuff |
def add(array):
    """Increment every element of the 2-D list *array* in place
    (returns None)."""
    for row in array:
        for col, value in enumerate(row):
            row[col] = value + 1
def add_V2(array):
    """Return a new 2-D list with every element of *array* incremented by
    one; the input array is left untouched."""
    return [[value + 1 for value in row] for row in array]
def main():
    """Read a 2-D integer array from stdin (one row per line, blank line
    to finish) and demonstrate the in-place `add` versus the copying
    `add_V2`."""
    array = []
    print("""Input the array elements with spaces between columns.
One row per line, and an empty line at the end.""")
    while True:
        x = input()
        if x =="":
            break
        else:
            List = []
            y = x.strip().split()
            for i in y:
                List.append(int(i))
            array.append(List)
    print("The array is:")
    print(array)
    print("After executing the function add, the array is:")
    add(array)
    print(array)
    print("A new array created with add_V2:")
    newest_array = add_V2(array)
    print(newest_array)
    print("After executing the function add_V2, the initial array is:")
    print(array)


main()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''Script that calculates F-score by sentence length for number of input-files or folders
Creates a plot using matplotlib'''
import argparse
import os
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def create_arg_parser():
    '''Build and parse the command-line arguments for the F-score plot.'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_files", required=True, nargs="*",
                        help="Files with all individual F-scores")
    parser.add_argument("-n", "--names", default='', nargs="*",
                        help="Names of the experiment, if not added take from filenames")
    parser.add_argument("-s", "--sentences", required=True, type=str,
                        help="File with the tokenized (!) sentences")
    parser.add_argument("-m", "--min_occurrence", default=8, type=int,
                        help="Minimum number of times a certain sen-length should occur")
    parser.add_argument("-o", "--output_file", default='plot.pdf', type=str,
                        help="Location of output-file (default plot.pdf)")
    parser.add_argument("-noc", "--no_colors", action="store_true",
                        help="Don't add colors in the PDF")
    args = parser.parse_args()
    return args
def get_sen_lengths(sent_file):
    '''Return list of sentences and a list of sentence length per sentence.

    Fix: the file handle was opened without ever being closed; use a
    context manager so it is released deterministically.
    '''
    with open(sent_file, 'r') as fh:
        sents = [line.strip() for line in fh]
    sen_lengths = [len(sen.split()) for sen in sents]
    return sents, sen_lengths
def read_scores(in_f):
    '''Read one float score per line from in_f.

    Raises ValueError when the file does not exist.
    Fix: the file handle was opened without ever being closed; use a
    context manager.
    '''
    if os.path.isfile(in_f):
        with open(in_f, 'r') as fh:
            return [float(line.strip()) for line in fh]
    raise ValueError("{0} does not exist".format(in_f))
def get_scores(input_files):
    '''Collect the score list from every existing input file.

    Also keep track of the file-names so they can serve as experiment
    names when none were specified.'''
    scores, file_names = [], []
    for path in input_files:
        if os.path.isfile(path):
            scores.append(read_scores(path))
            # Use the basename only, not the full path.
            file_names.append(path.split('/')[-1])
    return scores, file_names
def check_validity(sents, sen_lengths, scores):
    '''Sanity-check that sentences, lengths and every score list line up.'''
    expected = len(sents)
    assert expected == len(sen_lengths)
    for score_list in scores:
        assert len(score_list) == expected, "{0} vs {1}".format(len(score_list), expected)
def get_max_sen_len(sen_lengths, threshold):
    '''Return the highest sentence length (starting from 3) for which we
    still have at least `threshold` sentences.

    Bug fix: the fallback used `return ValueError(...)`, handing callers an
    exception *object* instead of aborting; it now raises.
    '''
    for idx in range(3, max(sen_lengths) + 1):
        if sen_lengths.count(idx) < threshold:
            return idx - 1  # return previous idx if this one doesn't make it
    raise ValueError("Did not find a max sen-length -- check value of -m and --min_occurrence: {0}"
                     .format(threshold))
def create_empty_list(max_sen_len):
    '''Return max_sen_len + 1 independent empty lists (indices 0..max_sen_len).

    A comprehension guarantees each inner list is a distinct object.'''
    return [[] for _ in range(max_sen_len + 1)]
def avg_per_length(scores, sen_lengths, min_sen_len, max_sen_len):
    '''Average the F-scores per sentence length for each score list.

    Returns the averages per list, the per-length sentence counts
    (for the lengths in range), and the length range itself.'''
    sen_range = range(min_sen_len, max_sen_len + 1)
    avg_scores = []
    for score_list in scores:
        length_counts = [0] * (max_sen_len + 1)
        # One bucket of scores per possible sentence length (0..max).
        per_length = [[] for _ in range(max_sen_len + 1)]
        for idx, length in enumerate(sen_lengths):
            if length in sen_range:
                length_counts[length] += 1
                per_length[length].append(score_list[idx])
        # Average each in-range bucket.
        relevant = per_length[min_sen_len:]
        avg_scores.append([float(sum(vals)) / float(len(vals)) for vals in relevant])
    return avg_scores, length_counts[min_sen_len:], sen_range
def make_plot(avg_scores, file_names, sen_range, output_file, do_print=False,
              no_colors=False, num_sents=None):
    '''Plot the average F-score per sentence length for each experiment.

    avg_scores : per-experiment lists of averages, one value per length.
    file_names : legend label for each experiment.
    sen_range  : x-axis values (the sentence lengths).
    num_sents  : optional per-length sentence counts shown under the x ticks.
                 Fix: the default was the mutable literal [] (shared across
                 calls); None has the same truthiness and is safe.
    '''
    matplotlib.rc('xtick', labelsize=13)
    matplotlib.rc('ytick', labelsize=13)
    # Start with figure
    _, ax = plt.subplots()
    # Set colors and styles
    line_styles = ["--", "--X", "--^", "--v", "--s", "--o", "--*"]
    if no_colors:
        line_styles = ['k--', 'k--.', 'k--+', 'k--v', 'k--s', 'k--*']
    else:
        ax.set_prop_cycle(color=["gold", "cornflowerblue", "lightgreen",
                                 "orange", "lightcoral", "chocolate", "purple"])
    # Do the actual plotting here
    # NOTE(review): more experiments than line styles raises IndexError — confirm intended.
    for idx, (avg_score, name) in enumerate(zip(avg_scores, file_names)):
        if do_print:
            print('\nF-averages {0}:\n'.format(name), [round(x, 2) for x in avg_score])
        plt.plot(sen_range, [float(a) * 100 for a in avg_score],
                 line_styles[idx], lw=2.5, label=name)
    # Add necessary information
    # ax.set_ylim([78, 93]) # perhaps needed
    plt.xlabel('Document length (tokens)', size=15)
    plt.ylabel('F-score', size=16)
    # Set legend, perhaps in different location, depends on your graph
    plt.legend(loc=(0.105, 0.05), shadow=True, fontsize=13)
    # Optionally add a second line to each x tick label with the sentence count
    if num_sents:
        x_labels = ["{0}\n{1}".format(num, count) for num, count in zip(sen_range, num_sents)]
        _ = ax.set(xticks=sen_range, xticklabels=x_labels)
    # And save the plot
    plt.savefig(output_file, format='pdf', bbox_inches="tight")
def main():
    '''Entry point: compute and plot the average F-score per sentence length.'''
    args = create_arg_parser()
    # Sentences, their lengths, and every score file.
    sents, sen_lengths = get_sen_lengths(args.sentences)
    scores, file_names = get_scores(args.input_files)
    # Explicit names (if given) override the file-derived ones.
    if args.names:
        file_names = args.names
    check_validity(sents, sen_lengths, scores)
    # Minimum length is fixed at 3; the maximum depends on -m/--min_occurrence.
    min_len = 3
    max_len = get_max_sen_len(sen_lengths, args.min_occurrence)
    avg_scores, length_counts, sen_range = avg_per_length(scores, sen_lengths,
                                                          min_len, max_len)
    make_plot(avg_scores, file_names, sen_range, args.output_file,
              do_print=True, no_colors=args.no_colors, num_sents=length_counts)

if __name__ == "__main__":
    main()
|
# write the dictionary into astropy tables
# from astropy.table import Table, Column
# t = Table(masked=True)
# t.add_column(Column(name='time', data=time,masked=True, unit='year'))
# t.add_column(Column(name='lon', data=lon,masked=True, unit='degree'))
# t.add_column(Column(name='lat', data=lat,masked=True, unit='lat'))
# t.add_column(Column(name='attribute', data=attribute))
# t.add_column(Column(name='RCP_jja_emissions', data=RCP_jja_emissions,unit='kg m-2 s-1'))
# Write out to file
# t.write('myfile.fits') # also support HDF5, ASCII, etc.
# # Read in from file
# t = table.Table.read('myfile.fits')
# from astropy.table import Table
# t = table.Table.read('myfile.fits')
# data_rows = [(1, 2.0, 'x'),
# (4, 5.0, 'y'),
# (5, 8.2, 'z')]
# t = Table(rows=data_rows, names=('a', 'b', 'c'), meta={'name': 'first table'},
# dtype=('i4', 'f8', 'S1'))
# You can also assign a unit to the columns. If any column has a unit assigned, all units would be shown as follows:
# t['b'].unit = 's'
# you can get summary information about the table as follows:
# t.info
# If you do not like the format of a particular column, you can change it
# t['b'].format = '7.3f'
# For a long table you can scroll up and down through the table one page at time:
# t.more()
# You can also display it as an HTML-formatted table in the browser:
# t.show_in_browser()
# or as an interactive (searchable & sortable) javascript table:
# t.show_in_browser(jsviewer=True)
# examine column names
# t.colnames
# Access the data by column or row using familiar numpy structured array syntax:
# t['a'] # Column 'a'
# You can retrieve a subset of a table by rows (using a slice) or columns (using column names), where the subset is returned as a new table:
# print(t[0:2])
# print(t['a', 'c'])
# Replace, add, remove, and rename columns with the following:
# >>> t['b'] = ['a', 'new', 'dtype'] # Replace column b (different from in place)
# >>> t['d'] = [1, 2, 3] # Add column d
# >>> del t['c'] # Delete column c
# >>> t.rename_column('a', 'A') # Rename column a to A
# Adding a new row of data to the table is as follows:
# >>> t.add_row([-8, -9, 10])
# You can create a table with support for missing values, for example by setting masked=True:
# t = Table([a, b, c], names=('a', 'b', 'c'), masked=True, dtype=('i4', 'f8', 'S1')
# You can include certain object types like Time, SkyCoord or Quantity in your table. These “mixin” columns behave like a hybrid of a regular Column and the native object type (see Mixin columns). For example:
# >>>
# >>> from astropy.time import Time
# >>> from astropy.coordinates import SkyCoord
# >>> tm = Time(['2000:002', '2002:345'])
# >>> sc = SkyCoord([10, 20], [-45, +40], unit='deg')
# >>> t = Table([tm, sc], names=['time', 'skycoord'])
# >>> t
# <Table length=2>
# time skycoord
# deg,deg
# object object
# --------------------- ----------
# 2000:002:00:00:00.000 10.0,-45.0
# 2002:345:00:00:00.000 20.0,40.0
|
# Sierpinski's Gasket
"""
Write a function that takes an integer n and returns the nth iteration of the
fractal known as Sierpinski's Gasket.
Here are the first few iterations. The fractal is composed entirely of L and
white-space characters; each character has one space between it and the next
(or a newline).
"""
def sierpinski(n):
    """Return the nth iteration of Sierpinski's gasket as a string of 'L's.

    Each step keeps the previous triangle on top and places two copies of it
    side by side underneath (rows padded to width 2**n)."""
    if n == 0:
        return 'L'
    prev = sierpinski(n - 1)
    rows = prev.split('\n')
    doubled = [row.ljust(2 ** n) + row for row in rows]
    return prev + '\n' + '\n'.join(doubled)
# test
# Fix: the old Python-2 print statements are a SyntaxError on Python 3;
# the function-call form prints identically on both (single argument).
print(sierpinski(0))
print("===============")
print(sierpinski(1))
print("===============")
print(sierpinski(2))
print("===============")
print(sierpinski(3))
# clever method
"""
def sierpinskiRows(n):
if not n:
return [ 'L' ]
last = sierpinskiRows(n - 1)
return last + [ row.ljust(2 ** n) + row for row in last ]
def sierpinski(n):
return '\n'.join(sierpinskiRows(n))
"""
# clever method 2
"""
def sierpinski(n):
r = ['L']
for i in range(n):
l = len(r)
for j in range(l):
r.append(r[j].ljust(l * 2) + r[j])
return '\n'.join(r)
"""
# My method for short
"""
def sierpinski(n):
if n == 0:
return 'L'
else:
return sierpinski(n - 1) + '\n' + "\n".join(
[sierpinski(n - 1).split('\n')[i].ljust(2**n) +
sierpinski(n - 1).split('\n')[i] for i in
range(2**(n - 1))])
"""
|
"""
xxx,xxx,xxx,xxx
b m t ht
1,239,911
1,xxx,xxx one million
2xx,xxx two hundred thousand ---> don't need to repeat "thousand"
3x,xxx thirty thousand --->
9,xxx nine thousand --->
9xx nine hundred
1x ten ---> NOT ten one, it's eleven (damn you english)
1 one --->
1,239,911 1,000,000 // 1,000,000 = 1
1,239,911 239,000 // 1,000 = 239
1,239,911 911
9325
9xxx nine thousand
3xx three hundred
2x twenty
5 five
351
3xx three hundred
5x fifty
1 one
311
3xx three hundred
1x ten --> NOT ten one, eleven...
1 one
"""
class Solution(object):
    """Convert a non-negative integer to its English-words representation."""

    def __init__(self):
        # Lookup tables: 0-19, multiples of ten, and power-of-1000 scale names.
        self.LESS_THAN_20 = ["", "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten", "Eleven",
                             "Twelve", "Thirteen", "Fourteen", "Fifteen", "Sixteen", "Seventeen", "Eighteen", "Nineteen"]
        self.TENS = ["", "Ten", "Twenty", "Thirty", "Forty", "Fifty", "Sixty", "Seventy", "Eighty", "Ninety"]
        self.THOUSANDS = ["", "Thousand", "Million", "Billion"]

    def numberToWords(self, num):
        """Return the words for `num`, e.g. 123 -> 'One Hundred Twenty Three'."""
        if num == 0:
            return "Zero"
        words = ""
        scale = 0
        # Peel off three digits at a time, prefixing each spoken chunk.
        while num > 0:
            chunk = num % 1000
            if chunk != 0:
                words = self.helper(chunk) + self.THOUSANDS[scale] + " " + words
            num //= 1000
            scale += 1
        return words.strip()

    def helper(self, num):
        """Words for 0 <= num < 1000, with a trailing space ('' for zero)."""
        if num == 0:
            return ""
        if num < 20:
            return self.LESS_THAN_20[num] + " "
        if num < 100:
            return self.TENS[num // 10] + " " + self.helper(num % 10)
        return self.LESS_THAN_20[num // 100] + " Hundred " + self.helper(num % 100)
print(Solution().numberToWords(1239991 )) |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 18:49:29 2018
@author: PENG YANG YANG
"""
########################
# TEMA 1 - EJERCICIO 5 #
########################
'''
Ejercicios Funciones
1. Crear una función que divida un número por tres.
2. Crear una función que eleve a la 10 un determinado valor.
3. Crear una función que calcule el área de una circunferencia.
4. Crear una función que calcule el área de un triángulo.
5. Crear una función que determine si un número es impar.
'''
# %reset -f  # IPython magic — a SyntaxError in a plain Python script; run it manually in IPython/Spyder if needed.
#%% 1. Crear una función que divida un número por tres.
def dividir3(x):
    """Return x divided by three (true division)."""
    return x / 3
#%% 2. Crear una función que eleve a la 10 un determinado valor.
def power10(x):
    """Return x raised to the tenth power."""
    return x ** 10
#%% 3. Crear una función que calcule el área de una circunferencia.
def area_circulo(**kwargs):
    """Return the area of a circle given 'radio' (radius) and/or 'diametro' (diameter).

    Raises ValueError when both are supplied but inconsistent, or when
    neither is supplied.  Fix: `!= None` / `== None` comparisons replaced
    with the idiomatic `is not None` / `is None`.
    """
    from math import pi
    radio = kwargs.get('radio', None)
    diametro = kwargs.get('diametro', None)
    if radio is not None and diametro is not None:
        # NOTE(review): exact float comparison — nearly-equal inputs are
        # rejected; confirm this strictness is intended.
        if diametro / 2 != radio:
            print('Valores erróneos')
            raise ValueError
    elif radio is None and diametro is None:
        print('Valores Vacíos')
        raise ValueError
    if radio is not None:
        area = pi*(radio**2)
    elif diametro is not None:
        radio = diametro / 2
        area = pi*(radio**2)
    return area
print(area_circulo(radio = 2))
print(area_circulo(diametro = 2))
print(area_circulo(radio = 2, diametro = 4))
print(area_circulo(radio = 2, diametro = 6))
#%% 4. Crear una función que calcule el área de un triángulo.
def area_triangulo(base, altura):
    """Return the area of a triangle from its base and height."""
    return (base * altura) / 2
#%% 5. Crear una función que determine si un número es impar.
def impar(x):
    """Print whether x is odd ('Impar!') or even ('Par!').

    Improvement: also return the result (True when odd) so callers can use
    it programmatically; previously the function returned None.
    """
    es_impar = x % 2 != 0
    if es_impar:
        print('Impar!')
    else:
        print('Par!')
    return es_impar
# -*- coding: UTF-8 -*-
# python -m pip install twstock
# py stock.py
# https://twstock.readthedocs.io/zh_TW/latest/
import twstock
import json
from datetime import datetime
from random import randint
from time import sleep
import pandas_datareader as pdr
from ids import stockIds
twStockList = twstock.codes
# twstock.codes 台灣上市上櫃股票代號
# twstock.tpex 台灣上櫃股票代號
# twstock.twse 台灣上市股票代號
# print(twStockList)
# ------- 取得股市代碼 ----
def genAllStockList(twStockList):
    """Dump every entry of the twstock code table to allcode.json.

    twStockList maps a stock id to an info object exposing the attributes
    type/code/name/ISIN/start/market/group/CFI.
    Fix: the output file is now written through a context manager so it is
    closed even if serialisation fails.
    """
    allStock = []
    for key in sorted(twStockList.keys()):
        stockInfo = twStockList[key]
        allStock.append({
            "type": stockInfo.type,
            "code": stockInfo.code,
            "name": stockInfo.name,
            "ISIN": stockInfo.ISIN,
            "start": stockInfo.start,
            "market": stockInfo.market,
            "group": stockInfo.group,
            "CFI": stockInfo.CFI,
            "id": key,
        })
    with open("allcode.json", "w") as stockDataFile:
        stockDataFile.write(json.dumps(allStock, sort_keys=True,indent=4, separators=(',', ':')))
# ------- 生成前三天資料 ----
def genLast3Day(twStockList):
    """Fetch the three most recent trading days' data for every stock entry
    and cache the result in stock.json.

    Only entries whose type is "stock" (股票) are processed.  Fixes: the bare
    `except:` is narrowed to Exception (so Ctrl-C still interrupts the long
    fetch loop) and the cache file is written via a context manager.
    """
    allStock = []
    for key in sorted(twStockList.keys()):
        stockInfo = twStockList[key]
        # Only fetch plain stocks for now.
        if stockInfo.type == '\u80a1\u7968' or stockInfo.type == '股票':
            eachStackData = {}
            eachStackData["type"] = stockInfo.type
            eachStackData["code"] = stockInfo.code
            eachStackData["name"] = stockInfo.name
            eachStackData["ISIN"] = stockInfo.ISIN
            eachStackData["start"] = stockInfo.start
            eachStackData["market"] = stockInfo.market
            eachStackData["group"] = stockInfo.group
            eachStackData["CFI"] = stockInfo.CFI
            eachStackData["id"] = key
            print("讀取中: " + stockInfo.name + "(" + stockInfo.code + ")")
            try:
                df = pdr.DataReader(str(stockInfo.code)+'.TW', 'yahoo')
                # Keep the dates and open prices of the last 3 traded days.
                eachStackData["datas"] = {}
                eachStackData["datas"]['date']=[str(df.index[-1]),str(df.index[-2]),str(df.index[-3])]
                eachStackData["datas"]['open']=[df['Open'][-1],df['Open'][-2],df['Open'][-3]]
                print(eachStackData)
                allStock.append(eachStackData)
            except Exception:
                # Narrowed from a bare except; a failed lookup just skips the stock.
                print("Yahoo 不存在該股資料: " + stockInfo.name + "(" + stockInfo.code + ")")
    # Persist the cache so later runs can skip the slow fetch.
    with open("stock.json", "w") as stockDataFile:
        stockDataFile.write(json.dumps(allStock, sort_keys=True,
                                       indent=4, separators=(',', ':')))
    return allStock
def nowOpenMoreThanlast3Days(now, last3days):
    """Return True when today's open price beats each of the last three opens."""
    print('及時開盤:'+now)
    print('前三天開盤:'+ str(last3days))
    current = float(now)
    return all(current > float(last3days[i]) for i in range(3))
def getLast3Day():
    """Load the per-stock data previously cached in stock.json."""
    with open('stock.json') as cache_file:
        return json.load(cache_file)
# ------- Main program
# Generate the full code list (normally not needed)
# genAllStockList(twStockList)
# print(stockIds)
# Fetch the last three days' data (open prices) — slow, so usually cached
# last3DayData = genLast3Day(twStockList)
last3DayData = getLast3Day()
twstock.realtime.mock = False
# for eachId in stockIds:
#     stock=twstock.realtime.get(eachId)
#     sleep(5)
#     print(stock["realtime"]["open"])
# Collect every stock whose realtime open beats all of its last three opens.
targetList=[]
for eachStock in last3DayData:
    print("及時個股資料獲取中:" + eachStock['name'] +" ("+eachStock['code']+")")
    stock=twstock.realtime.get(eachStock['code'])
    nowOpen=stock["realtime"]["open"]
    if nowOpenMoreThanlast3Days(nowOpen,eachStock['datas']['open']):
        tempObj={}
        tempObj["name"]=eachStock['name']
        tempObj["code"]=eachStock['code']
        targetList.append(tempObj)
    # Throttle requests to avoid being rate-limited by the quote service.
    sleep(5)
print("個股今天開盤大於前三天開盤:")
print(str(targetList))
|
import glob
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
from astropy.nddata import CCDData
import astropy.units as u
import ccdproc as ccdp
import os
import pathlib
from ccdproc import ImageFileCollection
from astropy.visualization import hist
import itertools
from astropy.stats import sigma_clip, mad_std
import time
import sys
def create_directories():
    """Create the output directory tree used by the reduction steps.

    Improvement: the four copy-pasted exists/makedirs/print triples are
    collapsed into one loop; behaviour (including the printed messages)
    is unchanged.
    """
    for path in ('Trimmed_Flat', 'Master_Files',
                 'Trimmed_Flat/subflatsmed', 'Trimmed_Flat/subflatssig'):
        if not os.path.exists(path):
            os.makedirs(path)
            print('created directory ' + path)
def trim_flat(refresh='2'):
    """Trim the SII flat frames (refresh='1') or load the existing trimmed
    set from disk (refresh='2').

    Returns the raw flat collection and the list of trimmed-flat paths.
    Fixes: the bare `except:` is narrowed to Exception, and the error path
    now exits with a non-zero status (it previously exited with 0, which
    signals success to the shell).
    """
    flatcollection = ImageFileCollection('HD115709/flat_SII', ext=4)
    flag = 0
    tflatpathlist = []
    if refresh == '1':
        for ccdf, flatn in flatcollection.ccds(return_fname=True, ccd_kwargs={'unit': 'adu'}):
            if flag == 0:
                # Report the trim section once, from the first frame's header.
                print('all flats will be trimmed to :', ccdf.meta['trimsec'])
                flag = 1
            print('trimming', flatn)
            tflat = ccdp.trim_image(ccdf, fits_section=str(ccdf.meta['trimsec']))
            tflat.meta['imtype'] = ('trimmed flat', 'type of image')
            tflat.meta['taxis1'] = (2048, 'dimension1')
            tflat.meta['taxis2'] = (4096, 'dimension2')
            tflat.write('Trimmed_Flat/' + flatn[0:8] + '_trim.fits', overwrite=True)
            tflatpathlist.append('Trimmed_Flat/' + flatn[0:8] + '_trim.fits')
        print('created', len(tflatpathlist), 'trimmed flats')
    elif refresh == '2':
        try:
            tflatcollection = ImageFileCollection('Trimmed_Flat')
            tflatpathlist = tflatcollection.files_filtered(imtype='trimmed flat', include_path=True)
            print('found', len(tflatpathlist), 'trimmed flats')
        except Exception:
            print('can\'t locate trimmed flats, create or check directory')
            sys.exit(1)
    return flatcollection, tflatpathlist
def sub_bias(refresh='2', bias='2'):
    """Subtract the chosen master bias from every trimmed flat (refresh='1')
    or load the already-subtracted flats from disk (otherwise).

    bias='1' uses the median master bias, bias='2' the sigma-clipped one.
    Returns the trimmed-flat collection and the list of subflat paths.
    Fixes: bare `except:` narrowed to Exception; the error exit now reports
    a non-zero status (sys.exit() previously exited with 0).
    """
    tflatcollection = ImageFileCollection('Trimmed_Flat')
    if bias == '1':
        biaspath = 'Master_Files/mbias_median.fits'
        dest = 'Trimmed_Flat/subflatsmed/'
    elif bias == '2':
        biaspath = 'Master_Files/mbias.fits'
        dest = 'Trimmed_Flat/subflatssig/'
    if refresh == '1':
        subflatpathlist = []
        mbias = CCDData.read(biaspath, unit='adu')
        for ccdf, flatn in tflatcollection.ccds(imtype='trimmed flat', return_fname=True):
            subflat = ccdp.subtract_bias(ccdf, mbias, add_keyword='subbias')
            subflat.meta['imtype'] = ('subflat', 'bias subtracted flat')
            subflat.write(dest + flatn[0:8] + '_subbias.fits',overwrite=True)
            subflatpathlist.append(dest + flatn[0:8] + '_subbias.fits')
    else:
        try:
            subflatcollection = ImageFileCollection(dest)
            subflatpathlist = subflatcollection.files_filtered(imtype='subflat', include_path=True)
            print('found', len(subflatpathlist), 'subflats')
        except Exception:
            print('can\'t locate subflats, create or check directory')
            sys.exit(1)
    return tflatcollection, subflatpathlist
# Create the output directory tree used by the steps below.
create_directories()
# Trim the flat frames; answering "2" just loads the already-trimmed files.
print('do you want to trim the flats? (1. Yes / 2. Read existing files)\n')
tfref = input()
flatcollection, tflatpathlist = trim_flat(tfref)
# Subtract the chosen master bias from the trimmed flats.
refresh = input('do you want to subtract bias from flats? (1. Yes / 2. Read existing files)\n')
bias = input('select which bias to use (1. Median / 2. Sigma clipped average): \n')
tflatcollection, subflatpathlist = sub_bias(refresh, bias)
def combine_flats(refresh='2', method='2'):
    """Combine the bias-subtracted flats into a master flat (refresh='1') or
    load an existing master flat from disk (otherwise).

    method='1' is a plain median combine; method='2' a sigma-clipped average.
    Returns the subflat collection, the master flat, the destination path
    and the combination time in seconds (0 when nothing was combined).
    Fixes: bare `except:` narrowed to Exception; the error exit now reports
    a non-zero status (sys.exit() previously exited with 0).
    """
    if method == '1':
        meta = 'med'
        source = 'Trimmed_Flat/subflatsmed'
        dest = 'Master_Files/mflat_median.fits'
    elif method == '2':
        meta = 'sig'
        source = 'Trimmed_Flat/subflatssig'
        dest = 'Master_Files/mflat.fits'
    subflatcollection = ImageFileCollection(source)
    combtime = 0
    if refresh == '1':
        print('found', len(subflatcollection.values('file')), 'subflats')
        start = time.time()
        if method == '1':
            mflat = ccdp.combine(subflatcollection.files_filtered(
                imtype='subflat', include_path=True),
                method='median')
            mflat.meta['flatcom'] = 'median'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
        elif method == '2':
            mflat = ccdp.combine(subflatcollection.files_filtered(imtype='subflat', include_path=True),
                                 sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
                                 sigma_clip_func=np.nanmedian, sigma_clip_dev_func=mad_std)
            mflat.meta['flatcom'] = 'sigma'
            combtime = time.time() - start
            print('combination took', combtime, 'seconds')
        # Record the normalisation median and which subflat set was used.
        mflat.meta['normmed'] = (np.nanmedian(mflat), 'nanmedian of the master flat')
        mflat.meta['subflats'] = meta
        mflat.write(dest[0:-5]+'_'+meta+'.fits', overwrite=True)
    else:
        try:
            if method == '1':
                mflat = CCDData.read('Master_Files/mflat_median_med.fits', unit='adu')
            elif method == '2':
                mflat = CCDData.read('Master_Files/mflat_sig.fits', unit='adu')
        except Exception:
            print('can\'t locate master flat, create or check directory')
            sys.exit(1)
    return subflatcollection, mflat, dest, combtime
# Combine the bias-subtracted flats into the master flat; its metadata
# records the normalisation median ('normmed') and which subflat set was used.
print('do you want to create master flat again? (1. Yes / 2. Read existing files)\n')
mbref = input()
print('select combination method? (1. median / 2. sigma clipped average)\n')
method = input()
subflatcollection, mflat, mflatpath, combtime = combine_flats(mbref, method)
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from .views import CreateView
from .views import DetailsView
from .views import CreateViewTeste
from .views import CreateViewCliente
from .views import CreateViewPromocao
from .views import CreateViewCategoria
from .views import DetailsViewTeste
from .views import DetailsViewCliente
from .views import DetailsViewPromocao
from .views import DetailsViewCategoria
# Bug fix: urlpatterns was declared with {...} (a set literal). Sets have no
# defined order, so URL resolution order was nondeterministic; Django expects
# a list here.
# NOTE(review): several routes share the names "create"/"details", which makes
# reverse() lookups ambiguous — confirm whether per-model names are wanted.
urlpatterns = [
    url(r'^bucketlists/$', CreateView.as_view(), name="create"),
    url(r'^bucketlists/(?P<pk>[0-9]+)/$',
        DetailsView.as_view(), name="details"),
    url(r'^teste/$', CreateViewTeste.as_view(), name="create"),
    url(r'^teste/(?P<pk>[0-9]+)/$',
        DetailsViewTeste.as_view(), name="details"),
    url(r'^cliente/$', CreateViewCliente.as_view(), name="create"),
    url(r'^cliente/(?P<pk>[0-9]+)/$',
        DetailsViewCliente.as_view(), name="details"),
    url(r'^sale/$', CreateViewPromocao.as_view(), name="create"),
    url(r'^sale/(?P<pk>[0-9]+)/$', DetailsViewPromocao.as_view(), name="details"),
    url(r'^categorie/$', CreateViewCategoria.as_view(), name="create"),
    url(r'^categorie/(?P<pk>[0-9]+)/$', DetailsViewCategoria.as_view(), name="details"),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
import pygame, sys, Draw, Enemies, Func, Gen
import Config as C
from pygame.locals import *
from Const import *
from Images import *
from random import randint
pygame.init()
C.init()
# --- To remove later --- #
AreaX = 20; AreaY = 16
Gen.Create_Map(AreaX, AreaY)
Draw.Map();
Draw.Model(Humie,HumieX,HumieY)
# Create an enemy of chosen type
def Spawn(EnemyType, x=None, y=None): # Spawn Enemy in x,y
    """Spawn an enemy of EnemyType at (x, y), retrying random tiles until a
    walkable one (Map[x][y][2]) is found.

    Bug fix: the old defaults were `x=randint(...)` evaluated once at
    definition time, so every default call spawned at the same coordinates.
    """
    if x is None:
        x = randint(0, AreaX-1)
    if y is None:
        y = randint(0, AreaY-1)
    if EnemyType == 'Droid':
        Enemy = Enemies.droid
    while True:
        if C.Map[x][y][2]:
            C.Bots.append(Enemy(x,y))
            break
        else:
            x = randint(0,AreaX-1)
            y = randint(0,AreaY-1)
# Creating a bunch of enemies for testing
for k in range(10):
    Spawn('Droid', k+2,k+2)
# Main game loop
while True:
    # Checking for player actions
    for event in pygame.event.get():
        Move = False # if True, activates move
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        # Movement: arrow keys turn the hero sprite and, when the target tile
        # is walkable and free (Map[...][2] and [3]), step onto it.
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                Move = True
                Humie = HumieRight
                if C.Map[HumieX+1][HumieY][2] == True and C.Map[HumieX+1][HumieY][3] == True:
                    HumieX += 1
            if event.key == pygame.K_LEFT:
                Move = True
                Humie = HumieLeft
                if C.Map[HumieX-1][HumieY][2] == True and C.Map[HumieX-1][HumieY][3] == True:
                    HumieX -= 1
            if event.key == pygame.K_UP:
                Move = True
                Humie = HumieUp
                if C.Map[HumieX][HumieY-1][2] == True and C.Map[HumieX][HumieY-1][3] == True:
                    HumieY -= 1
            if event.key == pygame.K_DOWN:
                Move = True
                Humie = HumieDown
                if C.Map[HumieX][HumieY+1][2] == True and C.Map[HumieX][HumieY+1][3] == True:
                    HumieY += 1
            # Space attacks the tile the hero is currently facing.
            if event.key == pygame.K_SPACE:
                Move = True
                if Humie == HumieLeft:
                    for Bot in C.Bots:
                        Bot.GetHit(HumieX-1,HumieY,10)
                if Humie == HumieRight:
                    for Bot in C.Bots:
                        Bot.GetHit(HumieX+1,HumieY,10)
                if Humie == HumieUp:
                    for Bot in C.Bots:
                        Bot.GetHit(HumieX,HumieY-1,10)
                if Humie == HumieDown:
                    for Bot in C.Bots:
                        Bot.GetHit(HumieX,HumieY+1,10)
        # Calculating environment reaction (enemies only react when the player acted)
        if Move == False: continue
        for Bot in C.Bots:
            Bot.Move(HumieX, HumieY)
        DISPLAY.fill(BLACK)
        # Drawing things
        Draw.Map()
        #Draw.WalkMap()
        for Bot in C.Bots:
            Bot.Draw()
        Draw.Model(Humie,HumieX,HumieY)
        print(HumieX,HumieY)  # debug: hero position per processed event
        pygame.display.update()
        FpsClock.tick(FPS)
|
from pprint import pprint
from django.core import serializers
from django.shortcuts import get_object_or_404
from django.utils import simplejson
from django.http import HttpResponse
from django.contrib.auth.models import User
def member(request, id):
    """JSON detail view for one member: display name, latest check-in time,
    profile image URL and the ten most recent log entries.

    Fixes: both bare `except:` clauses are narrowed — indexing an empty
    queryset raises IndexError, and the image lookup now catches Exception
    instead of everything (so KeyboardInterrupt/SystemExit pass through).
    """
    user = get_object_or_404(User, id=id)
    log_entries = user.log_entries.all()[:10]
    try:
        log_entries[0]
    except IndexError:
        # No log entries at all for this user.
        return HttpResponse("No activity yet")
    if user.first_name and user.last_name:
        name = user.first_name + " " + user.last_name
    else:
        name = user.username
    try:
        latest_checkin_entry = user.log_entries.filter(on_illutron=True)[0].time.isoformat()
    except IndexError:
        latest_checkin_entry = None
    try:
        image = user.get_profile().image.url
    except Exception:
        # Missing profile or image just means "no image".
        # NOTE(review): narrow further once the profile model's failure modes are known.
        image = None
    log_list = []
    for log in log_entries:
        log_list.append({
            #'latitude': log.latitude,
            #'longitude': log.longitude,
            'on_illutron': log.on_illutron,
            'time': log.time.isoformat(),
            'description': log.description
        })
    data = {
        'name': name,
        'on_illutron': log_entries[0].on_illutron,
        'latest_checkin_time': latest_checkin_entry,
        'image': image,
        'log': log_list
    }
    return HttpResponse(simplejson.dumps(data), mimetype='application/json')
def member_list(request):
    """
    Return a JSON list of all members (id and username only).
    """
    members = list(User.objects.all().values('id', 'username',))
    payload = simplejson.dumps(members)
    return HttpResponse(payload, mimetype='application/json')
|
import os
import scipy.misc
import numpy as np
import random
from utils import pp
import utils
import test
from model import PGN
from data.Bouncing_balls_data_reader import Bouncing_Balls_Data_Reader
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 500, "Epoch to train (you can always interrupt) [500]")
#flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
#flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("batch_size", 128, "The size of batch images [128]")
flags.DEFINE_string("dataset", "/atlas/u/anthony/future_frame_prediction/data/Bouncing_balls_data_color", "The path of the dataset")
flags.DEFINE_string("dataset_name", "debug_bouncing_balls", "The name of the dataset when saved to checkpoints")
flags.DEFINE_string("checkpoint_dir", "checkpoints/unlabeled_checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples/unlabeled_samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("is_train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("is_test", False, "True for testings, false to not run testing [False]")
flags.DEFINE_boolean("is_debug", False, "True for debugging (uses small dataset) [False]")
flags.DEFINE_boolean("is_visualize", False, "True for visualizing [False]")
flags.DEFINE_float("lambda_adv_loss", 0.0002, "The weight of the adverserial loss during training [0.0002]")
FLAGS = flags.FLAGS
def main(_):
    """Build the PGN model, then train / test / visualize according to FLAGS.

    Fix: the test-result file was written with `unicode(result_str)`, which
    does not exist on Python 3, and the handle was closed manually; the
    string is now written directly inside a `with` block.
    """
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    # Fixed seeds so runs are reproducible.
    random.seed(31241)
    np.random.seed(41982)
    tf.set_random_seed(1327634)
    color = True # Must change this and the dataset Flags to the correct path to use color
    if FLAGS.is_debug:
        # Debug mode uses a much smaller dataset split.
        reader = Bouncing_Balls_Data_Reader(FLAGS.dataset, FLAGS.batch_size, color=color, train_size=160*5, validation_size=8*5, test_size=8*5, num_partitions=5)
    else:
        reader = Bouncing_Balls_Data_Reader(FLAGS.dataset, FLAGS.batch_size, color=color)
    data_fn = lambda epoch, batch_index: reader.read_data(batch_index, reader.TRAIN)
    frame_shape = reader.read_data(0, reader.TRAIN).shape[2:]
    print("Frame shape: ", frame_shape)
    num_batches = reader.num_batches(reader.TRAIN)
    print("Num batches: %d" % num_batches)
    input_sequence_range = range(5, 16)
    print("Input sequence range min: %d, max: %d" % (min(input_sequence_range), max(input_sequence_range)))
    save_sample_fn = utils.gen_save_sample_fn(FLAGS.sample_dir, image_prefix="train")
    with tf.Session() as sess:
        pgn = PGN(sess, FLAGS.dataset_name, FLAGS.epoch, num_batches, FLAGS.batch_size, input_sequence_range,
                  data_fn, frame_shape=frame_shape, save_sample_fn=save_sample_fn, checkpoint_dir=FLAGS.checkpoint_dir,
                  lambda_adv_loss= FLAGS.lambda_adv_loss)
        if FLAGS.is_train:
            pgn.train()
        else:
            print("Loading from: %s" %(FLAGS.checkpoint_dir,))
            if pgn.load(FLAGS.checkpoint_dir) :
                print(" [*] Successfully loaded")
            else:
                print(" [!] Load failed")
        if FLAGS.is_test:
            result = test.test(pgn, reader)
            result_str = pp.pformat(result)
            with open(os.path.join(FLAGS.sample_dir, 'test_out.txt'), mode='w') as fid:
                fid.write(result_str)
        if FLAGS.is_visualize:
            for i in range(3):
                vid_seq = reader.read_data(i, data_set_type=reader.TEST, batch_size=1)[:, 0, :, :, :]
                utils.make_prediction_gif(pgn, os.path.join(FLAGS.sample_dir, 'vis_%d.gif' % i), video_sequence=vid_seq)
                utils.plot_convergence(pgn.get_MSE_history(), "MSE Convergence",
                                       path=os.path.join(FLAGS.sample_dir, "vis_MSE_convergence.png"))
if __name__ == '__main__':
tf.app.run() |
import pandas as pd
import re
import random
import math
import sys
def create_dataframe():
    """Load the three article/stylometry CSV pairs from sys.argv[1], join each
    pair column-wise, stack the parts, and drop ids listed in
    remove_dataset.csv.

    Improvement: the three copy-pasted load/concat blocks are collapsed into
    one loop; behaviour is unchanged.
    """
    frames = []
    for part in (1, 2, 3):
        articles_df = pd.read_csv("{0}/articles{1}.csv".format(sys.argv[1], part))
        stylo_df = pd.read_csv("{0}/stylo_data_{1}.csv".format(sys.argv[1], part))
        frames.append(pd.concat([articles_df, stylo_df], axis=1))
    all_df = pd.concat(frames)
    # print(articles_df.groupby('publication').size())
    remove_df = pd.read_csv(sys.argv[1] + "/remove_dataset.csv")
    return all_df[~all_df['id'].isin(remove_df['id'])]
def write_data(df, train_file, test_file):
    """Split df into train/test rows via the given id-list CSVs and write
    both splits to the output paths in sys.argv[2] and sys.argv[3]."""
    print('splitting up train and test')
    train_ids = pd.read_csv(train_file)
    test_ids = pd.read_csv(test_file)
    train_df = df[df['id'].isin(train_ids['id'])]
    test_df = df[df['id'].isin(test_ids['id'])]
    # Serialize each split with the shared writer.
    print('writing training data to file')
    write_file(train_df, sys.argv[2])
    print('writing test data to file')
    write_file(test_df, sys.argv[3])
def write_file(df, filename):
    """Write one tab-separated line per article: cleaned content, publication,
    then the 14 stylometric features formatted by `stylo_string`.

    Improvements: the 14 copy-pasted `.tolist()` lines are replaced by a
    column list + zip, the unused `all_pubs` local is removed, and the file
    is written through a context manager (as before).
    """
    stylo_cols = ['num_sen', 'num_word', 'avg_word_per_sen', 'num_named_entity',
                  'frac_words_noun', 'frac_words_verb',
                  'avg_polarity_score_negative', 'avg_polarity_score_neutral',
                  'avg_polarity_score_positive', 'avg_polarity_score_compound',
                  'var_polarity_score_negative', 'var_polarity_score_neutral',
                  'var_polarity_score_positive', 'var_polarity_score_compound']
    stylo_string = "%d\t%d\t%f\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f"
    # Collapse all whitespace runs in the article text to single spaces.
    article_text = [re.sub(r'\s+', ' ', text).strip() for text in df['content'].tolist()]
    publications = df['publication'].tolist()
    # One tuple of the 14 feature values per row, in stylo_cols order.
    feature_rows = list(zip(*(df[col].tolist() for col in stylo_cols)))
    with open(filename, 'w') as f:
        for i in range(len(publications)):
            f.write(article_text[i] + "\t" + publications[i] + "\t" + (stylo_string % feature_rows[i]) + "\n")
if __name__ == "__main__":
if len(sys.argv) != 6:
sys.exit("Usage: %s article_dir train_file_name test_file_name train_ind_file test_ind_file" % sys.argv[0])
articles_df = create_dataframe()
write_data(articles_df, sys.argv[4], sys.argv[5])
|
import math as m

# Demo of a few math-module functions; each result is printed on its own line.
for result in (m.sqrt(16), m.pi, m.e, m.floor(3.9), m.ceil(3.9), m.pow(3, 2)):
    print(result)
# Disabled Python-2-era scratch code (print statements, xrange), kept by the
# original author inside a string literal so it never executes. It enumerated
# non-increasing tuples whose squared sum is a perfect square.
'''
vals = set()
def digits( sum=0, values=(), digit=0, amount=21 ):
    if isSquare( sum ):
        vals.add( values )
        print "***",values
    if digit == 20:
        return
    else:
        for i in xrange( 1, amount + 1 ):
            digits( sum + i**2, values + (i,), digit + 1, i )
def isSquare( num ):
    x = num // 2
    seen = set([x])
    while x * x != num:
        x = ( x + ( num // 2 ) ) // 2
        if x in seen: return False
        seen.add( x )
    return True
digits()
for val in vals:
    print val
'''
|
# Python imports
import numpy as np
# Core imports
from core.agents.models.tabular import DiscreteTabularModel
class RMaxModel(DiscreteTabularModel):
    """Tabular model that snapshots reward/transition statistics the moment a
    state becomes 'known' (R-Max style)."""

    def __init__(self, observation_space, action_space, default_reward, limit):
        # Placeholders; real snapshots are created in reset(), which the base
        # constructor is expected to trigger.
        self.known_rewards = None
        self.known_transitions = None
        DiscreteTabularModel.__init__(self, observation_space, action_space, default_reward, limit)

    def update(self, state, action, reward, next_state):
        # Ignore incomplete transitions.
        if any(v is None for v in (state, action, next_state)):
            return
        newly_known = not self.is_known_state(state)
        DiscreteTabularModel.update(self, state, action, reward, next_state)
        if newly_known and self.is_known_state(state):
            # The state just crossed the 'known' threshold: freeze its stats.
            self.known_rewards[state] = self.rewards[state]
            self.known_transitions[state] = self.transitions[state]

    def reset(self):
        DiscreteTabularModel.reset(self)
        self.known_rewards = np.array(self.rewards)
        self.known_transitions = np.array(self.transitions)
|
def pickUpNearestItem(items):
    """Move toward the closest item, if any items exist."""
    closest = hero.findNearest(items)
    if closest:
        moveTo(closest.pos)
def moveTo(position, fast=True):
    """Walk to position; jump instead when allowed, ready, and far away."""
    if fast and hero.isReady("jump") and hero.distanceTo(position) > 10:
        hero.jumpTo(position)
    else:
        hero.move(position)
def attack(target):
    """Close the distance, then use the best currently ready ability."""
    if not target:
        return
    if hero.distanceTo(target) > 10:
        moveTo(target.pos)
    elif hero.isReady("bash"):
        hero.bash(target)
    elif hero.canCast('chain-lightning', target):
        hero.cast('chain-lightning', target)
    elif hero.isReady("attack"):
        hero.attack(target)
    else:
        hero.shield()
# Main loop: fight enemies that get within melee range, otherwise gather loot.
# Fix: the original nested `if (enemyattack)` was guaranteed true by the outer
# condition, so its `else: hero.shield()` branch was dead code — removed.
while True:
    items = hero.findItems()
    enemy = hero.findNearestEnemy()
    if enemy and hero.distanceTo(enemy) < 2:
        attack(enemy)
    elif len(items) > 0:
        pickUpNearestItem(items)
|
#!/usr/bin/env python
#
# collect.py renames files from subdirectories
#
# Copyright F. Nedelec, 2007--2018
"""
Synopsis:
Rename files or folders following a pattern containing an integer index,
    as in 'image0001.png'. The file will be moved to the current directory.
The number in the file name is incremented automatically for each file, and
also if files with this name already exist. Thus pre-existing files are not
overwritten, such that 'collect.py' can be used to pool together many similar
files in a common directory.
Syntax:
collect.py PATTERN [INTEGER] [--copy] PATH1 [PATH2] [PATH3] ...
Arguments:
PATTERN specifies the name of the output files, and should contain a variable
part that will be replaced by an integer. It can be a 'scanf' compatible
pattern such as '%i' or '%0Xi', for example 'image%04i.png'.
A character '%' repeated multiple times, such as `%%%%` or `%%%%%%`, can
also be used to specify the size of the integer portion of the name.
The pattern can include a '/' that would indicate a directory, and if this
directory does not exist, collect.py will create it before moving the file.
if specified, `--copy` will copy the files/directory instead of moving them
if specified, INTEGER is the first index to be used (default=0)
PATH1, PATH2, etc. is a list of files or directories
Examples:
collect.py image%%%%.png *.png
will rename image files to: image0000.png, image0001.png, etc.
collect.py --copy image%%%%.png 1 run*/image.png
will copy the image files, starting at index 1
collect.py run%%%%/config.cym config*.cym
will create directories `run????` and move the `config*.cym` files into them
F. Nedelec, 2012--2018. Last modified 2.10.2017
"""
import sys, shutil, os, curses.ascii
#------------------------------------------------------------------------
def copy_recursive(src, dst):
    """Copy a file, or a directory tree recursively, from src to dst."""
    if os.path.isfile(src):
        shutil.copy2(src, dst)
        return
    if not os.path.isdir(src):
        return
    try:
        os.mkdir(dst)
    except OSError:
        pass  # destination directory may already exist
    for entry in os.listdir(src):
        copy_recursive(os.path.join(src, entry), os.path.join(dst, entry))
def main(args):
    """Rename (or copy) files/directories to names built from an indexed pattern.

    args -- CLI arguments after the program name: [--copy] PATTERN [INTEGER]
            PATH1 [PATH2] ...
    Returns the list of destination names on success, or 1 on error
    (matching the original contract).
    """
    do_copy = False
    arg = args.pop(0)
    # '--copy' may be given before the pattern
    if arg == '-c' or arg == '--copy' or arg == 'copy=1':
        do_copy = True
        pattern = args.pop(0)
    else:
        pattern = arg
    # the first argument must be a pattern, not an existing file
    if os.path.isfile(pattern):
        sys.stderr.write("Error: first argument should be the pattern used to build output file name")
        return 1
    try:
        res = (pattern % 0)
    except (TypeError, ValueError):
        # allow '%%%%'-style patterns: a run of n '%' becomes '%0ni'
        for n in range(10, 0, -1):
            s = pattern.find('%' * n)
            # Bug fix: the original tested `s > 0`, silently skipping
            # patterns that *start* with the '%' run (e.g. '%%%%.png').
            if s >= 0:
                pattern = pattern.replace('%' * n, '%0' + str(n) + 'i', 1)
                break
        try:
            res = (pattern % 0)
        except (TypeError, ValueError):
            sys.stderr.write("Error: the pattern should accept an integer: eg. '%04i'\n")
            return 1
    for c in res:
        if curses.ascii.isspace(c):
            sys.stderr.write("Error: the pattern includes or generates white space character\n")
            return 1
    # parse remaining arguments:
    paths = []
    idx = 0
    for arg in args:
        if arg == '-c' or arg == '--copy' or arg == 'copy=1':
            do_copy = True
        elif os.path.isfile(arg) or os.path.isdir(arg):
            # existing paths win over the INTEGER interpretation, so a file
            # literally named '123' is still collected
            paths.append(arg)
        elif arg.isdigit():
            # Bug fix: the original tested args[0].isdigit()/int(args[0]),
            # so when an INTEGER argument was present every subsequent path
            # fell into this branch and was never collected.
            idx = int(arg)
        else:
            sys.stderr.write("Error: '%s' is not a file or directory" % arg)
            return 1
    # process all files
    res = []
    for src in paths:
        while idx < 1000000:
            dst = pattern % idx
            idx += 1
            if dst == src:
                # source already carries a patterned name: keep it in place
                res.append(dst)
                break
            if not os.path.exists(dst):
                # make directory if the name includes one that does not exist
                dir = os.path.dirname(dst)
                if dir and not os.path.isdir(dir):
                    os.mkdir(dir)
                # process file:
                if do_copy:
                    copy_recursive(src, dst)
                else:
                    os.rename(src, dst)
                res.append(dst)
                print("%s -> %s" % (src, dst))
                break
    return res
#------------------------------------------------------------------------
# Entry point: print the module docstring when no arguments (or a *help
# argument) are given, otherwise hand all remaining arguments to main().
if __name__ == "__main__":
    if len(sys.argv) < 2 or sys.argv[1].endswith("help"):
        print(__doc__)
    else:
        main(sys.argv[1:])
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.core.models import Shop
from shuup_mailchimp.interface.base import ShuupMailchimp
def update_or_create_contact(sender, instance, **kwargs):
    """
    Signal handler for Shuup contacts

    Adds the contact's email to the Mailchimp list of every configured shop,
    but only when the contact has granted marketing permission.
    """
    if instance.marketing_permission:
        for shop in Shop.objects.all():
            add_email_to_list(shop, instance.email, contact=instance)
def update_or_create_contact_from_order(sender, order, *args, **kwargs):
    """
    Signal handler for Shuup orders

    Subscribes the order's email address (tied to the order's customer) when
    the order carries marketing permission.
    """
    if not (order.email and order.marketing_permission):
        return
    add_email_to_list(order.shop, order.email, contact=order.customer)
def add_email_to_list(shop, email, contact=None):
    """
    Add email and optional contact to a shop's Mailchimp list

    :param shop: shop whose Mailchimp configuration is used
    :param email: email to add in the list
    :param contact: optional associated Shuup contact
    :return:
    """
    ShuupMailchimp(shop).add_email_to_list(email, contact=contact)
|
import units0

# Build a LADSPA host named "yay" loading the Invada mono low-pass filter
# plugin. NOTE(review): assumes /usr/lib/ladspa/inv_filter.so exists on this
# machine — confirm for the deployment environment.
h = units0.ladspa_host("yay", [["/usr/lib/ladspa/inv_filter.so", "invada_lp_mono_filter_module_0_1"]])
|
# Django application registry for this project.
# NOTE(review): 'filebrowser' and 'modeltranslation' are listed before
# 'django.contrib.admin' — presumably deliberate so they can hook into the
# admin; verify before reordering, since app order matters to Django.
INSTALLED_APPS = [
    'filebrowser',
    'modeltranslation',
    # Django built-in apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'django.contrib.redirects',
    'django.contrib.flatpages',
    # Third-party apps (disabled entries kept by the original author)
    # 'crispy_forms',
    'tinymce',
    'rosetta',
    'autotranslate',
    # 'import_export',
    # Project apps
    'core.apps.CoreConfig',
    # 'pages.apps.PageConfig',
    # 'content.apps.ContentConfig',
    # 'order.apps.OrderConfig',
    'boilerplate.pages.apps.PageConfig',
    'boilerplate.content.apps.ContentConfig',
    'boilerplate.order.apps.OrderConfig',
]
|
# Count password policy lines of the form "lo-hi letter: password" where the
# letter occurs between lo and hi times in the password.
i = 0
for e in open('i02.txt').read().strip().split('\n'):
    rs, c, s = e.split()
    lo, hi = rs.split('-')
    if int(lo) <= s.count(c[0]) <= int(hi):
        i += 1
print("Valid passwords:", i)
|
# Assignment prompt (Chinese): model a player/enemy combat scenario with OOP
# (attack power, hit points, injury and death behaviour), and implement the
# "add student" feature of a student manager.
'''
1.使用面向对象思想,写出下列场景
玩家(攻击力)攻击敌人,敌人受伤(血量)后掉血,还可能死亡(播放动画)
敌人(攻击力)攻击玩家,玩家受伤(血量)后碎屏,还可能死亡(游戏结束)
2.完成学生管理器之-添加学生
'''
# Disabled draft of an MVC-style implementation, kept by the original author
# as a commented-out string literal; the active implementation follows below.
'''
class gameModel:
    def __init__(self, attack=0, blood=0, die=0):
        self.attack = attack
        self.blood = blood
        self.die = die
    @property
    def attack(self):
        return self.__attack
    @attack.setter
    def attack(self,value):
        self.__attack = value
    @property
    def blood(self):
        return self.__blood
    @blood.setter
    def blood(self,value):
        self.__blood = value
    @property
    def die(self):
        return self.__die
    @die.setter
    def die(self,value):
        self.__die = value
class gameManagerController:
    def __init__(self):
        self.mode = gameModel()
    def game_attack(self):
        self.mode.attack +=10
    def low_blood(self):
        pass
class gameManagerViewer:
    def __display_menu(self):
        print("1)攻击敌人")
        print("2)攻击玩家")
    def __select_menu(self):
        number = input("请输入选择:")
        gamecontroller = gameManagerController()
        if number =='1':
            gamecontroller.game_attack()
        elif number =='2':
            gamecontroller.low_blood()
    def main(self):
        self.__display_menu()
        self.__select_menu()
managerViewer = gameManagerViewer()
'''
class player:
    """A combatant with hit points (hp) and an attack value (atk)."""

    def __init__(self, hp=0, atk=0):
        self.atk = atk
        self.hp = hp

    def attack(self, enemy):
        # Deal this player's attack value to the enemy.
        print("打你")
        enemy.damage(self.atk)

    def damage(self, enemy_value):
        # Take damage; dying is triggered when hp drops to zero or below.
        self.hp -= enemy_value
        print("受伤啦")
        if self.hp <= 0:
            self.__die()

    def __die(self):
        print("玩家死亡")
class Enemy:
    """A combatant mirroring player: hit points (hp) and attack value (atk)."""

    def __init__(self, hp=0, atk=0):
        self.hp = hp
        self.atk = atk

    def attack(self, player):
        # Bug fix: the original called player.damage() with no argument,
        # which raised TypeError because damage() requires the damage value.
        player.damage(self.atk)

    def damage(self, atk_value):
        # Take damage; dying is triggered when hp drops to zero or below.
        self.hp -= atk_value
        print("受伤啦")
        if self.hp <= 0:
            self.__die()

    def __die(self):
        print("敌人死亡")
# Demo: a player (100 hp, 50 atk) strikes an enemy (100 hp, 50 atk) once.
p01 =player(100,50)
e01 = Enemy(100,50)
p01.attack(e01)
|
# 6) Read a length in meters and display it converted to millimeters.
metros = float(input("Digite um valor em metros para ser convertido para milimetros: "))
milimetros = metros * 1000
print(f"A conversão dos metros digitados é: {milimetros}")
import math, os
import pandas as pd

# Indirect deflection check of a concrete beam via the span/effective-depth
# (l/d) ratio limit. Prompts and messages are in Serbian; lengths are
# converted from cm to m and areas from cm^2 to m^2.
l = float(input('Unesi raspon grede l [m]: '))
d = float(input('Unesi staticku visinu grede d [cm]: '))
d = d/100  # cm -> m
b = float(input('Unesi sirinu preseka b [cm]: '))
b = b/100  # cm -> m
fckMPA = int(input('Unesi karakteristicnu cvrstocu betona na pritisak fck [MPa]: '))
K = float(input('Unesi koeficijent statickog sistema K: '))
Asreq = float(input('Unesi proracunski potrebnu armaturu od savijanja As,req [cm^2]: '))
Asreq = Asreq*math.pow(10, -4)  # cm^2 -> m^2
Asprov = float(input('Unesi usvojenu armaturu od savijanja As,prov [cm^2]: '))
Asprov = Asprov*math.pow(10, -4)  # cm^2 -> m^2
# Reference reinforcement ratio — presumably Eurocode 2 §7.4.2; TODO confirm.
ro0 = math.sqrt(fckMPA)*math.pow(10, -3)
ro = Asreq/(b*d)  # required tension reinforcement ratio
if ro <= ro0:
    granica = K*(11 + 1.5*math.sqrt(fckMPA)*ro0/ro + 3.2*math.sqrt(fckMPA)*math.pow(ro0/ro - 1, 3/2))
elif ro > ro0:
    # Higher reinforcement ratio: ask for the compression reinforcement ratio.
    roprim = float(input('Unesi procenat armiranja pritisnute armature \u03C1` [%]: '))
    roprim = roprim/100
    granica = K*(11 + 1.5*math.sqrt(fckMPA)*ro0/(ro - roprim) + 1/12*math.sqrt(fckMPA)*math.sqrt(roprim/ro0))
# Scale the limit by the provided/required steel area ratio.
korekcija = Asprov/Asreq
granica = granica*korekcija
if l/d <= granica:
    print('\nKontrola ugiba ..... OK')
elif l/d > granica:
    print('\nKontrola ugiba indirektnom metodom ne zadovoljava!')
print('l/d, stvarno = ', l/d)
print('l/d, lim = ', granica)
# Look up the entered fck in the concrete-data table, if the table exists.
if 'BetonPodaci.csv' not in os.listdir():
    print('BetonPodaci.csv se ne nalazi u radnom folderu!')
    exit()
else:
    df = pd.read_csv('BetonPodaci.csv', delimiter = ';', encoding = 'UTF-8', skipinitialspace = True)
    fck_lista = df['fck [Mpa]'].to_list()
    if fckMPA in fck_lista:
        # NOTE(review): 'indeks' is unused in this chunk; presumably consumed
        # further down in the original file — verify.
        indeks = fck_lista.index(fckMPA)
    else:
        print(f'{fckMPA} nije standardna karakteristicna cvrstoca betona!')
        exit()
|
from django.urls import path
from django.contrib import admin
from django.conf.urls import include, url
from hrapp.models import *
from hrapp import views
from .views import *
# NOTE(review): 'url' is imported twice; left untouched here to avoid
# altering file-level imports.
from django.conf.urls import url

app_name = 'hrapp'

# URL routes for the HR app. path() matches route strings literally —
# no regular-expression syntax is interpreted.
urlpatterns = [
    path('', home, name='home'),
    path('accounts/', include('django.contrib.auth.urls')),
    path('logout/', logout_user, name='logout'),
    path('employees/<int:employee_id>/form', employee_edit_form, name='employee_edit_form'),
    path('employees/', employee_list, name='employee_list'),
    path('training_programs/', training_list, name='training_list'),
    path('employees/form', employee_form, name='employee_form'),
    path('employees/<int:employee_id>/', employee_details, name='employee'),
    path('training_programs/form', training_form, name='training_form'),
    path('training_programs/<int:training_id>', training_details, name='training_details'),
    path('training_programs/<int:training_id>/form', training_edit_form, name='training_edit_form'),
    path('departments/', department_list, name='department_list'),
    path('departments/<int:department_id>/', get_department_and_employees, name='department_details'),
    # Bug fix: the old route '^department/form$' used regex anchors inside
    # path(), which matches literally, so the URL could never be reached.
    path('department/form', department_form, name='department_form'),
    path('computers/', computer_list, name='computer_list'),
    path('computer/form', computer_form, name='computer_form'),
    path('computer/<int:computer_id>/', computer_details, name='computer')
]
|
# Copyright 2010 Jose Maria Zambrana Arze <contact@josezambrana.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from google.appengine.ext import blobstore
from google.appengine.ext.blobstore import BlobInfo
from django import http, template
from django.shortcuts import render_to_response
from blob.models import Blob
from common import decorator
from common import common_views
@decorator.admin_required
def blobinfo_upload(request):
    # Render the admin blob-upload form. The template context is built from
    # locals(), so the local variable names below (redirect_to, upload_url)
    # are exactly the names the template sees — do not rename them.
    redirect_to = request.get_full_path()
    upload_url = blobstore.create_upload_url('/blobstore/upload')
    c = template.RequestContext(request, locals())
    return render_to_response("blob_upload.html", c)
@decorator.admin_required
def blob_admin(request):
    # Generic admin listing for BlobInfo entities, delegated to the shared
    # content_admin view with the blob-specific template.
    return common_views.content_admin(request, 'blob', model=BlobInfo, tpl='blob_admin.html')
def blob_serve(request, slug):
    """Stream the stored Blob identified by slug using its saved content type."""
    blob = Blob.get(slug=slug)
    response = http.HttpResponse(content_type=blob.content_type)
    response.write(blob.content)
    return response
rule pylint:
    """
    Created:
        2019-03-04 16:07:57
    Aim:
        Run pylint on a file and capture its report.
    Note:
        Fixed: the rule previously declared no `output`, so the `{output}`
        placeholder in the shell command was undefined and the rule could
        not run ("Currently not working"). The log now uses a distinct
        extension so it does not collide with the report file.
    Test:
        out/pylint/ln/updir/mw-sst/src/snakemake/functions/mapper2.snake.txt"
    """
    input:
        "out/{filler}"
    output:
        "out/pylint/{filler}.txt"
    log:
        "out/pylint/{filler}.log"
    conda:
        "../envs/pylint.yaml"
    shell:
        "pylint {input} > {output}"
rule pyreverse:
    """
    Test:
        out/pyreverse/ln/updir/mw-sst/src/snakemake/functions/mapper.dot
    """
    # Links the source file next to the outputs, then runs pyreverse there so
    # its artifacts land in the same directory.
    # NOTE(review): pyreverse names its outputs classes.<ext>/packages.<ext>
    # by default — confirm the declared {filler}.png is actually produced,
    # or pass `-p` to control the output basename.
    input:
        py = "out/{filler}.py"
    output:
        py = "out/pyreverse/{filler}.py",
        png = "out/pyreverse/{filler}.png"
        #dot = "out/pyreverse/{filler}.dot"
    conda:
        "../envs/pylint.yaml"
    shell:
        "ln -srf {input.py} {output.py}; "
        "cd `dirname {output.py}`; "
        "pyreverse -o png `basename {output.py}`"
|
import numpy
#import threading
import multiprocessing
import ctypes
from scipy import linalg
from scipy import ndimage
from keras.preprocessing.image import load_img, img_to_array, array_to_img#, save_img
from keras.utils import Sequence
import glob
#from PIL import Image
from skimage import util
from skimage import transform
import itertools
import os
import re
import h5py
# Worker count — presumably for the Keras fit_generator workers; not
# referenced in this chunk, verify against the caller.
WORKERS = 12
# Number of (X, Y) image pairs held in the shared sample cache.
CACHE_SIZE = 32
# Default (height, width, channels).
# NOTE(review): __init__ assigns a *local* `dims`, so this module-level value
# appears unused here — verify before relying on it.
dims = (256,256,1)
#NORM_DATA_MODE = 0 # 0 - per image over all channels; 1 - per image per channel; 2 - flat-field norm
# Input modalities; values select the normalisation rule in
# normaliseFieldArray/denormaliseFieldArray.
TYPES = {"XRAY": 0, "SCATTER": 1, "CT": 2, "prenormalized": 3}
def numpy_normalize(v):
    """Return v scaled to unit L2 norm; a zero vector is returned unchanged."""
    magnitude = numpy.linalg.norm(v)
    return v if magnitude == 0 else v / magnitude
def normaliseFieldArray(a, numChannels, flatField=None, itype=TYPES["CT"]):
    """Normalise an image array and return (minx, maxx, normalised_array).

    a           -- image array, (H, W, channels) after callers' reshaping
    numChannels -- channel count of a
    flatField   -- optional flat-field image (XRAY mode only)
    itype       -- one of the TYPES modes; selects the normalisation rule

    minx/maxx are scalars for single-channel data, per-channel lists for
    multi-channel data, and None on the flat-field path — exactly the values
    denormaliseFieldArray needs to invert the scaling.
    """
    minx = None
    maxx = None
    if itype == TYPES['XRAY']:
        # Bug fix: 'flatField != None' triggers elementwise comparison on
        # numpy arrays and then raises an ambiguous-truth-value error;
        # an identity check is required.
        if flatField is not None:
            a = numpy.clip(numpy.divide(a, flatField), 0.0, 1.0)
        else:
            if numChannels <= 1:
                minx = numpy.min(a)
                maxx = numpy.max(a)
                a = (a - minx) / (maxx - minx)
            else:
                minx = []
                maxx = []
                for channelIdx in range(numChannels):
                    minx.append(numpy.min(a[:, :, channelIdx]))
                    maxx.append(numpy.max(a[:, :, channelIdx]))
                    a[:, :, channelIdx] = (a[:, :, channelIdx] - minx[channelIdx]) / (maxx[channelIdx] - minx[channelIdx])
    elif itype == TYPES['SCATTER']:
        # Scatter data: scale by the maximum absolute value, minimum fixed at 0.
        if numChannels <= 1:
            minx = 0
            maxx = numpy.max(numpy.abs(a))
            a = (a - minx) / (maxx - minx)
        else:
            minx = []
            maxx = []
            for channelIdx in range(numChannels):
                minx.append(0)
                maxx.append(numpy.max(numpy.abs(a[:, :, channelIdx])))
                a[:, :, channelIdx] = (a[:, :, channelIdx] - minx[channelIdx]) / (maxx[channelIdx] - minx[channelIdx])
    elif itype == TYPES['CT']:
        # CT data: plain min/max range normalisation.
        if numChannels <= 1:
            minx = numpy.min(a)
            maxx = numpy.max(a)
            a = (a - minx) / (maxx - minx)
        else:
            minx = []
            maxx = []
            for channelIdx in range(numChannels):
                minx.append(numpy.min(a[:, :, channelIdx]))
                maxx.append(numpy.max(a[:, :, channelIdx]))
                a[:, :, channelIdx] = (a[:, :, channelIdx] - minx[channelIdx]) / (maxx[channelIdx] - minx[channelIdx])
    elif itype == TYPES['prenormalized']:
        pass  # data is taken as-is
    return minx, maxx, a
def denormaliseFieldArray(a, numChannels, minx=None, maxx=None, flatField=None, itype=TYPES["CT"]):
    """Invert normaliseFieldArray using the minx/maxx (or flatField) it returned."""
    if itype == TYPES['XRAY']:
        # Bug fix: identity check instead of '!=' (elementwise on arrays,
        # ambiguous-truth-value error).
        if flatField is not None:
            a = a * flatField
        else:
            if numChannels <= 1:
                a = a * (maxx - minx) + minx
            else:
                for channelIdx in range(numChannels):
                    a[:, :, channelIdx] = a[:, :, channelIdx] * (maxx[channelIdx] - minx[channelIdx]) + minx[channelIdx]
    elif itype == TYPES['SCATTER'] or itype == TYPES['CT']:
        # SCATTER and CT invert identically (the branches were duplicates).
        if numChannels <= 1:
            a = a * (maxx - minx) + minx
        else:
            for channelIdx in range(numChannels):
                a[:, :, channelIdx] = a[:, :, channelIdx] * (maxx[channelIdx] - minx[channelIdx]) + minx[channelIdx]
    elif itype == TYPES['prenormalized']:
        pass  # nothing to invert
    return a
class ScatterPhantomGenerator(Sequence):
    def __init__(self, batch_size=1, image_size=(128, 128), input_channels=32, target_size=(128, 128), output_channels=1, useResize=False,
                 useCrop=False, useZoom=False, zoomFactor=1.0, useAWGN = False, useMedian=False, useGaussian=False,
                 useFlipping=False, useNormData=False, cache_period=512, save_to_dir=None, save_format="png", threadLockVar=None, useCache=False):
        """Configure the batch generator.

        image_size/target_size are (height, width); input_channels and
        output_channels give the channel counts of X and Y. The use* flags
        toggle the preprocessing/augmentation steps applied per batch.
        threadLockVar must be a multiprocessing lock shared across worker
        copies of this generator when useCache is enabled.
        """
        # Both input and target are normalised as CT-type data (see TYPES).
        self.x_type=TYPES["CT"]
        self.y_type=TYPES["CT"]
        self.batch_size = batch_size
        self.image_size = image_size
        self.target_size = target_size
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.dtype = numpy.float32
        self.useResize = useResize
        self.useNormData = useNormData
        # Set later by prepareDirectFileInput().
        self.numImages = 0
        # NOTE(review): this rebinds a local 'dims', not the module-level one.
        dims = (target_size[0], target_size[1], input_channels)
        # ============================ #
        # Data Augmentation parameters #
        # ============================ #
        self.useAWGN = useAWGN
        # Per-channel noise statistics for additive Gaussian noise; only the
        # branch matching x_type (CT here) is ever taken, so the XRAY tables
        # below are inactive in this configuration.
        if self.x_type==TYPES["XRAY"]:
            if self.input_channels>1:
                self.MECTnoise_mu = numpy.array(
                    [0.26358, 0.24855, 0.23309, 0.22195, 0.21639, 0.21285, 0.21417, 0.21979, 0.22502, 0.23387, 0.24120,
                     0.24882, 0.25177, 0.25594, 0.26005, 0.26350, 0.27067, 0.27440, 0.27284, 0.26868, 0.26477, 0.25461,
                     0.24436, 0.24287, 0.23849, 0.24022, 0.23915, 0.23874, 0.23968, 0.23972, 0.24100, 0.23973, 0.23921,
                     0.24106, 0.24177, 0.24155, 0.24358, 0.24578, 0.24682, 0.24856, 0.24969, 0.25206, 0.25337, 0.25650,
                     0.25627, 0.25921, 0.26303, 0.26615, 0.26772, 0.26882, 0.27248, 0.27400, 0.27722, 0.27905, 0.28138,
                     0.28406, 0.28593, 0.28830, 0.29129, 0.29420, 0.29673, 0.29776, 0.29955, 0.30050, 0.30151, 0.30196,
                     0.30340, 0.30282, 0.30546, 0.30509, 0.30569, 0.30667, 0.30512, 0.30413, 0.30496, 0.30474, 0.30525,
                     0.30534, 0.30503, 0.30635, 0.30539, 0.30561, 0.30660, 0.30491, 0.30486, 0.30291, 0.30323, 0.30253,
                     0.29960, 0.29734, 0.29760, 0.29464, 0.29273, 0.29035, 0.28906, 0.28680, 0.28446, 0.27905, 0.27842,
                     0.27555, 0.27112, 0.26879, 0.26760, 0.26547, 0.26289, 0.25914, 0.25776, 0.25641, 0.25394, 0.25148,
                     0.25033, 0.24752, 0.24648, 0.24424, 0.24386, 0.24097, 0.24095, 0.24104, 0.24090, 0.23948, 0.23985,
                     0.23916, 0.23931, 0.23869, 0.23922, 0.23671, 0.23994, 0.24009, 0.24299, 0.25392, 0.26096, 0.26740,
                     0.27136, 0.27207, 0.27209, 0.26671, 0.26037, 0.25427, 0.25223, 0.25006, 0.24506, 0.23531, 0.22816,
                     0.21955, 0.21713, 0.21705, 0.22167, 0.23419, 0.24789, 0.26416], dtype=numpy.float32)
                self.MECTnoise_sigma = numpy.array(
                    [0.03491, 0.02537, 0.01526, 0.00798, 0.00368, 0.00220, 0.00389, 0.00819, 0.01553, 0.02466, 0.03281,
                     0.03765, 0.04221, 0.04212, 0.03958, 0.03447, 0.02916, 0.02766, 0.02757, 0.02671, 0.02047, 0.01121,
                     0.00309, 0.00321, 0.00397, 0.00433, 0.00456, 0.00514, 0.00598, 0.00674, 0.00784, 0.00852, 0.00934,
                     0.01040, 0.01126, 0.01257, 0.01349, 0.01460, 0.01611, 0.01770, 0.01930, 0.02084, 0.02267, 0.02354,
                     0.02597, 0.02677, 0.02758, 0.02859, 0.03009, 0.03126, 0.03121, 0.03174, 0.03198, 0.03140, 0.03225,
                     0.03150, 0.03105, 0.03154, 0.03063, 0.02965, 0.02933, 0.02837, 0.02748, 0.02662, 0.02632, 0.02540,
                     0.02497, 0.02515, 0.02463, 0.02431, 0.02462, 0.02559, 0.02677, 0.02757, 0.02812, 0.02728, 0.02712,
                     0.02635, 0.02568, 0.02622, 0.02636, 0.02611, 0.02635, 0.02649, 0.02604, 0.02533, 0.02588, 0.02643,
                     0.02724, 0.02824, 0.02925, 0.02916, 0.02922, 0.03064, 0.03059, 0.03050, 0.03066, 0.03251, 0.03196,
                     0.03219, 0.03295, 0.03199, 0.03130, 0.02980, 0.02977, 0.02886, 0.02701, 0.02579, 0.02406, 0.02252,
                     0.02103, 0.01931, 0.01750, 0.01566, 0.01390, 0.01238, 0.01035, 0.00918, 0.00798, 0.00687, 0.00606,
                     0.00523, 0.00467, 0.00423, 0.00397, 0.00430, 0.00411, 0.00344, 0.00222, 0.00929, 0.01874, 0.02600,
                     0.02750, 0.02828, 0.02741, 0.03276, 0.03759, 0.04272, 0.04187, 0.03968, 0.03494, 0.02768, 0.01931,
                     0.01083, 0.00532, 0.00377, 0.00777, 0.01520, 0.02568, 0.03590], dtype=numpy.float32)
            else:
                self.SECTnoise_mu = 0.2638478
                self.SECTnoise_sigma = 0.022102864
        elif self.x_type==TYPES["CT"]:
            if self.input_channels>1:
                # NOTE(review): empty tables — multi-channel CT AWGN would
                # index into these and fail; confirm intended usage.
                self.MECTnoise_mu = numpy.array(
                    [], dtype=numpy.float32)
                self.MECTnoise_sigma = numpy.array(
                    [], dtype=numpy.float32)
            else:
                self.SECTnoise_mu = 0.5
                self.SECTnoise_sigma = 0.25
        self.useCrop = useCrop
        self.useZoom = useZoom
        self.zoomFactor = zoomFactor
        self.useFlipping = useFlipping
        self.useMedian = useMedian
        # Candidate median-filter sizes; one is drawn at random per batch.
        self.medianSize = [0,1,3,5,7,9,11]
        self.useGaussian = useGaussian
        # Range for the randomly drawn Gaussian-blur sigma.
        self.gaussianRange = (0, 0.075)
        # =================================== #
        # End of data Augmentation parameters #
        # =================================== #
        # ========================================#
        # == zoom-related image information     ==#
        # ========================================#
        # Centre of the zoomed image and the crop window (left, right, top,
        # bottom) used to cut the original-size region back out after zooming.
        self.im_center = numpy.array(
            [int(self.image_size[0] * self.zoomFactor - 1) / 2, int(self.image_size[1] * self.zoomFactor - 1) / 2],
            dtype=numpy.int32)
        self.im_shift = numpy.array([(self.image_size[0] - 1) / 2, (self.image_size[1] - 1) / 2], dtype=numpy.int32)
        left = self.im_center[0] - self.im_shift[0]
        right = left + self.image_size[0]
        top = self.im_center[1] - self.im_shift[1]
        bottom = top + self.image_size[1]
        self.im_bounds = (left, right, top, bottom)
        #===================================#
        #== directory-related information ==#
        #===================================#
        self.fileArray = []
        self.save_to_dir=save_to_dir
        self.save_format=save_format
        #==================================#
        #== flat-field related variables ==#
        #==================================#
        self.flatField_input = None
        self.flatField_output = None
        #===============================#
        #== caching-related variables ==#
        #===============================#
        self.useCache = useCache
        self.cache_size = CACHE_SIZE
        self.cache_period = cache_period
        # Placeholder arrays; real cache buffers are allocated in
        # prepareDirectFileInput() once the image dimensions are known.
        self.cacheX = numpy.zeros(1,dtype=numpy.float32)
        self.cacheY = numpy.zeros(1,dtype=numpy.float32)
        self.renew_cache = multiprocessing.Value(ctypes.c_bool,False)
        self.cacheUsed_counter = multiprocessing.Value('i',0)
        self.cacheRenewed_counter = multiprocessing.Value('i',0)
        self._lock_ = threadLockVar
        self._memlock_ = multiprocessing.Lock()
        self._refreshEvent_ = multiprocessing.Event()
        #======================#
        #== batch size setup ==#
        #======================#
        self.batch_image_size_X = (self.batch_size, self.image_size[0], self.image_size[1], 1)
        self.batch_image_size_Y = (self.batch_size, self.image_size[0], self.image_size[1], 1)
        if self.useCrop or self.useResize:
            self.batch_image_size_X = (self.batch_size, self.target_size[0], self.target_size[1], 1)
            self.batch_image_size_Y = (self.batch_size, self.target_size[0], self.target_size[1], 1)
    def prepareDirectFileInput(self, input_image_paths, flatFieldFilePath=None):
        """Collect *.h5 files from the given directories, natural-sort them,
        verify the first file's X shape, and initialise the cache and optional
        flat-field data."""
        for entry in input_image_paths:
            for name in glob.glob(os.path.join(entry, '*.h5')):
                self.fileArray.append(name)
        digits = re.compile(r'(\d+)')
        def tokenize(filename):
            # Natural-sort key: digit runs compare numerically ('img2' < 'img10').
            return tuple(int(token) if match else token for token, match in
                         ((fragment, digits.search(fragment)) for fragment in digits.split(filename)))
        # = Now you can sort your file names like so: =#
        self.fileArray.sort(key=tokenize)
        self.numImages = len(self.fileArray)
        # === prepare image sizes === #
        inImgDims = (self.image_size[0], self.image_size[1], self.input_channels)
        outImgDims = (self.image_size[0], self.image_size[1], self.output_channels)
        f = h5py.File(self.fileArray[0], 'r')
        # define your variable names in here
        imX = numpy.array(f['Data_X'], order='F').transpose()
        f.close()
        # Promote 2-D data to (H, W, 1) before the shape check.
        if len(imX.shape) < 3:
            imX = imX.reshape(imX.shape + (1,))
        if imX.shape != inImgDims:
            print("Error - read data shape and expected data shape of X are not equal. EXITING ...")
            exit()
        # === prepare caching === #
        if self.useCache:
            self.cacheX = numpy.zeros((self.cache_size, inImgDims[0], inImgDims[1], inImgDims[2]), dtype=numpy.float32)
            self.cacheY = numpy.zeros((self.cache_size, inImgDims[0], inImgDims[1], inImgDims[2]), dtype=numpy.float32)
            self.renew_cache = multiprocessing.Value(ctypes.c_bool, False)
            self.cacheUsed_counter = multiprocessing.Value('i', 0)
            self.__initCache_open_()
        # === prepare flat-field normalization === #
        if (flatFieldFilePath != None) and (len(flatFieldFilePath) > 0):
            f = h5py.File(flatFieldFilePath, 'r')
            self.flatField_output = numpy.array(f['data']['value'])  # f['data0']
            f.close()
            # NOTE(review): this wraps flatField_input (still None at this
            # point) in an array; it likely should reference
            # flatField_output — verify against the original intent.
            self.flatField_input = numpy.array(self.flatField_input)
            if self.output_channels==1:
                #self.flatField_output = numpy.sum(self.flatField_output,2)
                self.flatField_input = numpy.mean(self.flatField_output, 2)
# THIS IS THE INTERNAL FUNCTION THAT ACTUALLY LOADS THE DATA FROM FILE #
    def _initCache_locked_(self):
        """Refill one batch-sized slice of the shared cache from random files.

        Multiple workers cooperate: each claims a slice [startId,
        startId+batch_size) under self._lock_, loads and normalises the
        corresponding images, and writes them into the shared buffers under
        self._memlock_. The renewal counters are updated afterwards.
        """
        startId = 0
        loadData_flag = True
        #wait_flag = True
        with self._lock_:
            # Claim the next slice to repopulate; skip if the cache is full.
            startId = self.cacheRenewed_counter.value
            if(startId>=self.cache_size):
                loadData_flag = False
            else:
                loadData_flag=True
                self.cacheRenewed_counter.value+=self.batch_size
        if loadData_flag == True:
            # ---------------- #
            # repopulate cache #
            # ---------------- #
            idxArray = numpy.random.randint(0, self.numImages, self.cache_size)
            for ii in itertools.islice(itertools.count(), startId, min([startId + self.batch_size, self.cache_size])):
                imgIndex = idxArray[ii]
                inName = self.fileArray[imgIndex]
                f = h5py.File(inName, 'r')
                imX = numpy.array(f['Data_X'], order='F').transpose()
                imY = numpy.array(f['Data_Y'], order='F').transpose()
                f.close()
                # Promote 2-D data to (H, W, 1).
                if len(imX.shape) < 3:
                    imX = imX.reshape(imX.shape + (1,))
                if len(imY.shape) < 3:
                    imY = imY.reshape(imY.shape + (1,))
                #if imX.shape != imY.shape:
                #    raise RuntimeError("Input- and Output sizes do not match.")
                # == Note: do data normalization here to reduce memory footprint ==#
                """
                Data Normalisation
                """
                if self.useNormData:
                    minValX, maxValX, imX = normaliseFieldArray(imX, self.input_channels, self.flatField_input, self.x_type)
                    minValY, maxValY, imY = normaliseFieldArray(imY, self.output_channels, self.flatField_output, self.y_type)
                imX = imX.astype(numpy.float32)
                imY = imY.astype(numpy.float32)
                with self._memlock_:
                    self.cacheX[ii] = imX
                    self.cacheY[ii] = imY
        else:
            return
        with self._lock_:
            # Reset counters once the whole cache has been renewed.
            if self.cacheRenewed_counter.value >= self.cache_size:
                self.cacheRenewed_counter.value = 0
                if self.cacheUsed_counter.value >= self.cache_period:
                    self.cacheUsed_counter.value=0
            if((startId+self.batch_size)>=self.cache_size):
                self.renew_cache.value=False
        return
    def __initCache_open_(self):
        """Fill the whole cache once, without inter-process locking of the
        renewal counters (used during initial setup)."""
        if self.useCache:
            # ---------------- #
            # repopulate cache #
            # ---------------- #
            idxArray = numpy.random.randint(0, self.numImages, self.cache_size)
            for ii in itertools.islice(itertools.count(), 0, self.cache_size):
                imgIndex = idxArray[ii]
                inName = self.fileArray[imgIndex]
                f = h5py.File(inName, 'r')
                imX = numpy.array(f['Data_X'], order='F').transpose()
                imY = numpy.array(f['Data_Y'], order='F').transpose()
                f.close()
                # Promote 2-D data to (H, W, 1).
                if len(imX.shape) < 3:
                    imX = imX.reshape(imX.shape + (1,))
                if len(imY.shape) < 3:
                    imY = imY.reshape(imY.shape + (1,))
                #if imX.shape != imY.shape:
                #    raise RuntimeError("Input- and Output sizes do not match.")
                # == Note: do data normalization here to reduce memory footprint ==#
                """
                Data Normalisation
                """
                if self.useNormData:
                    minValX, maxValX, imX = normaliseFieldArray(imX, self.input_channels, self.flatField_input, self.x_type)
                    minValY, maxValY, imY = normaliseFieldArray(imY, self.output_channels, self.flatField_output, self.y_type)
                imX = imX.astype(numpy.float32)
                imY = imY.astype(numpy.float32)
                with self._memlock_:
                    self.cacheX[ii] = imX
                    self.cacheY[ii] = imY
def __len__(self):
return int(numpy.ceil(len(self.fileArray)/float(self.batch_size)))
def __getitem__(self, idx):
if self.useCache:
flushCache = False
with self._lock_:
flushCache = self.renew_cache.value
if flushCache == True:
self._initCache_locked_()
batchX = numpy.zeros(self.batch_image_size_X, dtype=numpy.float32)
batchY = numpy.zeros(self.batch_image_size_Y, dtype=numpy.float32)
idxArray = numpy.random.randint(0, self.cache_size, self.batch_size)
img_per_batch = max(1,int(self.batch_size / self.input_channels))
for j in itertools.islice(itertools.count(),0,img_per_batch):
outImgX = None
outImgY = None
if self.useCache:
#imgIndex = numpy.random.randint(0, self.cache_size)
imgIndex =idxArray[j]
with self._memlock_:
outImgX = self.cacheX[imgIndex]
outImgY = self.cacheY[imgIndex]
else:
# imgIndex = min([(idx*self.batch_size)+j, self.numImages-1,len(self.inputFileArray)-1])
imgIndex = ((idx * self.batch_size) + j) % (self.numImages - 1)
"""
Load data from disk
"""
inName = self.fileArray[imgIndex]
f = h5py.File(inName, 'r')
outImgX = numpy.array(f['Data_X'], order='F').transpose()
outImgY = numpy.array(f['Data_Y'], order='F').transpose()
f.close()
if len(outImgX.shape) < 3:
outImgX = outImgX.reshape(outImgX.shape + (1,))
if len(outImgY.shape) < 3:
outImgY = outImgY.reshape(outImgY.shape + (1,))
if outImgX.shape != outImgY.shape:
raise RuntimeError("Input- and Output sizes do not match.")
# == Note: do data normalization here to reduce memory footprint ==#
"""
Data Normalisation
"""
if self.useNormData:
minValX,maxValX,outImgX = normaliseFieldArray(outImgX, self.input_channels, self.flatField_input, self.x_type)
minValY,minValY,outImgY = normaliseFieldArray(outImgY, self.output_channels, self.flatField_output, self.y_type)
outImgX = outImgX.astype(numpy.float32)
outImgY = outImgY.astype(numpy.float32)
input_target_size = self.target_size + (self.input_channels, )
output_target_size = self.target_size + (self.output_channels, )
"""
Data augmentation
"""
if self.useZoom:
if self.useZoom:
outImgX = ndimage.zoom(outImgX, [self.zoomFactor, self.zoomFactor, 1], order=3, mode='reflect')
outImgX = outImgX[self.im_bounds[0]:self.im_bounds[1], self.im_bounds[2]:self.im_bounds[3],
0:self.input_channels]
outImgY = ndimage.zoom(outImgY, [self.zoomFactor, self.zoomFactor, 1], order=3, mode='constant')
outImgY = outImgY[self.im_bounds[0]:self.im_bounds[1], self.im_bounds[2]:self.im_bounds[3],
0:self.output_channels]
if self.useResize:
outImgX = transform.resize(outImgX, input_target_size, order=3, mode='reflect')
outImgY = transform.resize(outImgY, output_target_size, order=3, mode='reflect')
if self.useCrop:
outImgX = outImgX[self.im_bounds[0]:self.im_bounds[1], self.im_bounds[2]:self.im_bounds[3],
0:self.input_channels]
outImgY = outImgY[self.im_bounds[0]:self.im_bounds[1], self.im_bounds[2]:self.im_bounds[3],
0:self.output_channels]
if self.useFlipping:
mode = numpy.random.randint(0,4)
if mode == 0: # no modification
pass
if mode == 1:
outImgX = numpy.fliplr(outImgX)
outImgY = numpy.fliplr(outImgY)
#outImgX = ndimage.geometric_transform(outImgX, flipSpectralX, mode='nearest')
#outImgY = ndimage.geometric_transform(outImgY, flipSpectralX, mode='nearest')
if mode == 2:
outImgX = numpy.flipud(outImgX)
outImgY = numpy.flipud(outImgY)
#outImgX = ndimage.geometric_transform(outImgX, flipSpectralY, mode='nearest')
#outImgY = ndimage.geometric_transform(outImgY, flipSpectralY, mode='nearest')
if mode == 3:
outImgX = numpy.fliplr(outImgX)
outImgX = numpy.flipud(outImgX)
outImgY = numpy.fliplr(outImgY)
outImgY = numpy.flipud(outImgY)
#outImgX = ndimage.geometric_transform(outImgX, flipSpectralXY, mode='nearest')
#outImgY = ndimage.geometric_transform(outImgY, flipSpectralXY, mode='nearest')
if self.useAWGN: # only applies to input data
if self.input_channels > 1:
for channelIdx in itertools.islice(itertools.count(), 0, self.input_channels):
rectMin = numpy.min(outImgX[:,:,channelIdx])
rectMax = numpy.max(outImgX[:,:,channelIdx])
outImgX[:,:,channelIdx] = util.random_noise(outImgX[:,:,channelIdx], mode='gaussian', mean=self.MECTnoise_mu[channelIdx]*0.15, var=(self.MECTnoise_sigma[channelIdx]*0.15*self.MECTnoise_sigma[channelIdx]*0.15))
outImgX[:,:,channelIdx] = numpy.clip(outImgX[:,:,channelIdx], rectMin, rectMax)
else:
rectMin = numpy.min(outImgX[:, :, 0])
rectMax = numpy.max(outImgX[:, :, 0])
outImgX[:, :, channelIdx] = util.random_noise(outImgX[:, :, 0], mode='gaussian',
mean=self.SECTnoise_mu * 0.15, var=(self.SECTnoise_sigma * 0.15 * self.SECTnoise_sigma * 0.15))
outImgX[:, :, channelIdx] = numpy.clip(outImgX[:, :, channelIdx], rectMin, rectMax)
if self.useMedian:
mSize = self.medianSize[numpy.random.randint(0,len(self.medianSize))]
if mSize > 0:
outImgX = ndimage.median_filter(outImgX, (mSize, mSize, 1), mode='constant', cval=1.0)
if self.useGaussian:
# here, it's perhaps incorrect to also smoothen the output;
# rationale: even an overly smooth image should result is sharp outputs
sigma = numpy.random.uniform(low=self.gaussianRange[0], high=self.gaussianRange[1])
outImgX = ndimage.gaussian_filter(outImgX, (sigma, sigma, 0))
#outImgY = ndimage.gaussian_filter(outImgY, (sigma, sigma, 0))
"""
Store data if requested
"""
if self.save_to_dir != None:
sXImg = array_to_img(outImgX[:,:,0])
#save_img(os.path.join(self.save_to_dir,fname_in+"."+self.save_format),sXimg)
fname_in = "img_"+str(imgIndex)
sXImg.save(os.path.join(self.save_to_dir,fname_in+"."+self.save_format))
sYImg = array_to_img(outImgY[:,:,0])
#save_img(os.path.join(self.save_to_dir,fname_out+"."+self.save_format), sYImg)
fname_out = "img_" + str(imgIndex)
sYImg.save(os.path.join(self.save_to_dir,fname_out+"."+self.save_format))
for jj in itertools.islice(itertools.count(), 0, self.input_channels):
batchX[(j*self.input_channels)+jj,:,:,0] = outImgX[:,:,jj]
batchY[(j*self.input_channels)+jj,:,:,0] = outImgY[:,:,jj]
if self.useCache:
self._lock_.acquire()
self.cacheUsed_counter.value+=1
if int(self.cacheUsed_counter.value) >= self.cache_period:
self.renew_cache.value = True
self._lock_.release()
return batchX, batchY
    def on_epoch_end(self):
        # Sequence-style end-of-epoch hook; presumably re-initializes/reopens
        # the sample cache for the next epoch — confirm against
        # __initCache_open_'s definition earlier in the class.
        self.__initCache_open_()
#class ScatterPhantomGenerator_inMemory(Sequence):
#
# def __init__(self, images_in, images_out, image_size=(128, 128,1), batch_size=1, useAWGN = False, gauss_mu=0.5, gauss_stdDev=0.1, useRotation=False, rotationRange=(0,360), targetSize=(128,128), useZoom=False, zoomFactor=1.0, useFlipping=False, useNormData=False, save_to_dir=None, save_format="png"):
# self.batch_size = batch_size
# self.image_size = image_size
# #self.image_path = image_path
# #self.augment_flow = augment_flow
# #self.k = (self.kernel_size-1)//2
# self.dtype = numpy.float32
# self.useAWGH = useAWGN
# self.gauss_mu = gauss_mu
# self.gauss_stdDev = gauss_stdDev
# self.rotationRange = rotationRange
# self.targetSize=targetSize
# self.useRotation = useRotation
# self.useClip = useRotation
# self.zoomFactor = zoomFactor
# self.useZoom = useZoom
# self.useFlipping = useFlipping
# self.useNormData = useNormData
# dims = targetSize
# self.targetSize = (self.targetSize[0], self.targetSize[1], self.image_size[2])
# #========================================#
# #== clipping-related image information ==#
# #========================================#
# self.im_center = numpy.array([(self.image_size[0]-1)/2, (self.image_size[1]-1)/2], dtype=numpy.int32)
# self.im_size = numpy.array([(self.targetSize[0]-1)/2, (self.targetSize[1]-1)/2], dtype=numpy.int32)
# left = self.im_center[0]-self.im_size[0]
# right = left+self.targetSize[0]
# top = self.im_center[1]-self.im_size[1]
# bottom = top+self.targetSize[1]
# self.im_bounds = (left,right,top,bottom)
# #===================================#
# #== directory-related information ==#
# #===================================#
# self.X = images_in
# self.Y = images_out
# self.numImages = self.X.shape[0]
# outImgX = self.X[0]
# if len(outImgX)<3:
# outImgX = outImgX.reshape(outImgX.shape + (1,))
# self.image_size =outImgX.shape
# self.save_to_dir=save_to_dir
# self.save_format=save_format
#
# def __len__(self):
# return int(numpy.ceil(self.numImages/float(self.batch_size)))
#
# def __getitem__(self, idx):
# batchX = numpy.zeros((self.batch_size,self.image_size[0],self.image_size[1],self.image_size[2]),dtype=self.dtype)
# batchY = numpy.zeros((self.batch_size,self.image_size[0],self.image_size[1],self.image_size[2]),dtype=self.dtype)
# if self.useClip or self.useZoom:
# batchX = numpy.zeros((self.batch_size,self.targetSize[0],self.targetSize[1],self.image_size[2]),dtype=self.dtype)
# batchY = numpy.zeros((self.batch_size,self.targetSize[0],self.targetSize[1],self.image_size[2]),dtype=self.dtype)
# for j in itertools.islice(itertools.count(),0,self.batch_size):
# #imgIndex = min([(idx*self.batch_size)+j, self.numImages-1])
# imgIndex = ((idx*self.batch_size)+j) % (self.numImages-1)
# #if shuffle:
# # batchIndex = numpy.random.randint(0, min([self.numImages,len(self.inputFileArray)]))
# """
# Load data from memory
# """
# outImgX = self.X[imgIndex]
# outImgY = self.Y[imgIndex]
# if len(outImgX)<3:
# outImgX = outImgX.reshape(outImgX.shape + (1,))
# if len(outImgY)<3:
# outImgY = outImgY.reshape(outImgY.shape + (1,))
# if outImgX.shape != outImgY.shape:
# raise RuntimeError("Input- and Output sizes do not match.")
# #self.image_size =outImgX.shape
# """
# Data augmentation
# """
# if self.useNormData:
# minValX = numpy.min(outImgX)
# maxValX = numpy.max(outImgX)
# outImgX = (outImgX-minValX)/(maxValX-minValX)
# outImgX = outImgX.astype(numpy.float32)
# minValY = numpy.min(outImgY)
# maxValY = numpy.max(outImgY)
# outImgY = (outImgY-minValY)/(maxValY-minValY)
# outImgY = outImgY.astype(numpy.float32)
# if self.useZoom:
# outImgX = ndimage.zoom(outImgX, [self.zoomFactor,self.zoomFactor,1], order=3)
# outImgY = ndimage.zoom(outImgY, [self.zoomFactor,self.zoomFactor,1], order=3)
# if self.useRotation:
# outImgX = ndimage.rotate(outImgX, numpy.random.uniform(self.rotationRange[0], self.rotationRange[1]), axes=(1,0), order=2, mode='mirror')
# outImgY = ndimage.rotate(outImgY, numpy.random.uniform(self.rotationRange[0], self.rotationRange[1]), axes=(1,0), order=2, mode='mirror')
# if self.useClip:
# outImgX = outImgX[self.im_bounds[0]:self.im_bounds[1],self.im_bounds[2]:self.im_bounds[3],0:self.targetSize[2]]
# outImgY = outImgY[self.im_bounds[0]:self.im_bounds[1],self.im_bounds[2]:self.im_bounds[3],0:self.targetSize[2]]
# if self.useAWGH: # only applies to input data
# outImgX = util.random_noise(outImgX, mode='gaussian', mean=self.gauss_mu, var=(self.gauss_stdDev*self.gauss_stdDev))
# if self.useFlipping:
# mode = numpy.random.randint(0,4)
# if mode == 0: # no modification
# pass
# if mode == 1:
# outImgX = numpy.fliplr(outImgX)
# outImgY = numpy.fliplr(outImgY)
# #outImgX = ndimage.geometric_transform(outImgX, flipSpectralX, mode='nearest')
# #outImgY = ndimage.geometric_transform(outImgY, flipSpectralX, mode='nearest')
# if mode == 2:
# outImgX = numpy.flipud(outImgX)
# outImgY = numpy.flipud(outImgY)
# #outImgX = ndimage.geometric_transform(outImgX, flipSpectralY, mode='nearest')
# #outImgY = ndimage.geometric_transform(outImgY, flipSpectralY, mode='nearest')
# if mode == 3:
# outImgX = numpy.fliplr(outImgX)
# outImgX = numpy.flipud(outImgX)
# outImgY = numpy.fliplr(outImgY)
# outImgY = numpy.flipud(outImgY)
# #outImgX = ndimage.geometric_transform(outImgX, flipSpectralXY, mode='nearest')
# #outImgY = ndimage.geometric_transform(outImgY, flipSpectralXY, mode='nearest')
# """
# Store data if requested
# """
# if self.save_to_dir != None:
# sXImg = array_to_img(outImgX[:,:,0])
# #save_img(os.path.join(self.save_to_dir,fname_in+"."+self.save_format),sXimg)
# sXimg.save(os.path.join(self.save_to_dir,fname_in+"."+self.save_format))
# sYImg = array_to_img(outImgY[:,:,0])
# #save_img(os.path.join(self.save_to_dir,fname_out+"."+self.save_format), sYImg)
# sYimg.save(os.path.join(self.save_to_dir,fname_out+"."+self.save_format))
# batchX[j] = outImgX
# batchY[j] = outImgY
# return batchX, batchY
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by pbpoon on 2018/11/8
from django.contrib.auth.models import User
from selectable.base import ModelLookup
from selectable.registry import registry
from partner.models import Partner
from .models import PurchaseOrder
class PurchaseOrderLookup(ModelLookup):
    """Selectable autocomplete lookup for purchase-order fields.

    NOTE(review): despite the class name this looks up ``User`` rows, and
    ``django.contrib.auth``'s ``User`` has no ``name`` field (only
    ``username``/``first_name``/``last_name``), so ``name__icontains`` will
    raise FieldError at query time. ``PurchaseOrder`` is imported above but
    unused — presumably ``model`` should be ``PurchaseOrder``; confirm
    against the form that registers this lookup.
    """
    model = User
    search_fields = ('name__icontains',)
registry.register(PurchaseOrderLookup)
|
import torch
import torch.nn as nn
class ThetaBorisovLoss(nn.Module):
    """Batched geodesic angle between pairs of rotation matrices.

    Implements the metric from http://boris-belousov.net/2016/12/01/quat-dist/ :
    theta = arccos((trace(A . B^T) - 1) / 2), evaluated per batch element.
    """

    def __init__(self, device):
        super().__init__()
        self.device = device
        # Keeps acos away from its non-differentiable endpoints at +-1.
        self.eps = 1e-7

    def forward(self, tensor_mat_a, tensor_mat_b):
        """Return B angles in [0, pi] for two [B x 3 x 3] rotation batches."""
        batch = tensor_mat_a.size(0)
        # Relative rotation A . B^T; equals the identity when A == B.
        relative = torch.bmm(tensor_mat_a, tensor_mat_b.transpose(2, 1))
        eye = torch.eye(3, requires_grad=True, device=self.device).reshape(1, 3, 3)
        eye_batch = eye.repeat(batch, 1, 1)
        # Elementwise product with the identity keeps only the diagonal,
        # so the sum below is the trace of each relative rotation.
        cos_theta = ((eye_batch * relative).sum(dim=(1, 2)) - 1) * 0.5
        cos_theta = cos_theta.clamp(min=-1 + self.eps, max=1 - self.eps)
        return torch.acos(cos_theta)
class ChamferLoss(nn.Module):
    """Chamfer distance between two point sets, per batch element."""

    def __init__(self):
        super().__init__()

    def forward(self, points_src, points_trg):
        """
        Compute the Chamfer distances between two point sets.

        :param points_src: source input points [B X NUM_POINTS_ X CHANNELS]
        :param points_trg: target input points [B X NUM_POINTS_ X CHANNELS]
        :return: two tensors, one for each set, containing the minimum squared
            euclidean distance between a point and its closest point in the
            other set
        """
        x, y = points_src, points_trg
        num_points = x.size(1)
        # Gram matrices: xx[b, i, j] = <x_i, x_j>, yy likewise, zz = <x_i, y_j>.
        xx = torch.bmm(x, x.transpose(2, 1))
        yy = torch.bmm(y, y.transpose(2, 1))
        zz = torch.bmm(x, y.transpose(2, 1))
        # Device-agnostic diagonal indices. (The original special-cased CUDA
        # via the deprecated torch.cuda.LongTensor type; arange with an
        # explicit device handles CPU and GPU uniformly.)
        diag_indices = torch.arange(num_points, device=x.device)
        x_squared = xx[:, diag_indices, diag_indices].unsqueeze(1).expand_as(xx)
        y_squared = yy[:, diag_indices, diag_indices].unsqueeze(1).expand_as(yy)
        # distances[b, i, j] = ||x_i||^2 + ||y_j||^2 - 2<x_i, y_j> = ||x_i - y_j||^2
        distances = (x_squared.transpose(2, 1) + y_squared - 2 * zz)
        return distances.min(1)[0], distances.min(2)[0]
|
from week04.day_01.dungeon import Dungeon
from week04.day_01.entities import Hero, Enemy
from week04.day_01.spells import Spell
from week04.day_01.items import Weapon
class Fight():
    """Runs a fight to the death between a hero and an enemy.

    The hero opens with `initial_attack`; turns then alternate (enemy acts
    first each round) until the hero lands a killing blow.
    """

    def __init__(self, hero: Hero, enemy: Enemy, initial_attack):
        print('A fight is started between our Hero {} and Enemy {}'.format(
            hero.name, enemy.name
        ))
        # hero attacks first by choice
        hero.initial_attack(enemy, initial_attack)
        while True:
            self.__enemy_turn(hero, enemy)
            # hero attack
            killing_blow = hero.attack(victim=enemy)
            if killing_blow:
                hero.leave_combat()
                break

    def __enemy_turn(self, hero: Hero, enemy: Enemy):
        # The enemy either moves toward the hero or attacks him.
        # Fix: the original used `and`, so the enemy already attacked as soon
        # as it merely shared ONE coordinate with the hero (i.e. from any
        # distance along a row/column). It must keep moving until BOTH
        # coordinates match, i.e. attack only when co-located.
        if enemy.x_coord != hero.x_coord or enemy.y_coord != hero.y_coord:
            # move enemy
            enemy.move_toward(hero)
        else:
            enemy.attack(victim=hero)
def sample_game_run():
    """Scripted demo run: equip a hero, spawn him in level1.txt and walk him
    through the dungeon, fighting with the Fireball spell along the way."""
    # TODO: TESTS AND FURTHER REFINEMENT!
    h = Hero(name="Bron", title="Dragonslayer", health=100, mana=100, mana_regen_rate=2)
    w = Weapon(name="The Axe of Destiny", damage=20)
    h.equip(w)
    s = Spell(name="Fireball", damage=30, mana_cost=50, cast_range=2)
    h.learn(s)
    # Renamed from `map`, which shadowed the builtin.
    dungeon = Dungeon("level1.txt", hero=h)
    dungeon.spawn(h)
    dungeon.print_map()
    dungeon.move_hero("right")
    dungeon.move_hero("down")
    dungeon.print_map()
    dungeon.hero_attack(by="spell")
    dungeon.move_hero("down")
    dungeon.move_hero("down")
    dungeon.print_map()
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.move_hero("down")
    dungeon.print_map()
    dungeon.move_hero("up")
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.print_map()
    dungeon.hero_attack(by="spell")
    dungeon.print_map()
    dungeon.move_hero("up")
    dungeon.move_hero("up")
    dungeon.move_hero("up")
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.move_hero("right")
    dungeon.hero_attack(by="spell")
    dungeon.print_map()
    dungeon.move_hero("down")
    dungeon.move_hero("down")
    dungeon.move_hero("down")
    dungeon.move_hero("down")
def main():
    # Entry point: run the scripted demo playthrough.
    sample_game_run()
if __name__ == "__main__":
    main()
# Name: Max Radke Date: Febuary 7, 2021
# College: Oregon State University
# Class: CS 362 Assignment: Homework 4
# Description: Takes two name strings and returns a full name
def getName(str1, str2):
    """Join two single-word name strings into "first last".

    Raises NameError if either argument already contains a space
    (i.e. holds more than one name).
    """
    for part in (str1, str2):
        if ' ' in part:
            raise NameError
    return str1 + " " + str2
import search_engine
if __name__ == '__main__':
    # Run the engine; presumably the args are (corpus dir, postings path,
    # a boolean flag such as stemming, queries file, number of results) —
    # confirm against search_engine.main's signature.
    search_engine.main('corpus', 'posting', True, 'queries.txt', 100)
|
# Default kinetic/reaction parameter settings.
# NOTE(review): all values are kept as strings — presumably they are spliced
# verbatim into generated model text; confirm against the consuming module.
Km = '1.0'  # Michaelis constant (assumed from the name) — TODO confirm
Conc = '0.5'  # default concentration
ReacType = 'MA'  # default reaction-type code
Keq = 'KEQ=1'  # equilibrium-constant assignment fragment
Default_Reac_List = 'MAIR MMIR'  # default list of reaction codes
KREG_KM = '1'
VM = '1'  # maximal rate (assumed from the name) — TODO confirm
|
from django.contrib import admin
from .models import News
class NewsAdmin(admin.ModelAdmin):
    """Admin change-list configuration for the News model."""
    # Columns shown in the change list.
    list_display = ('id', 'title', 'created_at', 'is_published')
    # Columns that link through to the edit page.
    list_display_links = ('id', 'title')
    # Fields covered by the admin search box.
    search_fields = ('title', 'content')
admin.site.register(News, NewsAdmin)
# Register your models here.
|
# Sales-report script.
# Reads the 2020 monthly sales workbook (one sheet per month) and prints:
#   - total revenue and total quantity for the year
#   - each item's share of quantity and of revenue
#   - best/worst sellers for the year and each quarter's bestseller
# Reading steps: open the workbook, pick a sheet by name, address cells
# by row/column index.
import xlrd

# Workbook with one sheet per month.
wb = xlrd.open_workbook(filename=r"D:\Python自动化测试\Python\day07\day07【mysql工具类与excel读取】\2020年每个月的销售情况.xlsx")
time = ['1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', '9月', '10月', '11月', '12月']


def _rows(sheet_names):
    """Yield every data row (header skipped) from the named sheets.

    Row layout (assumed from the column indices used below): col 1 = item
    name, col 2 = unit price, col 4 = quantity — TODO confirm against the
    workbook.
    """
    for sheet_name in sheet_names:
        sheet = wb.sheet_by_name(sheet_name)
        for i in range(1, sheet.nrows):
            yield sheet.row_values(i)


def _tally_by_item(sheet_names, value_of):
    """Accumulate value_of(row) per item name (column 1) over the sheets."""
    totals = {}
    for row in _rows(sheet_names):
        totals[row[1]] = totals.get(row[1], 0) + value_of(row)
    return totals


def _print_extreme(totals, pick, label):
    """Print `label` plus every item whose total equals pick(values) (max/min)."""
    target = pick(totals.values())
    for item in totals:
        if totals[item] == target:
            print(label, item)


# Total revenue (price * quantity) over the whole year.
# (Renamed from `sum`, which shadowed the builtin.)
total_revenue = 0
for row in _rows(time):
    total_revenue = total_revenue + row[2] * row[4]
print("总销售总额:", round(total_revenue, 2))

# Total quantity sold over the whole year.
total_quantity = 0
for row in _rows(time):
    total_quantity = total_quantity + row[4]
print("总销售量:", round(total_quantity))

# Each item's share of the total quantity.
bag = _tally_by_item(time, lambda row: row[4])
print("------------------------------------------")
for name in bag:
    print(name, "的销售量占比为:", round(bag[name] / round(total_quantity) * 100, 2), "%")
print("------------------------------------------")

# Each item's share of the total revenue.
money = _tally_by_item(time, lambda row: row[2] * row[4])
print("------------------------------------------")
for name in money:
    print(name, "的销售额占比为:", round(money[name] / round(total_revenue) * 100, 2), "%")
print("------------------------------------------")

# Year-wide best and worst sellers (all ties printed, as before).
_print_extreme(bag, max, "最畅销的衣服是:")
_print_extreme(bag, min, "全年销量最低的衣服是:")

# Quarterly bestsellers. Quarters follow the original report's grouping
# (Q1 = Feb-Apr, Q2 = May-Jul, Q3 = Aug-Oct, Q4 = Nov/Dec/Jan).
# Fix: the Q1 label previously contained a stray 'a' ("第一a季度").
for label, months in (
    ("第一季度最畅销的衣服是:", ("2月", "3月", "4月")),
    ("第二季度最畅销的衣服是:", ("5月", "6月", "7月")),
    ("第三季度最畅销的衣服是:", ("8月", "9月", "10月")),
    ("第四季度最畅销的衣服是:", ("11月", "12月", "1月")),
):
    _print_extreme(_tally_by_item(months, lambda row: row[4]), max, label)
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
    """SQLAlchemy ORM model for the `user` table."""
    __tablename__ = "user"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True, unique=True)
    # E-mail address; enforced unique at the database level.
    email = Column(String, unique=True)
    first_name = Column(String)
    last_name = Column(String)
    # NOTE(review): no default/onupdate is configured, so these timestamps
    # must be populated by the caller — confirm that is intended.
    created_at = Column(DateTime)
    updated_at = Column(DateTime)
|
#!/usr/bin/env python
#coding:utf-8
'''
Created on Mar 6, 2016
@author: yang.hongsheng
'''
from django.conf.urls import url
from app import views
# URL routes for the `app` views.
urlpatterns = [
    url(r'^app/$', views.index),
    url(r'^home/$', views.home_user, name='home'),
    #etc.
    # NOTE(review): unanchored pattern — r'^conta' matches any path starting
    # with "conta" (no trailing slash or '$', unlike the other routes);
    # confirm this prefix match is intentional.
    url(r'^conta', views.conta, name='conta'),
    url(r'^ajax_list/$', views.ajax_list, name='ajax-list'),
    url(r'^ajax_dict/$', views.ajax_dict, name='ajax-dict'),
    url(r'^add/$', views.add, name='add'),
    url(r'^t/$', views.t, name='t'),
]
# In-memory student database: one dict per student record.
studentList = []
# Scratch dict reused by create() to assemble each record before copying it.
students = {}
def create():
    """Prompt for one student's fields, append the record, echo the list."""
    print("Create Student Record...")
    studentId = int(input("Enter ID : "))
    studentName = input("Enter name : ")
    studentCourse = input("Enter course : ")
    studentNo = int(input("Enter number : "))
    # The module-level `students` dict is reused as a scratch buffer; the
    # .copy() below is what keeps earlier records from being overwritten.
    students["Id"] = studentId
    students["Name"] = studentName
    students["Course"] = studentCourse
    students["No"] = studentNo
    # studentList.append([studentId, studentName, studentCourse, studentNo])
    studentList.append(students.copy())
    for s in studentList:
        print(s)
def read():
    """Print every student record, one per line."""
    print("Read Student Record...")
    for record in studentList:
        print(record)
def update():
    """Prompt for a student ID and update one field of each matching record."""
    print("Update Student Record...")
    studentId = int(input("Enter Student ID you want to update data about : "))
    for i in range(len(studentList)):
        if studentList[i]['Id'] == studentId:
            print(studentList[i])
            print("Data you want to update: ")
            print("""
            1. Name
            2. Course
            3. Number
            """)
            choice = int(input("Enter your choice : "))
            if choice == 1:
                new_name = input("Enter updated Name:")
                studentList[i]['Name'] = new_name
            elif choice == 2:
                new_course = input("Enter updated Course:")
                studentList[i]['Course'] = new_course
            elif choice == 3:
                new_number = input("Enter updated Number:")
                # Fix: store the number as int to match create(); it was
                # previously stored as the raw input string, making record
                # types inconsistent.
                studentList[i]['No'] = int(new_number)
            else:
                pass
def delete():
    """Prompt for a student ID, remove the first matching record, show list."""
    print("Delete Student Record...")
    studentId = int(input("Enter Student ID you want to delete : "))
    for i in range(len(studentList)):
        if studentList[i]['Id'] == studentId:
            print("Student Exist")
            del studentList[i]
            print("Deleted Successfully...")
            print("Updated List...")
            read()
            # Fix: stop scanning after the deletion — continuing the index
            # loop over the shrunken list raised IndexError.
            break
def search():
    """Prompt for a student ID and print every matching record."""
    print("Search Student Record...")
    wanted = int(input("Enter Student ID you want to search : "))
    for record in studentList:
        if record['Id'] == wanted:
            print(record)
def sortStudents():
    """Print the records sorted by Name or by Id, per the user's choice."""
    print("Sort Student Record...")
    print("""
    1. Name
    2. Id
    """)
    order = int(input("Enter the order you want to sort the data:"))
    key_by_choice = {1: "Name", 2: "Id"}
    if order in key_by_choice:
        field = key_by_choice[order]
        for record in sorted(studentList, key=lambda rec: rec[field]):
            print(record)
    else:
        print("Incorrect option..")
def save():
    """Append every record to students.txt, one brace-stripped dict per line."""
    lines = [str(record).strip("{}") + "\n" for record in studentList]
    with open("students.txt", "a") as out:
        out.writelines(lines)
#def load():
# print("Load Student Record...")
# filename = input("Enter the name of file you want to load data from:")
# raw_data = open(filename,'rt')
# data = numpy.loadtxt(raw_data, delimiter=",")
# print("Shape of data: ",data.shape)
# studentList.append(data)
# print("Shape of Student Record: ",len(studentList))
# Menu of actions keyed by the user's input (hoisted: it is loop-invariant).
todo = {
    "1": create,
    "2": read,
    "3": update,
    "4": delete,
    "5": search,
    "6": sortStudents,
    "7": save,
    "8": quit
}
while True:
    print("""
    1. Create Record
    2. Read Record
    3. Update Record
    4. Delete Record
    5. Search Record
    6. Sort Record
    7. Save Record
    8. Quit
    """)
    userChoice = input("Enter your choice : ")
    action = todo.get(userChoice)
    if action is not None:
        action()
    else:
        # Fix: an unknown choice previously crashed with
        # "'NoneType' object is not callable".
        print("Invalid choice, try again...")
"""Not using this as of now. This is to represent an integer node and it has the token"""
class NumNode(AST):
def __init__(self, token):
self.token = token
#I do not have a token class as of now
#self.value = self.token.value
##############################################################################
#OLD CODE
##class Parser(object):
## def printstack(self):
## i = len(self.evalstack)-1
## stackstr = ""
## while(i >= 0):
## stackstr = stackstr + " ||" + str(self.evalstack[i])
## i=i-1
## print(stackstr)
##
## #recursion and use a stack
## def evaluatemathExprTree(self, tree):
## #Bug
## #stack = []
## #Recursion - stack is being emptied every time
##
## #make it global it is recursive
## #value = 0
## if tree is not None:
## self.evaluatemathExprTree(tree.left)
## self.evaluatemathExprTree(tree.right)
## print(tree.opvalue)
## if(tree.opvalue == '+'):
## op1 = stack.pop()
## op2 = stack.pop()
## self.evalstack.append(float(op1) + float(op2))
## self.printstack()
## elif(tree.opvalue == '-'):
## #print(tree.opvalue)
## #print(stack.pop())
## op1 = stack.pop()
## op2 = stack.pop()
## #print(int(op1))
## #print(int(op2))
## #Bug: int typecasting uses 0.8 as 0
## self.evalstack.append(float(op1) - float(op2))
## self.printstack()
## elif(tree.opvalue == '*'):
## op1 = stack.pop()
## op2 = stack.pop()
## self.evalstack.append(float(op1) * float(op2))
## self.printstack()
## elif(tree.opvalue == '/'):
## op1 = stack.pop()
## op2 = stack.pop()
## self.evalstack.append(float(op1) / float(op2))
## self.printstack()
## else:
## self.evalstack.append(float(tree.opvalue))
## self.printstack()
## """Maitains the lexer and the math expression"""
## def __init__(self, mathexpr):
## self.mathexpr = mathexpr
## self.lexer = Lexer.Lexer(mathexpr)
## self.current_token = None
## self.ast = None
## self.stack = []
## self.evalstack = []
##
##
## def factor(self):
## node = self.ast
## #get the current token
## self.current_token = self.lexer.get_next_token()
##
## if(self.current_token.type == Lexer.Token.INTEGER):
## #not using numnode for now
## #node = NumNode(self.current_token)
## node = BinOpNode(None,self.current_token.value,None)
## #consume it and and add operator in the stack
## #added the operator on the stack
## self.stack.append(node)
## #BUG:consumedont consume here, happening at the term function
## self.current_token = self.lexer.get_next_token()
## #Debugging
## print("factor int token type = ", self.current_token.type)
## print("factor int token value = ", self.current_token.value)
##
## #Debugging
## print("factor int node value = ", node.opvalue)
## print("factor int node left = ", node.left)
## print("factor int node right = ", node.right)
## return node
## elif(self.current_token.type == Lexer.Token.LPAREN):
## #consume it and and add operator in the stack
## #consume
## self.current_token = self.lexer.get_next_token()
## node = expression()
## #Debugging
## print("factor paren value = ", node.opvalue)
## print("factor paren left = ", node.left)
## print("factor paren right = ", node.right)
## return node
## elif(self.current_token.type == Lexer.Token.RPAREN):
## print("right paren")
## self.current_token = self.lexer.get_next_token()
## return self.ast #????
## elif(self.current_token.type == Lexer.Token.EOF):
## print("EOF")
##
## node = BinOpNode(None,self.current_token.value,None)
## #Debugging
## print("factor eof value = ", node.opvalue)
## print("factor eof left = ", node.left)
## print("factor eof right = ", node.right)
## return node
##
## """term : factor ((MUL | DIV) factor)* """
## def term(self):
## node = self.ast
## node = self.factor()
## newnode = BinOpNode(None, None, None)
## #BUG: not required here - happening inside the while get the current token
## #self.current_token = self.lexer.get_next_token()
## #Debugging
## print("term token type = ", self.current_token.type)
## print("term token value = ", self.current_token.value)
##
## while(self.current_token.type in (Lexer.Token.MUL, Lexer.Token.DIV)):
## #consume it and and add operator in the stack
## #added the operator on the stack
## self.stack.append(node)
## #consume
## self.current_token = self.lexer.get_next_token()
## #Debug
## print("term post consume token type = ", self.current_token.type)
## print("term post consume token value = ", self.current_token.value)
## #Calling to get the next term
## newnode = self.factor()
## if(self.current_token.type == Lexer.Token.MUL):
## self.ast = BinOpNode(node,'*',newnode)
## elif(self.current_token.type == Lexer.Token.DIV):
## self.ast = BinOpNode(node,'/',newnode)
##
##
## #Debugging
## print("term node value = ", node.opvalue)
## print("term node left = ", node.left)
## print("term node right = ", node.right)
##
## print("term newnode value = ", newnode.opvalue)
## print("term newnode left = ", newnode.left)
## print("term newnode right = ", newnode.right)
##
## return node
##
## """expr : term ((PLUS | MINUS) term)* Parses the expression - the upper most"""
## def expression(self):
## node = self.ast
## #calling to get the first term
## node = self.term()
## newnode = BinOpNode(None,None,None)
## #BUG: dont consume here . happening in while get the current token
## #self.current_token = self.lexer.get_next_token()
## #Debugging
## print("expr token type = ", self.current_token.type)
## print("expr token value = ", self.current_token.value)
##
## while(self.current_token.type in (Lexer.Token.PLUS, Lexer.Token.MINUS)):
## #consume it and and add operator in the stack
## #added the operator on the stack
## self.stack.append(node)
## #consume
## self.current_token = self.lexer.get_next_token()
## #Debug
## print("expr post consume token type = ", self.current_token.type)
## print("expr post consume token value = ", self.current_token.value)
## #Calling to get the next term
## newnode = self.term()
## if(self.current_token.type == Lexer.Token.PLUS):
## self.ast = BinOpNode(node,'+',newnode)
## elif(self.current_token.type == Lexer.Token.MINUS):
## self.ast = BinOpNode(node,'-',newnode)
## #Debugging
## print("expr node value = ", node.opvalue)
## print("expr node left = ", node.left)
## print("expr node right = ", node.right)
##
## print("expr newnode value = ", newnode.opvalue)
## print("expr newnode left = ", newnode.left)
## print("expr newnode right = ", newnode.right)
##
## return node
##
## def parse(self):
## node = self.expression()
##
## #Debugging
## print("parse value = ", node.opvalue)
## print("parse left = ", node.left)
## print("parse right = ", node.right)
## return node
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
import datetime
# Import from our database_setup file
from puppies import Base, Shelter, Puppy
from puppies_extra import PuppyProfile, Person, Adoption
# The create_engine function tells SQLAlchemy which database to talk to.
engine = create_engine('sqlite:///puppyshelter.db')
# Bind the engine to the declarative base, connecting our class definitions
# to the corresponding tables in the database.
Base.metadata.bind = engine
# sessionmaker builds the factory linking our code to the engine.
DBSession = sessionmaker(bind=engine)
# Open a session through which we can issue commands.
session = DBSession()
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxx Exercise 3 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Exercise 3: each puppy gets at most one profile (photo url, description,
# special needs) linked back to the puppy via a foreign key.
description = "Cute little puppy."
special_needs = "yammy yammy food"
# Profile for puppy #1; puppy_id is the foreign key into the puppy table.
puppyProfile = PuppyProfile(url='/images/puppy1.jpg',
                            description=description,
                            special_needs=special_needs,
                            puppy_id=1)
session.add(puppyProfile)
session.commit()
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def countNodes(self, root: TreeNode) -> int:
        """Count the nodes by plain O(n) recursion over both subtrees."""
        if root is None:
            return 0
        return 1 + self.countNodes(root.left) + self.countNodes(root.right)
"""
Depth of tree == root to left most leaf.
Based on "all nodes in the last level are as far left as possible"
if depth of left tree== depth of right tree:
left tree is perfect bst (num of node = 2**h -1)
right tree is complete bst (of course)
if depth of left tree > depth of right tree:
right tree is perfect bst (binary tree every level, except possibly the last, is completely filled)
left tree is complete bst (of course)
"""
class Solution:
    def countNodes(self, root: TreeNode) -> int:
        """Count nodes of a COMPLETE binary tree in O(log^2 n).

        Compares left-spine depths of the two subtrees: if they match, the
        left subtree is perfect and we recurse into the right; otherwise the
        right subtree is perfect (one level shallower) and we recurse left.
        """
        if not root:
            return 0
        d_left = self.depth(root.left)
        d_right = self.depth(root.right)
        if d_left == d_right:
            # Left subtree is perfect with 2**d_left - 1 nodes.
            return 1 + 2 ** d_left - 1 + self.countNodes(root.right)
        if d_left > d_right:
            # Right subtree is perfect with 2**d_right - 1 nodes.
            return self.countNodes(root.left) + 1 + 2 ** d_right - 1
    def depth(self, node):
        # Length of the left spine (node down to the leftmost leaf).
        d = 0
        while node:
            d += 1
            node = node.left
        return d
#%%
# Read two space-separated integers from stdin.
n, m = map(int, input().split())
#%%
# Hard-coded sample values in place of reading "n k c" from stdin.
# n, k, c = input().split()
n, k, c = map(int, "11 3 2".split())
# Attendance flags: 1 for 'o', 0 for 'x'.
x = [1 if ch == "o" else 0 for ch in "xoxxxoxxxoo"]
# %%
|
import twint
import heapq
import matplotlib.pyplot as plt
def get_similar_hashtags(seed_hashtag, limit):
    """Search Twitter for `seed_hashtag`, chart and print co-occurring hashtags."""
    config = twint.Config()
    config.Hide_output = True  # suppress twint's command-line output
    config.Limit = limit  # maximum number of tweets to pull
    config.Store_object = True
    config.Search = seed_hashtag
    twint.run.Search(config)
    pulled_tweets = twint.output.tweets_list
    # Count how often each hashtag occurs across the pulled tweets.
    occurrences = {}
    for tweet in pulled_tweets:
        for tag in tweet.hashtags:
            occurrences[tag] = occurrences.get(tag, 0) + 1
    # del occurrences[seed_hashtag]  # would drop the seed hashtag itself
    # Ten most frequent hashtags, most frequent first.
    top_hashtags = heapq.nlargest(10, occurrences, key=occurrences.get)
    hashtags_ranked = {tag: occurrences[tag] for tag in top_hashtags}
    positions = range(len(hashtags_ranked))
    plt.barh(positions, list(hashtags_ranked.values()), align='center', color='maroon')
    plt.yticks(positions, list(hashtags_ranked.keys()))
    plt.gca().invert_yaxis()  # highest bar at the top
    plt.title("Most Related Hashtags to " + seed_hashtag)
    plt.savefig(seed_hashtag + '.png', bbox_inches='tight')  # save chart as png
    plt.show()
    print("List of most related hashtags to "+ seed_hashtag + " :")
    print(top_hashtags)
    plt.close()
    # Reset twint's module-level buffer for the next call.
    twint.output.tweets_list = []
def main():
    """Generate a related-hashtag chart for each seed hashtag."""
    seed_hashtags = ["#tiktok", "#blacklivesmatter", "#google", "#covid19", "#carryminati"]
    limit = 500  # limits the number of tweets to pull per seed hashtag
    for seed_hashtag in seed_hashtags:
        get_similar_hashtags(seed_hashtag, limit)


if __name__ == "__main__":
    # Guarding the entry point avoids kicking off the crawl when this
    # module is merely imported (previously main() ran unconditionally).
    main()
|
import textwrap
from SC import mark
def indent(n):
    """Return str(n) with every line prefixed by the fixed indent prefix."""
    return textwrap.indent(str(n), '  ')
class Var:
    """Symbol-table entry for a variable declaration."""

    def __init__(self, tp):
        self.tp = tp  # type descriptor of the variable

    def __str__(self):
        # `name` and `lev` are attached later by newDecl; default to ''
        name = getattr(self, 'name', '')
        lev = getattr(self, 'lev', '')
        return f'var {name} lev {lev}:\n{indent(self.tp)}'
class Ref:
    """Symbol-table entry for a reference (VAR) parameter."""

    def __init__(self, tp):
        self.tp = tp  # type descriptor of the referenced value

    def __str__(self):
        # `name` and `lev` are attached later by newDecl; default to ''
        name = getattr(self, 'name', '')
        lev = getattr(self, 'lev', '')
        return f'ref {name} lev {lev}: {self.tp}'
class Const:
    """Symbol-table entry for a named constant with its value."""

    def __init__(self, tp, val):
        self.tp = tp    # type descriptor
        self.val = val  # constant value

    def __str__(self):
        # `name` is attached later by newDecl; default to ''
        return f"const {getattr(self, 'name', '')}: {self.tp} = {self.val}"
class Type:
    """Symbol-table entry for a named type declaration."""

    def __init__(self, tp):
        # declarations themselves have no type (`tp` slot), the declared
        # type descriptor is stored in `val`
        self.tp = None
        self.val = tp

    def __str__(self):
        return f"type {getattr(self, 'name', '')}{indent(self.val)}"
class Proc:
    """Symbol-table entry for a declared procedure and its parameters."""

    def __init__(self, par):
        self.tp = None   # procedures have no value type
        self.par = par   # list of parameter entries

    def __str__(self):
        params = [str(s) for s in self.par]
        return f'proc {self.name} lev {self.lev}({params})'
class StdProc:
    """Symbol-table entry for a predeclared (standard) procedure."""

    def __init__(self, par):
        self.tp = None   # procedures have no value type
        self.par = par   # list of parameter entries

    def __str__(self):
        params = [str(s) for s in self.par]
        return f'stdproc {self.name} lev {self.lev} par\n{indent(params)}'
class Int: pass   # marker type: integers
class Bool: pass  # marker type: booleans
class Enum: pass # for adding enumeration types
class Record:
    """Record (struct) type descriptor: a list of field entries."""

    def __init__(self, fields):
        self.fields = fields

    def __str__(self):
        body = '\n'.join(str(f) for f in self.fields)
        return f'record\n{indent(body)}'
class Array:
    """Array type descriptor: element type, lower bound, and length."""

    def __init__(self, base, lower, length):
        self.base = base      # element (base) type descriptor
        self.lower = lower    # lower index bound
        self.length = length  # number of elements

    def __str__(self):
        return (f'array lower {self.lower} length {self.length}'
                f' base\n{indent(self.base)}')
def init():
    """(Re)initialize the symbol table with a single empty global scope."""
    global symTab
    # symTab is a list of scopes, innermost first; each scope is a list of entries
    symTab = [[]]
def printSymTab():
    """Print every entry of every scope, then a trailing blank line."""
    for scope in symTab:
        for entry in scope:
            print(entry)
    print()
def newDecl(name, entry):
    """Declare `entry` under `name` in the innermost scope.

    Tags the entry with its scope level and name; reports a duplicate
    declaration via mark() instead of inserting twice.
    """
    entry.lev = len(symTab) - 1
    entry.name = name
    top = symTab[0]
    if any(existing.name == name for existing in top):
        mark("multiple definition")
        return
    top.append(entry)
def find(name):
    """Look `name` up from the innermost scope outward.

    On failure, reports via mark() and returns a dummy Const entry so
    the caller can continue.
    """
    for scope in symTab:
        for entry in scope:
            if entry.name == name:
                return entry
    mark('undefined identifier ' + name)
    return Const(None, 0)
def openScope():
    """Push a new, empty innermost scope."""
    symTab.insert(0, [])
def topScope():
    """Return the innermost (current) scope."""
    return symTab[0]
def closeScope():
    """Discard the innermost scope and all its entries."""
    symTab.pop(0)
|
"""
TODO Write module docstring
"""
# pylint: disable=missing-docstring
from pathlib import Path
from typing import Sequence
import click
from flatboobs import logging
@click.group()
# FIX: was '--debug/-no-debug'; click's boolean off-switch syntax is
# '--flag/--no-flag', so the disabling form was misspelled.
@click.option('--debug/--no-debug', default=False)
def main(
        debug: bool = False,
):
    """CLI entry group: configure logging before any subcommand runs."""
    logging.setup_logging(debug)
@main.command(help="Generates C++ [de]serializer code.")
@click.option(
    '--output-dir', '-o', default='./',
    type=click.Path(
        file_okay=False, dir_okay=True, writable=True,
        resolve_path=True))
@click.option(
    '--include-path', '-I', multiple=True,
    type=click.Path(
        file_okay=False, dir_okay=True, readable=True,
        resolve_path=True))
@click.option(
    '--header-only/--no-header-only', default=False,
    help="Generated header only library.")
@click.option(
    '--clang-format/--no-clang-format', default=True,
    help="Apply clang-format.")
@click.argument('library_name', type=str)
@click.argument(
    'schema_file', nargs=-1,
    type=click.Path(
        file_okay=True, dir_okay=False, readable=True, resolve_path=True))
def cpp(
        # pylint: disable=too-many-arguments
        output_dir: str = './',
        include_path: Sequence[str] = tuple(),
        library_name: str = "",
        schema_file: Sequence[str] = tuple(),
        **kwargs
):
    """Generate C++ [de]serializer sources for the given schema files."""
    # Imported lazily so the CLI stays fast when this command is unused.
    from flatboobs.codegen.generate_cpp import generate_cpp
    schemas = [Path(f) for f in schema_file]
    includes = [Path(p) for p in include_path]
    generate_cpp(
        schemas,
        includes,
        Path(output_dir),
        library_name,
        options=kwargs
    )
@main.command(name="list",
              help="Generates list of schema files with all includes.")
@click.option(
    '--include-path', '-I', multiple=True,
    type=click.Path(
        file_okay=False, dir_okay=True, readable=True,
        resolve_path=True))
@click.argument(
    'schema_file', nargs=-1,
    type=click.Path(
        file_okay=True, dir_okay=False, readable=True, resolve_path=True))
def list_(
        # pylint: disable=too-many-arguments
        include_path: Sequence[str] = tuple(),
        schema_file: Sequence[str] = tuple(),
        **kwargs
):
    """Print the transitive list of schema files, includes resolved."""
    # Imported lazily so the CLI stays fast when this command is unused.
    from flatboobs.codegen.generate_list import generate_list
    schemas = [Path(f) for f in schema_file]
    includes = [Path(p) for p in include_path]
    generate_list(
        schemas,
        includes,
        options=kwargs
    )
if __name__ == '__main__':
    # pylint: disable=no-value-for-parameter
    # pylint: disable=unexpected-keyword-arg
    # click supplies the CLI parameters; `obj` seeds the shared context object
    main(obj=dict())
|
from pathlib import Path
import os
import logging
import sys
import boto3
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def main():
    """Upload every quotes file from ../quotes to the S3 bucket named by
    the QUOTES_INDEX_S3_BUCKET environment variable, logging progress
    every 10 uploads.
    """
    bucket = os.getenv("QUOTES_INDEX_S3_BUCKET")
    if not bucket:
        # Fail fast with a clear message instead of a confusing boto3
        # error when the env var is missing.
        raise RuntimeError("QUOTES_INDEX_S3_BUCKET environment variable is not set")
    s3_client = boto3.client("s3")
    # NOTE(review): "*txt" matches any name *ending* in "txt" (e.g. "reportxt"),
    # not only ".txt" files — presumably "*.txt" was intended; confirm.
    quotes_files = list((Path(__file__).parent.parent / "quotes").glob("*txt"))
    for ind, f in enumerate(quotes_files, start=1):
        _ = s3_client.upload_file(f.as_posix(), bucket, f.name)
        if ind % 10 == 0:
            logger.info(f"Uploaded {ind} files")
    logger.info(f"Finished uploading {len(quotes_files)} files")


if __name__ == "__main__":
    main()
|
from django.shortcuts import render
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from .models import DataModel
from .serializers import DataSerializers
# Create your views here.
class ListDisplay(ListCreateAPIView):
    """List all DataModel records (GET) or create a new one (POST).

    Session-authenticated, logged-in users only.
    """
    queryset = DataModel.objects.all()
    serializer_class = DataSerializers
    authentication_classes = [SessionAuthentication]
    permission_classes = [IsAuthenticated]
class RetriveData(RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) a single
    DataModel record by primary key.

    NOTE(review): the class name misspells "Retrieve"; kept as-is since
    renaming would break URLconf references elsewhere.
    """
    queryset = DataModel.objects.all()
    serializer_class = DataSerializers
    authentication_classes = [SessionAuthentication]
    permission_classes = [IsAuthenticated]
|
"""
提供SE3的各种运算
"""
import tensorflow as tf
from simpleRotate.tf2.SO3_tf2 import SO3_tf2
from .Dtype import dtype
class SE3_tf2:
    """Operations on SE(3) rigid-body transforms (4x4 homogeneous matrices)."""

    @classmethod
    def normalize(cls, matrix):
        """Return `matrix` (4x4 SE(3)) with its rotation block re-normalized.

        FIX: the previous implementation wrote into `tf.eye(4)` with item
        assignment (`SE3[:3, :3] = ...`), which tf.Tensor does not support
        and raises TypeError at runtime.  The result is rebuilt with
        tf.concat instead, the same way exp() constructs its matrix.
        """
        rotation = SO3_tf2.normalize(matrix[:3, :3])
        trans = matrix[:3, 3]
        upper = tf.concat((rotation, tf.expand_dims(trans, axis=1)), axis=1)
        # bottom row of a valid homogeneous transform is always [0, 0, 0, 1]
        return tf.concat((upper, tf.constant([[0, 0, 0, 1]], dtype=dtype)), axis=0)

    @classmethod
    def exp(cls, se3):
        """Map a 6-vector se(3) = [t, so3] to a 4x4 SE(3) matrix.

        NOTE(review): the translation is used directly rather than
        V @ t (left Jacobian applied to t, see the commented line) —
        confirm this asymmetry with log() is intentional.
        """
        t = se3[:3]
        so3 = se3[3:]
        SO3 = SO3_tf2.exp(so3)
        # Vt = tf.matmul(SO3_tf2.left_jacobi(so3), tf.expand_dims(t, axis=1))
        mat = tf.concat((SO3, tf.expand_dims(t, axis=1)), axis=1)
        mat = tf.concat((mat, tf.constant([[0, 0, 0, 1]], dtype=dtype)), axis=0)
        return mat

    @classmethod
    def log(cls, SE3):
        """Map a 4x4 SE(3) matrix to its 6-vector se(3) = [t, so3]."""
        SO3 = SE3[:3, :3]
        so3 = SO3_tf2.log(SO3)
        trans = SE3[:3, 3]
        # the inverse left Jacobian recovers the se(3) translation component
        t = tf.matmul(SO3_tf2.inv_left_jacobi(so3), tf.expand_dims(trans, axis=1))
        t = tf.squeeze(t, axis=1)
        se3 = tf.concat((t, so3), axis=0)
        return se3
# @classmethod
# def wedge(cls, xi):
#
# Xi = tf.eye(4, dtype=dtype)
#
# Xi[:3, :3] = SO3_mat.wedge(xi[3:])
# Xi[:, 3] = Xi[:3]
#
# return Xi
#
# @classmethod
# def vee(cls, Xi):
# if Xi.shape != (4, 4):
# raise ValueError("shape wrong")
# phi = SO3_mat.vee(Xi[:3, :3])
# xi_t = Xi[:3, 3]
# xi = np.hstack((xi_t, phi))
# return xi |
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List, Optional
from statistics import stdev
from abr.video import get_video_chunks
from exp_util.env import experiments, experiment, run_subexp, run_trace, run_traffic
from exp_util.data import Experiment, save_experiments, generate_summary, load_experiments
from exp_util.plot import plot_bar, plot_cdf, plot_fct_cdf, plot_tag, plot_flow_capacity_cdf
import os
import time
import random
@experiment
def minerva_example(args: Namespace) -> None:
    """Run a single two-stream minerva-vs-minerva experiment under
    test/minerva (500 ms latency, 1 Mbps).

    Dead locals (`experiments`, `name`, redundant path indirection)
    from the original were removed; behavior is unchanged.
    """
    root_path = str(Path("test"))
    os.system(f"mkdir -p {root_path}")
    latency, bandwidth = 500, 1
    algo = 'minerva'
    cc = 'minerva'
    path = str(Path(root_path) / "minerva")
    server1 = f"--cc {cc} --algo {algo} --server-algo {algo} --name minerva1 --video guard"
    server2 = f"--cc {cc} --algo {algo} --server-algo {algo} --name minerva2 --video bojack"
    run_subexp(bandwidth, latency, path, [server1, server2], burst=2000, video='bojack', force_run=True)
@experiment
def autotarget_training(args: Namespace) -> None:
    """Generate training runs: dynamic/bbr2 versus the remote 'target'
    ABR, 100 repetitions over two videos and bandwidths 1-3 Mbps.

    Results land under test/<video>/versus_rmpc/.  The dead
    `experiments = []` local from the original was removed; nothing
    ever appended to or read it.
    """
    videos = ['bojack', 'guard']
    root_path = str(Path("test"))
    os.system(f"mkdir -p {root_path}")
    # (algo, cc) pairs for the two competing players
    compete1 = [
        ('dynamic', 'bbr2'),
    ]
    compete2 = [
        ('remote', 'target')
    ]
    for run_id in range(100):
        for video in videos:
            experiment_path = str(Path(root_path) / video)
            latency = 500
            for bandwidth in [1, 2, 3]:
                subpath = str(Path(experiment_path) / "versus_rmpc")
                for (algo1, cc1) in compete1:
                    for (algo2, cc2) in compete2:
                        server1 = f"--algo {algo1} --name robustMpc --cc {cc1} --video {video} --training"
                        server2 = f"--server-algo {algo2} --name abrcc --cc {cc2} --video {video} --training"
                        path = str(Path(subpath) / f"{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                        run_subexp(
                            bandwidth, latency, path, [server1, server2], burst=2000, video=video, force_run=True,
                        )
@experiment
def single_flow_traffic(args: Namespace) -> None:
    """Single-ABR-flow capacity experiments under cross traffic.

    Sweeps bandwidths x instances x 4 runs x videos, tagging results
    "sf"/"bw<N>" under experiments/traffic/sft.  With --dry, run_traffic
    is stubbed out and only the experiment metadata is printed.
    """
    global run_traffic
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_traffic = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'cook', 'guard']
    root_path = str(Path("experiments") / "traffic")
    os.system(f"mkdir -p {root_path}")
    # NOTE(review): runner_log is never closed; consider a with-block.
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # (flag, ABR algorithm, congestion control) triples to evaluate
    instances = [
        ('--algo', 'robustMpc', 'cubic'),
        ('--server-algo', 'gap', 'gap'),
        ('--algo', 'dynamic', 'bbr2'),
        ('--algo', 'robustMpc', 'bbr2'),
        ('--algo', 'dynamic', 'cubic'),
    ]
    experiments = []
    experiment_path = str(Path(root_path) / 'sft')
    subpath = experiment_path
    latency = 500
    for bandwidth in [2, 3, 4]:
        for (where, algo, cc) in instances:
            for run_id in range(4):
                for video in videos:
                    server = f"{where} {algo} --name abr --cc {cc} --video {video}"
                    path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_{video}_run{run_id}")
                    runner_log.write(f'> {path}\n')
                    run_traffic(path, f"{server} -l {latency} -b {bandwidth} --single-flow", headless=args.headless)
                    # "gap" is relabeled "gap2" in summary tags
                    cc_name = cc if cc != "gap" else "gap2"
                    experiments.append(Experiment(
                        video = video,
                        path = str(Path(path) / "abr_plots.log"),
                        latency = latency,
                        bandwidth = bandwidth,
                        extra = ["sf", algo, cc_name, f"bw{bandwidth}"],
                        run_id = run_id,
                    ))
    if args.dry:
        print(experiments)
        print(len(experiments))
    else:
        save_experiments(experiment_path, experiments)
        generate_summary(experiment_path, experiments)
@experiment
def traffic(args: Namespace) -> None:
    """Cross-traffic experiments in two phases.

    Phase 1: flow-completion-time ("fct") runs with a random video per
    run.  Phase 2: per-video sweeps against the same instances, tagged
    "traffic".  With --dry, run_traffic is stubbed out and only the
    experiment metadata is printed.
    """
    global run_traffic
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_traffic = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'cook', 'guard']
    root_path = str(Path("experiments") / "traffic")
    os.system(f"mkdir -p {root_path}")
    # NOTE(review): runner_log is never closed; consider a with-block.
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # (flag, ABR algorithm, congestion control) triples to evaluate
    instances = [
        ('--algo', 'robustMpc', 'cubic'),
        ('--algo', 'robustMpc', 'bbr2'),
        ('--algo', 'dynamic', 'bbr2'),
        ('--algo', 'dynamic', 'cubic'),
        ('--server-algo', 'gap', 'gap'),
    ]
    experiments = []
    experiment_path = str(Path(root_path) / 'fct')
    subpath = experiment_path
    latency = 100
    for bandwidth in [5, 4, 3]:
        for (where, algo, cc) in instances:
            for run_id in range(10):
                # one random video per run in the fct phase
                video = random.choice(videos)
                server = f"{where} {algo} --name abr --cc {cc} --video {video}"
                path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                runner_log.write(f'> {path}\n')
                run_traffic(path, f"{server} -l {latency} -b {bandwidth} --light", headless=args.headless, burst=20000)
                # "gap" is relabeled "gap2" in summary tags
                cc_name = cc if cc != "gap" else "gap2"
                experiments.append(Experiment(
                    video = video,
                    path = str(Path(path) / "abr_plots.log"),
                    latency = latency,
                    bandwidth = bandwidth,
                    extra = ["fct", algo, cc_name, f"bw{bandwidth}"],
                    run_id = run_id,
                ))
    if args.dry:
        print(experiments)
        print(len(experiments))
    else:
        save_experiments(experiment_path, experiments)
        generate_summary(experiment_path, experiments)
    latency = 100
    for video in videos:
        experiments = []
        experiment_path = str(Path(root_path) / video)
        for run_id in range(4):
            for bandwidth in [3, 2, 1]:
                # versus
                subpath = str(Path(experiment_path) / "versus_rmpc")
                for (where, algo, cc) in instances:
                    server = f"{where} {algo} --name abr --cc {cc} --video {video}"
                    path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                    runner_log.write(f'> {path}\n')
                    run_traffic(path, f"{server} -l {latency} -b {bandwidth}", headless=args.headless)
                    # relabel "gap" as "gap2" for the summary tags
                    if cc == "gap":
                        cc = "gap2"
                    experiments.append(Experiment(
                        video = video,
                        path = str(Path(path) / "abr_plots.log"),
                        latency = latency,
                        bandwidth = bandwidth,
                        extra = ["traffic", algo, cc, video],
                        run_id = run_id,
                    ))
        if args.dry:
            print(experiments)
            print(len(experiments))
        else:
            save_experiments(experiment_path, experiments)
            generate_summary(experiment_path, experiments)
@experiment
def stream_count(args: Namespace) -> None:
    """Scalability sweep: 8 down to 2 concurrent streams per algorithm.

    Each run picks `stream_number` random videos and plays them against
    each other; the experiment runs for the shortest sampled video.
    With --dry, the runners are stubbed out and only metadata is printed.
    """
    global run_trace, run_subexp
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_trace = lambda *args, **kwargs: None
        run_subexp = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'cook', 'guard']
    root_path = str(Path("experiments") / "stream_count")
    os.system(f"mkdir -p {root_path}")
    # NOTE(review): runner_log is never closed; consider a with-block.
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # (flag, ABR algorithm, congestion control) triples to evaluate
    algorithms = [
        ('--server-algo', 'minerva', 'minerva'),
        ('--server-algo', 'minervann', 'minerva'),
        ('--server-algo', 'gap', 'gap'),
        ('--algo', 'robustMpc', 'cubic'),
        ('--algo', 'robustMpc', 'bbr2'),
        ('--algo', 'dynamic', 'bbr2'),
        ('--algo', 'dynamic', 'cubic'),
    ]
    experiments = []
    experiment_path = str(Path(root_path))
    runs = 20
    latency = 500
    bandwidth = 4
    min_streams, max_streams = 2, 8
    for stream_number in range(max_streams, min_streams - 1, -1):
        for run_id in range(runs):
            for (arg, algo, cc) in algorithms:
                run_servers = []
                run_videos = []
                for i in range(stream_number):
                    video = random.choice(videos)
                    server = f"{arg} {algo} --name abr{i + 1} --cc {cc} --video {video}"
                    # minerva variants additionally need the client-side flag
                    if algo == "minerva" or algo == "minervann":
                        server += f" --algo {algo}"
                    run_videos.append(video)
                    run_servers.append(server)
                # run for the shortest of the sampled videos (min by chunk count)
                video_length = list(zip(map(get_video_chunks, run_videos), run_videos))
                shortest_video = min(video_length)[1]
                path = str(Path(experiment_path) / f"{algo}_{cc}_streams{stream_number}_run{run_id}")
                runner_log.write(f'> {path}\n')
                run_subexp(
                    bandwidth, latency, path, run_servers, burst=2000, video=shortest_video,
                    headless=args.headless,
                )
                # NOTE(review): `video` here is only the LAST sampled video of
                # this run, not all of them — confirm this is intended.
                experiments.append(Experiment(
                    video = video,
                    path = str(Path(path) / "leader_plots.log"),
                    latency = latency,
                    bandwidth = bandwidth,
                    extra = [f"streams{stream_number}", algo, cc],
                    run_id = run_id,
                ))
    if args.dry:
        print(experiments)
        print(len(experiments))
    else:
        save_experiments(experiment_path, experiments)
        generate_summary(experiment_path, experiments)
@experiment
def multiple(args: Namespace) -> None:
    """Two-stream experiments per video: gap-vs-baselines ("versus"),
    gap/minerva-vs-self ("self"), baseline-vs-baseline ("rmpc"/"dynamic"),
    and single-stream trace runs ("traces").

    FIX: in the baselines section the path f-string interpolated `algo2`,
    a stale binding left over from the "versus" loops above (always
    'gap'); it now uses cc1/cc2 only, matching the analogous code in
    hetero().  With --dry, the runners are stubbed out and only the
    experiment metadata is printed.
    """
    global run_trace, run_subexp
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_trace = lambda *args, **kwargs: None
        run_subexp = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'cook', 'guard']
    root_path = str(Path("experiments") / "multiple_videos")
    os.system(f"mkdir -p {root_path}")
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # baseline (algo, cc) pairs competing against compete2
    compete1 = [
        ('robustMpc', 'cubic'),
        ('robustMpc', 'bbr2'),
        ('dynamic', 'bbr2'),
        ('dynamic', 'cubic'),
    ]
    compete2 = [
        ('gap', 'gap'),
    ]
    minerva = [
        ('minervann', 'minerva'),
        ('minerva', 'minerva'),
    ]
    for video in videos:
        experiments = []
        experiment_path = str(Path(root_path) / video)
        for run_id in range(4):
            latency = 500
            for bandwidth in [3, 2, 1]:
                # versus: each baseline against each compete2 algorithm
                subpath = str(Path(experiment_path) / "versus_rmpc")
                for (algo1, cc1) in compete1:
                    for (algo2, cc2) in compete2:
                        server1 = f"--algo {algo1} --name robustMpc --cc {cc1} --video {video}"
                        server2 = f"--server-algo {algo2} --name abrcc --cc {cc2} --video {video}"
                        path = str(Path(subpath) / f"{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                        if algo1 != 'robustMpc':  # avoid repeating old experiment paths
                            path = str(Path(subpath) / f"{algo1}_{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                        runner_log.write(f'> {path}\n')
                        run_subexp(
                            bandwidth, latency, path, [server1, server2], burst=2000, video=video,
                            headless=args.headless
                        )
                        # relabel "gap" as "gap2" for the summary tags
                        if cc2 == "gap":
                            cc2 = "gap2"
                        experiments.append(Experiment(
                            video = video,
                            path = str(Path(path) / "leader_plots.log"),
                            latency = latency,
                            bandwidth = bandwidth,
                            extra = ["versus", algo1, cc1, algo2, cc2, video],
                            run_id = run_id,
                        ))
                # self: each compete2/minerva algorithm against itself
                subpath = str(Path(experiment_path) / "versus_self")
                for (algo, cc) in compete2 + minerva:
                    server1 = f"--server-algo {algo} --name abrcc1 --cc {cc} --video {video}"
                    server2 = f"--server-algo {algo} --name abrcc2 --cc {cc} --video {video}"
                    # minerva variants additionally need the client-side flag
                    if algo == "minerva" or algo == "minervann":
                        server1 += f" --algo {algo}"
                        server2 += f" --algo {algo}"
                    path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                    runner_log.write(f'> {path}\n')
                    run_subexp(
                        bandwidth, latency, path, [server1, server2], burst=2000, video=video,
                        headless=args.headless
                    )
                    if cc == "gap":
                        cc = "gap2"
                    experiments.append(Experiment(
                        video = video,
                        path = str(Path(path) / "leader_plots.log"),
                        latency = latency,
                        bandwidth = bandwidth,
                        extra = ["self", algo, cc],
                        run_id = run_id,
                    ))
                # baselines: robustMpc/dynamic against themselves over cc pairs
                subpath = str(Path(experiment_path) / "rmpc")
                for cc1, cc2 in [('cubic', 'bbr2'), ('bbr2', 'bbr2'), ('cubic', 'cubic')]:
                    for algo in ['robustMpc', 'dynamic']:
                        server1 = f"--algo {algo} --name rmpc1 --cc {cc1} --video {video}"
                        server2 = f"--algo {algo} --name rmpc2 --cc {cc2} --video {video}"
                        path = str(Path(subpath) / f"{cc1}_{cc2}_{bandwidth}_run{run_id}")
                        if algo != 'robustMpc':  # avoid repeating old experiment paths
                            # FIX: was f"{algo}_{cc1}_{algo2}_{cc2}_..." — `algo2`
                            # is stale here (bound by the versus loop above).
                            path = str(Path(subpath) / f"{algo}_{cc1}_{cc2}_{bandwidth}_run{run_id}")
                        runner_log.write(f'> {path}\n')
                        run_subexp(
                            bandwidth, latency, path, [server1, server2], burst=2000, video=video,
                            headless=args.headless
                        )
                        extra = 'rmpc'
                        if algo == 'dynamic':
                            extra = 'dynamic'
                        experiments.append(Experiment(
                            video = video,
                            path = str(Path(path) / "leader_plots.log"),
                            latency = latency,
                            bandwidth = bandwidth,
                            extra = [extra, cc1 + '1', cc2 + '2', video],
                            run_id = run_id,
                        ))
            # traces: single-stream runs over recorded network traces
            subpath = str(Path(experiment_path) / "traces")
            server1 = f"--cc target --server-algo target2 --name abrcc --video {video}"
            server2 = f"--cc bbr2 --algo robustMpc --name robustMpc --video {video}"
            server6 = f"--cc bbr2 --algo dynamic --name dynamic --video {video}"
            server3 = f"--cc gap --server-algo gap --name abrcc --video {video}"
            server4 = f"--cc gap --server-algo remote --name abrcc --video {video}"
            server5 = f"--cc cubic --algo robustMpc --name robustMpc --video {video}"
            for plot_name, name, server in [
                ("robustMpc", "rmpc_bbr", server2),
                ("robustMpc", "rmpc_cubic", server5),
                ("dynamic", "dynamic_bbr", server6),
                ("abrcc", "target2", server1),
                ("abrcc", "gap_pid", server3),
                ("abrcc", "remote", server4),
            ]:
                traces = Path("network_traces")
                for trace in [
                    str(traces / "norway_train_13.txt"),
                    str(traces / "car.txt"),
                    str(traces / "bus.txt"),
                    str(traces / "bus1.txt"),
                    str(traces / "norway_train_6.txt"),
                    str(traces / "norway_ferry_11.txt"),
                    str(traces / "norway_ferry_20.txt"),
                    str(traces / "norway_ferry_6.txt"),
                    str(traces / "norway_metro_6.txt"),
                    str(traces / "norway_tram_5.txt"),
                    str(traces / "norway_tram_14.txt"),
                    str(traces / "norway_tram_16.txt"),
                    str(traces / "norway_tram_19.txt"),
                ]:
                    trace_name = trace.split('/')[-1].split('.')[0]
                    path = str(Path(subpath) / f'{name}_{trace_name}')
                    runner_log.write(f'> {path}\n')
                    run_trace(path, f"{server} -l {latency} -t {trace}", headless=args.headless)
                    experiments.append(Experiment(
                        video = video,
                        path = str(Path(path) / f"{plot_name}_plots.log"),
                        latency = latency,
                        trace = trace,
                        extra = ["traces", name, trace, run_id],
                        run_id = run_id,
                    ))
        if args.dry:
            print(experiments)
            print(len(experiments))
        else:
            save_experiments(experiment_path, experiments)
            generate_summary(experiment_path, experiments)
    runner_log.close()  # was previously left open
@experiment
def multiple2(args: Namespace) -> None:
    """Three-stream experiments per video: two baselines vs one gap
    stream ("versus"), three identical baseline streams ("rmpc"/
    "dynamic"), and three identical minerva streams.

    With --dry, the runners are stubbed out and only the experiment
    metadata is printed.
    """
    global run_trace, run_subexp
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_trace = lambda *args, **kwargs: None
        run_subexp = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'cook', 'guard']
    root_path = str(Path("experiments") / "multiple_videos2")
    os.system(f"mkdir -p {root_path}")
    # NOTE(review): runner_log is never closed; consider a with-block.
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # baseline (algo, cc) pairs competing against compete2
    compete1 = [
        ('robustMpc', 'cubic'),
        ('robustMpc', 'bbr2'),
        ('dynamic', 'bbr2'),
        ('dynamic', 'cubic'),
    ]
    compete2 = [
        ('gap', 'gap'),
    ]
    for video in videos:
        experiments = []
        experiment_path = str(Path(root_path) / video)
        for run_id in range(4):
            latency = 500
            for bandwidth in [4, 3, 2]:
                # versus: two baseline streams against one compete2 stream
                subpath = str(Path(experiment_path) / "versus")
                for (algo1, cc1) in compete1:
                    for (algo2, cc2) in compete2:
                        server1 = f"--algo {algo1} --name robustMpc --cc {cc1} --video {video}"
                        server3 = f"--algo {algo1} --name robustMpc2 --cc {cc1} --video {video}"
                        server2 = f"--server-algo {algo2} --name abrcc --cc {cc2} --video {video}"
                        path = str(Path(subpath) / f"{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                        if algo1 != 'robustMpc': # since we don't want to repet old experiments
                            path = str(Path(subpath) / f"{algo1}_{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                        runner_log.write(f'> {path}\n')
                        run_subexp(
                            bandwidth, latency, path, [server1, server3, server2], burst=2000, video=video,
                            headless=args.headless
                        )
                        # relabel "gap" as "gap2" for the summary tags
                        if cc2 == "gap":
                            cc2 = "gap2"
                        experiments.append(Experiment(
                            video = video,
                            path = str(Path(path) / "leader_plots.log"),
                            latency = latency,
                            bandwidth = bandwidth,
                            extra = ["versus", algo1, cc1, algo2, cc2, video],
                            run_id = run_id,
                        ))
                # same type: three identical baseline streams
                subpath = str(Path(experiment_path) / "rmpc")
                for cc in ['cubic', 'bbr2']:
                    for algo in ['robustMpc', 'dynamic']:
                        server1 = f"--algo {algo} --name rmpc1 --cc {cc} --video {video}"
                        server2 = f"--algo {algo} --name rmpc2 --cc {cc} --video {video}"
                        server3 = f"--algo {algo} --name rmpc3 --cc {cc} --video {video}"
                        path = str(Path(subpath) / f"{cc}_{bandwidth}_run{run_id}")
                        if algo != 'robustMpc': # since we don't want to repet old experiments
                            path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                        runner_log.write(f'> {path}\n')
                        run_subexp(
                            bandwidth, latency, path, [server1, server2, server3], burst=2000, video=video,
                            headless=args.headless
                        )
                        extra = 'rmpc'
                        if algo == 'dynamic':
                            extra = 'dynamic'
                        experiments.append(Experiment(
                            video = video,
                            path = str(Path(path) / "leader_plots.log"),
                            latency = latency,
                            bandwidth = bandwidth,
                            extra = [extra, cc + '1', cc + '2', cc + '3', video],
                            run_id = run_id,
                        ))
                # minerva: three identical minerva streams
                for cc, algo in [
                    ("minerva", "minerva"),
                    ("minerva", "minervann"),
                ]:
                    server1 = f"--algo {algo} --server-algo {algo} --name rmpc1 --cc {cc} --video {video}"
                    server2 = f"--algo {algo} --server-algo {algo} --name rmpc2 --cc {cc} --video {video}"
                    server3 = f"--algo {algo} --server-algo {algo} --name rmpc3 --cc {cc} --video {video}"
                    path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                    runner_log.write(f'> {path}\n')
                    run_subexp(
                        bandwidth, latency, path, [server1, server2, server3], burst=2000, video=video,
                        headless=args.headless
                    )
                    experiments.append(Experiment(
                        video = video,
                        path = str(Path(path) / "leader_plots.log"),
                        latency = latency,
                        bandwidth = bandwidth,
                        extra = [algo, cc + '1', cc + '2', cc + '3', video],
                        run_id = run_id,
                    ))
        if args.dry:
            print(experiments)
            print(len(experiments))
        else:
            save_experiments(experiment_path, experiments)
            generate_summary(experiment_path, experiments)
@experiment
def hetero(args: Namespace) -> None:
    """Heterogeneous-video experiments: two streams play DIFFERENT
    videos against each other for every ordered pair of distinct videos.

    Sections mirror multiple(): baselines-vs-gap ("versus"), gap/minerva
    against itself ("self"), and baseline-vs-baseline ("rmpc"/"dynamic").
    Runs last as long as the shorter of the two videos.  With --dry, the
    runners are stubbed out and only the experiment metadata is printed.
    """
    global run_trace, run_subexp
    if args.dry:
        # dry run: record metadata without actually launching anything
        run_trace = lambda *args, **kwargs: None
        run_subexp = lambda *args, **kwargs: None
    videos = ['got', 'bojack', 'guard']
    root_path = str(Path("experiments") / "hetero")
    os.system(f"mkdir -p {root_path}")
    # NOTE(review): runner_log is never closed; consider a with-block.
    runner_log = open(str(Path(root_path) / 'exp.log'), 'w')
    # only for rmpc at the moment
    compete1 = [
        ('robustMpc', 'bbr2'),
        ('dynamic', 'bbr2'),
        ('robustMpc', 'cubic'),
        ('dynamic', 'cubic'),
    ]
    compete2 = [
        ('gap', 'gap'),
    ]
    minerva = [
        ('minerva', 'minerva'),
        ('minervann', 'minerva'),
    ]
    for i, video1 in enumerate(videos):
        for j, video2 in enumerate(videos):
            if i != j:
                # the experiment runs for the shorter video (by chunk count)
                shorter_video = video1 if get_video_chunks(video1) < get_video_chunks(video2) else video2
                experiments = []
                experiment_path = str(Path(root_path) / f"{video1}_{video2}")
                for run_id in range(4):
                    latency = 500
                    # robustMpc vs others
                    for bandwidth in [3, 2, 1]:
                        subpath = str(Path(experiment_path) / "versus_rmpc")
                        for (algo1, cc1) in compete1:
                            for (algo2, cc2) in compete2:
                                server1 = f"--algo {algo1} --name robustMpc --cc {cc1} --video {video1}"
                                server2 = f"--server-algo {algo2} --name abrcc --cc {cc2} --video {video2}"
                                path = str(Path(subpath) / f"{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                                if algo1 != 'robustMpc': # since we don't want to repet old experiments
                                    path = str(Path(subpath) / f"{algo1}_{cc1}_{algo2}_{cc2}_{bandwidth}_run{run_id}")
                                runner_log.write(f'> {path}\n')
                                run_subexp(
                                    bandwidth, latency, path, [server1, server2], burst=2000, video=shorter_video,
                                    headless=args.headless
                                )
                                # relabel "gap" as "gap2" for the summary tags
                                if cc2 == "gap":
                                    cc2 = "gap2"
                                experiments.append(Experiment(
                                    video = shorter_video,
                                    path = str(Path(path) / "leader_plots.log"),
                                    latency = latency,
                                    bandwidth = bandwidth,
                                    extra = ["versus", algo1, cc1, algo2, cc2, f"{video1}1", f"{video2}2"],
                                    run_id = run_id,
                                ))
                        # self
                        subpath = str(Path(experiment_path) / "versus_self")
                        for (algo, cc) in compete2 + minerva:
                            server1 = f"--server-algo {algo} --name abrcc1 --cc {cc} --video {video1}"
                            server2 = f"--server-algo {algo} --name abrcc2 --cc {cc} --video {video2}"
                            # minerva variants additionally need the client-side flag
                            if algo == "minerva" or algo == "minervann":
                                server1 += f" --algo {algo}"
                                server2 += f" --algo {algo}"
                            path = str(Path(subpath) / f"{algo}_{cc}_{bandwidth}_run{run_id}")
                            runner_log.write(f'> {path}\n')
                            run_subexp(
                                bandwidth, latency, path, [server1, server2], burst=2000, video=shorter_video,
                                headless=args.headless
                            )
                            if cc == "gap":
                                cc = "gap2"
                            # NOTE(review): f"{video2}" lacks the "2" suffix used by the
                            # other sections (f"{video2}2") — confirm whether intended.
                            experiments.append(Experiment(
                                video = shorter_video,
                                path = str(Path(path) / "leader_plots.log"),
                                latency = latency,
                                bandwidth = bandwidth,
                                extra = ["self", algo, cc, f"{video1}1", f"{video2}"],
                                run_id = run_id,
                            ))
                        # robustMpc
                        subpath = str(Path(experiment_path) / "rmpc")
                        for cc1, cc2 in [('cubic', 'bbr2'), ('bbr2', 'bbr2'), ('cubic', 'cubic')]:
                            for algo in ['robustMpc', 'dynamic']:
                                server1 = f"--algo {algo} --name rmpc1 --cc {cc1} --video {video1}"
                                server2 = f"--algo {algo} --name rmpc2 --cc {cc2} --video {video2}"
                                path = str(Path(subpath) / f"{cc1}_{cc2}_{bandwidth}_run{run_id}")
                                if algo != 'robustMpc': # since we don't want to repet old experiments
                                    path = str(Path(subpath) / f"{algo}_{cc1}_{cc2}_{bandwidth}_run{run_id}")
                                runner_log.write(f'> {path}\n')
                                run_subexp(
                                    bandwidth, latency, path, [server1, server2], burst=2000, video=shorter_video,
                                    headless=args.headless
                                )
                                extra = 'rmpc'
                                if algo == 'dynamic':
                                    extra = 'dynamic'
                                experiments.append(Experiment(
                                    video = shorter_video,
                                    path = str(Path(path) / "leader_plots.log"),
                                    latency = latency,
                                    bandwidth = bandwidth,
                                    extra = [extra, cc1 + '1', cc2 + '2', f"{video1}1", f"{video2}2"],
                                    run_id = run_id,
                                ))
                if args.dry:
                    print(experiments)
                    print(len(experiments))
                else:
                    save_experiments(experiment_path, experiments)
                    generate_summary(experiment_path, experiments)
@experiment
def generate_plots(args: Namespace) -> None:
avg = lambda xs: sum(xs) / len(xs)
cap = lambda xs: max(xs + [-50])
cv = lambda xs: stdev(xs) / avg(xs)
def plot_multiple(path: str, experiments: List[Experiment], cc: str) -> None:
plot_bar(path, experiments, [
# performance
(["versus", "robustMpc", f"{cc}", "gap2"], (avg, "Gap-RobustMpc", 1) ),
(["versus", "dynamic", f"{cc}", "gap2"], (avg, "Gap-Dynamic", 1) ),
(["rmpc", f"{cc}1", f"{cc}2", f"{cc}3"], (avg, "RobustMpc", 1) ),
(["dynamic", f"{cc}1", f"{cc}2", f"{cc}3"], (avg, "Dynamic", 1) ),
# fairness
(["versus", "robustMpc", f"{cc}", "gap2"], ('abrcc', "Gap-RobustMpc", 2) ),
(["versus", "dynamic", f"{cc}", "gap2"], ('abrcc', "Gap-Dynamic", 2) ),
(["rmpc", f"{cc}1", f"{cc}2", f"{cc}3"], (min, "RobustMpc", 2) ),
(["dynamic", f"{cc}1", f"{cc}2", f"{cc}3"], (min, "Dynamic", 2) ),
], x_range = ["4Mbps", "3Mbps", "2Mbps"],
metrics=["vmaf_qoe"], y_labels={'vmaf_qoe' : 'QoE'}, legend_location=4,
)
def plot_versus(path: str, experiments: List[Experiment], cc: str, **kwargs) -> None:
plot_bar(path, experiments, [
# performance
(["versus", "robustMpc", f"{cc}", "gap2"], ("abrcc", "Gap-RobustMpc", 1) ),
(["versus", "dynamic", f"{cc}", "gap2"], ("abrcc", "Gap-Dynamic", 1) ),
(["rmpc", f"{cc}1", f"{cc}2"], (max, "RobustMpc", 1) ),
(["dynamic", f"{cc}1", f"{cc}2"], (max, "Dynamic", 1) ),
# fairness
(["versus", "robustMpc", f"{cc}", "gap2"], ("robustMpc", "Gap-RobustMpc", 2) ),
(["versus", "dynamic", f"{cc}", "gap2"], ("robustMpc", "Gap-Dynamic", 2) ),
(["rmpc", f"{cc}1", f"{cc}2"], (min, "RobustMpc", 2) ),
(["dynamic", f"{cc}1", f"{cc}2"], (min, "Dynamic", 2) ),
], metrics=["vmaf_qoe"], y_labels={'vmaf_qoe' : 'QoE'}, **kwargs)
def plot_hetero_versus(path: str, experiments: List[Experiment], cc: str, **kwargs) -> None:
plot_bar(path, experiments, [
# performance
(["versus", "robustMpc", f"{cc}", "gap2"], ("abrcc", "Gap-RobustMpc", 1) ),
(["versus", "dynamic", f"{cc}", "gap2"], ("abrcc", "Gap-Dynamic", 1) ),
(["rmpc", f"{cc}1", f"{cc}2"], (min, "RobustMpc", 1) ),
(["dynamic", f"{cc}1", f"{cc}2"], (min, "Dynamic", 1) ),
# fairness
(["versus", "robustMpc", f"{cc}", "gap2"], ("robustMpc", "Gap-RobustMpc", 2) ),
(["versus", "dynamic", f"{cc}", "gap2"], ("robustMpc", "Gap-Dynamic", 2) ),
(["rmpc", f"{cc}1", f"{cc}2"], (max, "RobustMpc", 2) ),
(["dynamic", f"{cc}1", f"{cc}2"], (max, "Dynamic", 2) ),
], metrics=["vmaf_qoe"], y_labels={'vmaf_qoe' : 'QoE'}, **kwargs)
def plot_self(path: str, experiments: List[Experiment], **kwargs) -> None:
plot_bar(path, experiments, [
(["self", "gap", "gap2"], (min, " Gap", 1) ),
(["dynamic", "cubic1", "cubic2"], (min, "Dynamic-Cubic", 1) ),
(["dynamic", "bbr21", "bbr22"], (min, "Dynamic-BBR", 1) ),
(["rmpc", "cubic1", "cubic2"], (min, "RobustMpc-Cubic", 1) ),
(["rmpc", "bbr21", "bbr22"], (min, "RobustMpc-BBR", 1) ),
(["minervann"], (min, "Minerva", 1) ),
(["self", "gap", "gap2"], (avg, " Gap", 2) ),
(["dynamic", "cubic1", "cubic2"], (avg, "Dynamic-Cubic", 2) ),
(["dynamic", "bbr21", "bbr22"], (avg, "Dynamic-BBR", 2) ),
(["rmpc", "cubic1", "cubic2"], (avg, "RobustMpc-Cubic", 2) ),
(["rmpc", "bbr21", "bbr22"], (avg, "RobustMpc-BBR", 2) ),
(["minervann"], (avg, "Minerva", 2) ),
], metrics=["vmaf_qoe"], y_labels={'vmaf_qoe' : 'QoE'}, **kwargs)
def plot_traces(path: str, experiments: List[Experiment]) -> None:
plot_cdf(path, experiments, [
(["traces", "rmpc_bbr"], ("robustMpc", "RobustMpc-BBR", 1) ),
(["traces", "dynamic_bbr"], ("dynamic", "Dynamic-BBR", 1) ),
(["traces", "dynamic_cubic"], ("dynamic", "Dynamic-Cubic", 1) ),
(["traces", "rmpc_cubic"], ("robustMpc", "RobustMpc-Cubic", 1) ),
(["traces", "gap_pid"], ("abrcc", "Gap", 1) ),
], metrics=["vmaf", "vmaf_qoe"], x_labels={'vmaf_qoe': 'QoE', 'vmaf': 'VMAF'})
def plot_fct_traffic(path: str, experiments: List[Experiment], bw: Optional[int] = None) -> None:
extra = [f"bw{bw}"] if bw else []
plot_fct_cdf(path, experiments, [
(["fct", "robustMpc", "bbr2"] + extra, ('abr', "RobustMpc-BBR", 1) ),
(["fct", "robustMpc", "cubic"] + extra, ('abr', "RobustMpc-Cubic", 1) ),
(["fct", "dynamic", "bbr2"] + extra, ('abr', "Dynamic-BBR", 1) ),
(["fct", "dynamic", "cubic"] + extra, ('abr', "Dynamic-Cubic", 1) ),
(["fct", "gap"] + extra, ('abr', "Gap", 1) ),
])
def plot_flow_capacity(path: str, experiments: List[Experiment]) -> None:
plot_flow_capacity_cdf(path, experiments, [
(["sf", "robustMpc", "bbr2"], ('abr', "RobustMpc-BBR", 1) ),
(["sf", "robustMpc", "cubic"], ('abr', "RobustMpc-Cubic", 1) ),
(["sf", "dynamic", "bbr2"], ('abr', "Dynamic-BBR", 1) ),
(["sf", "dynamic", "cubic"], ('abr', "Dynamic-Cubic", 1) ),
(["sf", "gap"], ('abr', "Gap", 1) ),
])
plot_cdf(path, experiments, [
(["sf", "robustMpc", "bbr2"], ('abr', "RobustMpc-BBR", 1) ),
(["sf", "robustMpc", "cubic"], ('abr', "RobustMpc-Cubic", 1) ),
(["sf", "dynamic", "bbr2"], ('abr', "Dynamic-BBR", 1) ),
(["sf", "dynamic", "cubic"], ('abr', "Dynamic-Cubic", 1) ),
(["sf", "gap"], ('abr', "Gap", 1) ),
], metrics=["vmaf_qoe"], x_labels={'vmaf_qoe': 'QoE'})
def plot_stream_count(path: str, experiments: List[Experiment], partial_tag: str, func_name: str, func, **kwargs) -> None:
    """Plot an aggregate QoE statistic (`func`, labeled `func_name`) per stream count."""
    tag_sets = [
        (["robustMpc", "bbr2"], "RobustMpc-BBR"),
        (["robustMpc", "cubic"], "RobustMpc-Cubic"),
        (["dynamic", "bbr2"], "Dynamic-BBR"),
        (["dynamic", "cubic"], "Dynamic-Cubic"),
        (["gap"], "Gap"),
        (["minervann"], "Minerva"),
    ]
    entries = [(tags, (func, label, 1)) for tags, label in tag_sets]
    plot_tag(path, experiments, entries, partial_tag,
             metrics=['vmaf_qoe'], y_labels={'vmaf_qoe': func_name}, **kwargs)
# Driver: generate every plot group under experiments/plots.
# NOTE(review): os.system("mkdir -p ...") is POSIX-only; os.makedirs(...,
# exist_ok=True) would be portable — confirm target platforms before changing.
experiment_path = str(Path("experiments") / "plots")
os.system(f"mkdir -p {experiment_path}")
# traffic fct
traffic_path = str(Path(experiment_path) / "traffic")
os.system(f"mkdir -p {traffic_path}")
experiments = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "traffic" / "fct"),
]], [])
# Flow-completion-time plots: once per bandwidth cap, then unfiltered.
for bw in [5, 4, 3]:
    plot_fct_traffic(str(Path(traffic_path) / f"fct{bw}"), experiments, bw=bw)
plot_fct_traffic(str(Path(traffic_path) / "fct"), experiments)
# single flow traffic
experiments = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "traffic" / "sft"),
]], [])
plot_flow_capacity(str(Path(traffic_path) / "sft"), experiments)
# per-video plots
videos = ['got', 'bojack', 'guard', 'cook']
for video in videos:
    experiments = sum([load_experiments(experiment) for experiment in [
        str(Path("experiments") / "multiple_videos" / video),
    ]], [])
    os.system(f"mkdir -p {experiment_path}/{video}")
    for cc in ['cubic', 'bbr2']:
        plot_versus(str(Path(experiment_path) / video / f"{cc}"), experiments, cc)
    plot_self(str(Path(experiment_path) / video / "self"), experiments)
    plot_traces(str(Path(experiment_path) / video / "traces"), experiments)
# 3 flow experiments
for video in videos:
    experiments = sum([load_experiments(experiment) for experiment in [
        str(Path("experiments") / "multiple_videos2" / video),
    ]], [])
    os.system(f"mkdir -p {experiment_path}/{video}")
    for cc in ['cubic', 'bbr2']:
        plot_multiple(str(Path(experiment_path) / video / f"multiple_{cc}"), experiments, cc)
# hetero experiments
videos = ['got', 'bojack', 'guard']
# Ordered pairs (i != j): both (a, b) and (b, a) directories are plotted.
for i, video1 in enumerate(videos):
    for j, video2 in enumerate(videos):
        if i != j:
            experiments = sum([load_experiments(experiment) for experiment in [
                str(Path("experiments") / "hetero" / f"{video1}_{video2}"),
            ]], [])
            os.system(f"mkdir -p {experiment_path}/{video1}_{video2}")
            for cc in ['cubic', 'bbr2']:
                plot_hetero_versus(str(Path(experiment_path) / f"{video1}_{video2}" / f"{cc}"), experiments, cc)
            plot_self(str(Path(experiment_path) / f"{video1}_{video2}" / "self"), experiments)
# stream count
stream_count_path = str(Path(experiment_path) / "stream_count")
os.system(f"mkdir -p {stream_count_path}")
experiments = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "stream_count"),
]], [])
# Aggregate QoE vs. stream count under three summary statistics.
plot_stream_count(
    str(Path(stream_count_path) / "stream_count"), experiments, "streams", 'Total QoE', sum, legend_location=2,
)
plot_stream_count(
    str(Path(stream_count_path) / "stream_count_fair"), experiments, "streams", 'Minimum QoE', min, legend_location=1,
)
plot_stream_count(
    str(Path(stream_count_path) / "stream_count_cv"), experiments, "streams", 'QoE CV', cv, legend_location=2,
)
# summaries
videos = ['got', 'bojack', 'guard', 'cook']
experiments = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "multiple_videos" / video)
    for video in videos
]], [])
# Trace summary deliberately uses a smaller video set — TODO confirm 'got'
# is meant to be excluded here.
experiments2 = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "multiple_videos" / video)
    for video in ['guard', 'bojack', 'cook']
]], [])
os.system(f"mkdir -p {experiment_path}/summary")
for cc in ['cubic', 'bbr2']:
    plot_versus(str(Path(experiment_path) / 'summary' / f"{cc}"), experiments, cc)
plot_self(str(Path(experiment_path) / 'summary' / "self"), experiments)
plot_traces(str(Path(experiment_path) / 'summary' / "traces"), experiments2)
# summary multiple
experiments = sum([load_experiments(experiment) for experiment in [
    str(Path("experiments") / "multiple_videos2" / video)
    for video in videos
]], [])
for cc in ['cubic', 'bbr2']:
    plot_multiple(str(Path(experiment_path) / "summary" / f"multiple_{cc}"), experiments, cc)
# summary hetero
experiments = []
videos = ['got', 'bojack', 'guard']
# Unordered pairs only (j > i) — each hetero pair is aggregated once.
for i, video1 in enumerate(videos):
    for j, video2 in enumerate(videos):
        if j > i:
            experiments += sum([load_experiments(experiment) for experiment in [
                str(Path("experiments") / "hetero" / f"{video1}_{video2}"),
            ]], [])
for cc in ['cubic', 'bbr2']:
    plot_hetero_versus(str(Path(experiment_path) / f"summary" / f"hetero_{cc}"), experiments, cc)
plot_self(str(Path(experiment_path) / f"summary" / "hetero_self"), experiments, exclude=[], legend_location=3)
@experiment
def run_all(args: Namespace) -> None:
    """Run every experiment group, in the canonical order."""
    suites = (traffic, multiple, multiple2, hetero, single_flow_traffic, stream_count)
    for suite in suites:
        suite(args)
if __name__ == "__main__":
    # Command-line entry point: dispatch to a registered experiment by name.
    parser = ArgumentParser(description=(
        f'Run experiment setup in this Python file. '
        f'Available experiments: {list(experiments().keys())}'))
    parser.add_argument('name', type=str, help='Experiment name.')
    parser.add_argument('-d', '--dry', action='store_true', dest='dry', help='Dry run.')
    parser.add_argument('-hl', '--headless', action='store_true', dest='headless', help='Hide the UI.')
    args = parser.parse_args()
    if args.name not in experiments():
        print(f'No such experiment: {args.name}')
        print(f'Available experiments: {list(EXPERIMENTS.keys())}')
    else:
        experiments()[args.name](args)
|
# 20/02/2017, Rodrigo Santa Cruz
# Script to perform prediction of a model in a set of images
from keras.preprocessing import image
from keras import backend as K
from matplotlib import pyplot as plt
import objcls_model
import numpy as np
import argparse, re
def prediction(weights_path, imgs_path, show):
    """
    Run the trained classifier over a list of images and optionally display each.
    :param weights_path: keras weights file path
    :param imgs_path: list of images to perform prediction
    :param show: Plot visualization
    :return: list of predictions
    """
    ebase_cnn, classes = objcls_model.read_model_str(weights_path)
    print("Model: Base model={}, Classes={}".format(ebase_cnn.name, classes))
    # Rebuild the network architecture and load the trained weights.
    builder = objcls_model.CNNModelBuilder(
        ebase_cnn, len(classes), input_shape=(150, 150, 3), weights=weights_path)
    model = builder.inference_model()
    print("Model prediction")
    predictions = []
    for path in imgs_path:
        # Load, rescale to [0, 1] and add the batch dimension.
        img = image.load_img(path, target_size=(150, 150))
        batch = np.expand_dims(image.img_to_array(img) * (1. / 255), axis=0)
        scores = model.predict(batch)
        predictions.append(scores)
        # Human-readable "class:probability" summary for this image.
        pred_str = ", ".join(
            "{}:{:.2f}".format(cls, prob)
            for cls, prob in zip(classes, np.squeeze(scores)))
        print("{}: {}".format(path, pred_str))
        if show:
            plt.imshow(img)
            plt.title(pred_str)
            plt.show()
    # Release the TF graph/session held by keras.
    K.clear_session()
    return predictions
if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog="Prediction Demo")
    # Fixed the "weigths_path" typo; positional invocation is unchanged,
    # only the help text / attribute name are corrected (both uses updated).
    parser.add_argument("weights_path", type=str, help="Path to model weight file")
    parser.add_argument("imgs_path", type=str, nargs='*', help="Path to images samples")
    parser.add_argument("-show", default=False, action="store_true", help="Visualize predictions")
    args = parser.parse_args()
    print("Classification demo: {}".format(args))
    prediction(args.weights_path, args.imgs_path, args.show)
|
# Boolean Operators
'''
Statements about variables and values that use
the logical operators always give us a Boolean
value, such as True or False. Because of this,
these statements are called Boolean expressions.
All of our statements about pineapples and
zebras are Boolean expressions.
'''
pineapples = 5
zebras = 2

# Each comparison evaluates to a bool; print() shows "True" or "False".
is_less = zebras < pineapples          # less than
print(is_less)
is_equal = pineapples == zebras        # equal to
print(is_equal)
is_greater = pineapples > zebras       # greater than
print(is_greater)
is_different = pineapples != zebras    # not equal to
print(is_different)

'''
Multiple comparisons
You can use and and or to combine more than one
comparison. If you use and, both parts of the comparison
must be correct for the statement to be True. If you use
or, only one part needs to be correct.
'''
# Example: with `and`, both sides must hold for the result to be True.
age = 10
height = 18
ans = (age > 8) and (height > 53)
print(ans)
# Write a Python function to multiply all the numbers in a list and return the result.
def multiply(lst):
    """Return the product of all numbers in `lst` (1 for an empty list)."""
    from math import prod  # stdlib (3.8+) product beats a hand-rolled loop
    return prod(lst)
# Read `num` integers from stdin and print their product.
num = int(input("Enter the number of elements: "))
print("Enter the elements: ")
lst = []
for _ in range(num):
    lst.append(int(input()))
print(multiply(lst))
# -*- coding: utf-8 -*-
import re
import time
from urllib import request
import pymongo
# Fetch the raw HTML content of `url`, impersonating a logged-in desktop browser.
def get_html(url):
    """Return the response body of `url` as bytes.

    The hard-coded headers (including a session cookie) mimic a real browser
    session so douban does not reject the request.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/48.'
                      '0.2564.116 Chrome/48.0.2564.116 Safari/537.36',
        # was 'pgrade-insecure-requests': the leading 'u' was missing, so the
        # header reached the server under a bogus name.
        'upgrade-insecure-requests': '1',
        # NOTE(review): ':host' is not a valid HTTP/1.1 header name (looks like
        # a mangled 'Host'); urllib sends it verbatim — confirm intent before
        # changing it.
        ':host': 'www.douban.com',
        'cookie': 'bid="GhPm4Wltquk"; __utma=30149280.1923340589.1457364203.1457364203.1457444423.2; __utmz=30'
                  '149280.1457444423.2.2.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; ll="108309"; dbcl2="89'
                  '718263:pliQuc4rCo4"; ct=y; ck="hPcq"; ap=1; __ads_session=TNpLaTSpsAhx3f5K/QA=; push_noty_nu'
                  'm=0; push_doumail_num=0',
        'referer': 'https://www.douban.com/tag/2014/movie'
    }
    req = request.Request(url, headers=headers)
    # Context manager guarantees the connection is closed (was left open).
    with request.urlopen(req) as response:
        return response.read()
# Collect every movie URL from the list pages and write them to urls.txt.
def get_urls():
    """Scrape all 34 douban list pages and write one movie URL per line to urls.txt."""
    n = 1
    pages = 34
    # Compiled once: the pattern is loop-invariant.
    pattern = re.compile('<dd>.*?<a href="(.*?)" class="title" target="_blank">', re.S)
    # `with` ensures the file is flushed/closed (the original never closed it).
    with open('urls.txt', 'w+') as f:
        for i in range(pages):
            if i == 0:
                url = 'https://www.douban.com/tag/2014/movie'
            else:
                url = 'https://www.douban.com/tag/2014/movie?start=' + str(15*i)
            # get_html() returns bytes; findall() with a str pattern on bytes
            # raises TypeError in Python 3, so decode first (was missing).
            html = get_html(url).decode('utf-8')
            time.sleep(2)  # be polite to the server
            mains = re.findall(pattern, html)
            for main in mains:
                f.write(main + '\n')
                n += 1
# Visit each movie's main page and scrape its information.
def crawler():
    """Parse every movie page listed in urls.txt and insert one document per
    movie into the MongoDB collection `movie.info`."""
    # Compile the page-invariant patterns and open the DB connection once,
    # instead of once per movie (they were inside the loop).
    pattern = re.compile('"v:itemreviewed">(.*?)</span>.*?'
                         '"year">\((.*?)\)</span>.*?'
                         '制片国家/地区:</span> (.*?)<br/>', re.S)
    pattern_language = re.compile('语言:</span> (.*?)<br/>', re.S)
    pattern_director = re.compile('"v:directedBy">(.*?)</a>', re.S)
    pattern_average = re.compile('"v:average">(.*?)</strong>', re.S)
    pattern_votes = re.compile('"v:votes">(.*?)</span>', re.S)
    pattern_actors = re.compile('"v:starring">(.*?)</a>', re.S)
    pattern_genres = re.compile('"v:genre">(.*?)</span>', re.S)
    conn = pymongo.MongoClient('localhost', 27017)
    movie_db = conn.movie
    movie_info = movie_db.info
    n = 1
    for line in open('urls.txt'):
        html = get_html(line).decode('utf-8')
        details = re.findall(pattern, html)
        language = re.findall(pattern_language, html)
        director = re.findall(pattern_director, html)
        average = re.findall(pattern_average, html)
        votes = re.findall(pattern_votes, html)
        actors = re.findall(pattern_actors, html)  # some movies have no actors/rating/director/votes/language
        genres = re.findall(pattern_genres, html)
        values = {}
        # was `if details[0][0].find("'")`: str.find() returns -1 (truthy) when
        # the quote is absent and 0 (falsy) when it is the first character, so
        # the test was inverted. NOTE(review): replace("'", "'") is a no-op —
        # an escaped replacement was probably lost in transit; restore the
        # intended substitution here.
        if "'" in details[0][0]:
            values['title'] = details[0][0].replace("'", "'")
        else:
            values['title'] = details[0][0]
        values['year'] = details[0][1]
        values['country'] = details[0][2]
        values['genres'] = genres
        if director:
            values['director'] = director[0]
        if language:
            values['language'] = language[0]
        # was `if average[0]:` — IndexError for movies without a rating;
        # check the list itself like the other optional fields.
        if average:
            values['average'] = average[0]
        if votes:
            values['votes'] = votes[0]
        if actors:
            values['actors'] = actors
        movie_info.insert(values)  # legacy pymongo (<3.0) API; insert_one() in 3.x
        print('the %dth movie written' % n)
        n += 1
        time.sleep(2)
# Query the database.
def lookup():
    """Print the title of every movie stored in the `movie.info` collection."""
    client = pymongo.MongoClient('localhost', 27017)
    collection = client.movie.info
    for document in collection.find():
        print(document['title'])
    # db.info.remove()
    # print(info.find().count())
# get_urls()
# start = time.time()
# crawler()
# end = time.time()
# print('time:%ds' % (end - start))
# lookup()
|
import unittest
from unittest.mock import patch
from coalib.bearlib.languages.documentation.DocstyleDefinition import (
DocstyleDefinition)
class DocstyleDefinitionTest(unittest.TestCase):
    """Unit tests for DocstyleDefinition construction, loading and discovery."""

    Metadata = DocstyleDefinition.Metadata
    # Minimal metadata reused by every test case; exact values are irrelevant
    # to most assertions.
    dummy_metadata = Metadata(':param ', ':', ':return:')

    def test_fail_instantation(self):
        """The constructor rejects malformed markers and wrongly-typed args."""
        # Marker set with only two elements (valid sets have three).
        with self.assertRaises(ValueError):
            DocstyleDefinition('PYTHON', 'doxyGEN',
                               (('##', '#'),), self.dummy_metadata)
        # Marker set with four elements.
        with self.assertRaises(ValueError):
            DocstyleDefinition('WEIRD-PY',
                               'schloxygen',
                               (('##+', 'x', 'y', 'z'),),
                               self.dummy_metadata)
        # Second marker set too short, even though the first one is fine.
        with self.assertRaises(ValueError):
            DocstyleDefinition('PYTHON',
                               'doxygen',
                               (('##', '', '#'), ('"""', '"""')),
                               self.dummy_metadata)
        # Non-string language argument.
        with self.assertRaises(TypeError):
            DocstyleDefinition(123, ['doxygen'], (('"""', '"""')),
                               self.dummy_metadata)
        # Metadata argument of the wrong type.
        with self.assertRaises(TypeError):
            DocstyleDefinition('language', ['doxygen'], (('"""', '"""')),
                               'metdata')

    def test_properties(self):
        """Language/docstyle are lower-cased; markers normalize to tuples."""
        uut = DocstyleDefinition('C', 'doxygen',
                                 (('/**', '*', '*/'),), self.dummy_metadata)
        self.assertEqual(uut.language, 'c')
        self.assertEqual(uut.docstyle, 'doxygen')
        self.assertEqual(uut.markers, (('/**', '*', '*/'),))
        self.assertEqual(uut.metadata, self.dummy_metadata)
        # A list of marker sets is converted to a tuple.
        uut = DocstyleDefinition('PYTHON', 'doxyGEN',
                                 [('##', '', '#')], self.dummy_metadata)
        self.assertEqual(uut.language, 'python')
        self.assertEqual(uut.docstyle, 'doxygen')
        self.assertEqual(uut.markers, (('##', '', '#'),))
        self.assertEqual(uut.metadata, self.dummy_metadata)
        # Mixed list/tuple marker sets are normalized element-wise.
        uut = DocstyleDefinition('I2C',
                                 'my-custom-tool',
                                 (['~~', '/~', '/~'], ('>!', '>>', '>>')),
                                 self.dummy_metadata)
        self.assertEqual(uut.language, 'i2c')
        self.assertEqual(uut.docstyle, 'my-custom-tool')
        self.assertEqual(uut.markers, (('~~', '/~', '/~'), ('>!', '>>', '>>')))
        self.assertEqual(uut.metadata, self.dummy_metadata)
        # A single flat marker triple is wrapped into a one-element tuple.
        uut = DocstyleDefinition('Cpp', 'doxygen',
                                 ('~~', '/~', '/~'), self.dummy_metadata)
        self.assertEqual(uut.language, 'cpp')
        self.assertEqual(uut.docstyle, 'doxygen')
        self.assertEqual(uut.markers, (('~~', '/~', '/~'),))
        self.assertEqual(uut.metadata, self.dummy_metadata)

    def test_load(self):
        """load() validates its arguments and parses shipped definitions."""
        # Test unregistered docstyle.
        with self.assertRaises(FileNotFoundError):
            next(DocstyleDefinition.load('PYTHON', 'INVALID'))
        # Test unregistered language in existing docstyle.
        with self.assertRaises(KeyError):
            next(DocstyleDefinition.load('bake-a-cake', 'default'))
        # Test wrong argument type.
        with self.assertRaises(TypeError):
            next(DocstyleDefinition.load(123, ['list']))
        # Test python 3 default configuration and if everything is parsed
        # right.
        result = DocstyleDefinition.load('PYTHON3', 'default')
        self.assertEqual(result.language, 'python3')
        self.assertEqual(result.docstyle, 'default')
        self.assertEqual(result.markers, (('"""', '', '"""'),))
        self.assertEqual(result.metadata, self.dummy_metadata)

    def test_get_available_definitions(self):
        """All docstyle/language pairs shipped with coala are discoverable."""
        # Test if the basic supported docstyle-language pairs exist.
        expected = {('default', 'python'),
                    ('default', 'python3'),
                    ('default', 'java'),
                    ('doxygen', 'c'),
                    ('doxygen', 'cpp'),
                    ('doxygen', 'cs'),
                    ('doxygen', 'fortran'),
                    ('doxygen', 'java'),
                    ('doxygen', 'python'),
                    ('doxygen', 'python3'),
                    ('doxygen', 'tcl'),
                    ('doxygen', 'vhdl'),
                    ('doxygen', 'php'),
                    ('doxygen', 'objective-c')}
        real = set(DocstyleDefinition.get_available_definitions())
        # Subset check, so newly added definitions don't break the test.
        self.assertTrue(expected.issubset(real))

    @patch('coalib.bearlib.languages.documentation.DocstyleDefinition.iglob')
    @patch('coalib.bearlib.languages.documentation.DocstyleDefinition'
           '.ConfParser')
    def test_get_available_definitions_on_wrong_files(self,
                                                      confparser_mock,
                                                      iglob_mock):
        """Definitions whose filename contains uppercase letters are skipped."""
        # Test the case when a coalang was provided with uppercase letters.
        confparser_instance_mock = confparser_mock.return_value
        confparser_instance_mock.parse.return_value = ['X']
        iglob_mock.return_value = ['some/CUSTOMSTYLE.coalang',
                                   'SOME/xlang.coalang']
        self.assertEqual(list(DocstyleDefinition.get_available_definitions()),
                         [('xlang', 'x')])
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for multi-objective acquisition functions.
"""
from __future__ import annotations
import math
import warnings
from math import ceil
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from botorch.acquisition import monte_carlo # noqa F401
from botorch.acquisition.multi_objective.objective import (
IdentityMCMultiOutputObjective,
MCMultiOutputObjective,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.exceptions.warnings import BotorchWarning
from botorch.models.deterministic import GenericDeterministicModel
from botorch.models.fully_bayesian import MCMC_DIM
from botorch.models.model import Model
from botorch.sampling.get_sampler import get_sampler
from botorch.utils.gp_sampling import get_gp_samples
from botorch.utils.multi_objective.box_decompositions.box_decomposition import (
BoxDecomposition,
)
from botorch.utils.multi_objective.box_decompositions.box_decomposition_list import (
BoxDecompositionList,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.sampling import draw_sobol_samples
from botorch.utils.transforms import is_fully_bayesian, normalize_indices
from torch import Tensor
def get_default_partitioning_alpha(num_objectives: int) -> float:
    r"""Determines an approximation level based on the number of objectives.

    If `alpha` is 0, FastNondominatedPartitioning should be used. Otherwise,
    an approximate NondominatedPartitioning should be used with approximation
    level `alpha`.

    Args:
        num_objectives: the number of objectives.

    Returns:
        The approximation level `alpha`.
    """
    if num_objectives > 6:
        warnings.warn("EHVI works best for less than 7 objectives.", BotorchWarning)
    # Exact partitioning up to 4 objectives; coarsen exponentially beyond that.
    return 0.0 if num_objectives <= 4 else 10 ** (-8 + num_objectives)
def prune_inferior_points_multi_objective(
    model: Model,
    X: Tensor,
    ref_point: Tensor,
    objective: Optional[MCMultiOutputObjective] = None,
    constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
    num_samples: int = 2048,
    max_frac: float = 1.0,
    marginalize_dim: Optional[int] = None,
) -> Tensor:
    r"""Prune points from an input tensor that are unlikely to be pareto optimal.

    Given a model, an objective, and an input tensor `X`, this function returns
    the subset of points in `X` that have some probability of being pareto
    optimal, better than the reference point, and feasible. This function uses
    sampling to estimate the probabilities, the higher the number of points `n`
    in `X` the higher the number of samples `num_samples` should be to obtain
    accurate estimates.

    Args:
        model: A fitted model. Batched models are currently not supported.
        X: An input tensor of shape `n x d`. Batched inputs are currently not
            supported.
        ref_point: The reference point.
        objective: The objective under which to evaluate the posterior.
        constraints: A list of callables, each mapping a Tensor of dimension
            `sample_shape x batch-shape x q x m` to a Tensor of dimension
            `sample_shape x batch-shape x q`, where negative values imply
            feasibility.
        num_samples: The number of samples used to compute empirical
            probabilities of being the best point.
        max_frac: The maximum fraction of points to retain. Must satisfy
            `0 < max_frac <= 1`. Ensures that the number of elements in the
            returned tensor does not exceed `ceil(max_frac * n)`.
        marginalize_dim: A batch dimension that should be marginalized.
            For example, this is useful when using a batched fully Bayesian
            model.

    Returns:
        A `n' x d` with subset of points in `X`, where

            n' = min(N_nz, ceil(max_frac * n))

        with `N_nz` the number of points in `X` that have non-zero (empirical,
        under `num_samples` samples) probability of being pareto optimal.
    """
    if marginalize_dim is None and is_fully_bayesian(model):
        # TODO: Properly deal with marginalizing fully Bayesian models
        marginalize_dim = MCMC_DIM
    if X.ndim > 2:
        # TODO: support batched inputs (req. dealing with ragged tensors)
        raise UnsupportedError(
            "Batched inputs `X` are currently unsupported by "
            "prune_inferior_points_multi_objective"
        )
    max_points = math.ceil(max_frac * X.size(-2))
    if max_points < 1 or max_points > X.size(-2):
        raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}")
    # Monte-Carlo samples from the posterior at X; no gradients are needed
    # here, hence no_grad.
    with torch.no_grad():
        posterior = model.posterior(X=X)
    sampler = get_sampler(posterior, sample_shape=torch.Size([num_samples]))
    samples = sampler(posterior)
    if objective is None:
        objective = IdentityMCMultiOutputObjective()
    obj_vals = objective(samples, X=X)
    if obj_vals.ndim > 3:
        if obj_vals.ndim == 4 and marginalize_dim is not None:
            # Average out the designated batch dim (e.g. MCMC samples of a
            # fully Bayesian model).
            obj_vals = obj_vals.mean(dim=marginalize_dim)
        else:
            # TODO: support batched inputs (req. dealing with ragged tensors)
            raise UnsupportedError(
                "Models with multiple batch dims are currently unsupported by"
                " prune_inferior_points_multi_objective."
            )
    if constraints is not None:
        # A point is infeasible (per sample) if any constraint value is > 0.
        infeas = torch.stack([c(samples) > 0 for c in constraints], dim=0).any(dim=0)
        if infeas.ndim == 3 and marginalize_dim is not None:
            # make sure marginalize_dim is not negative
            if marginalize_dim < 0:
                # add 1 to the normalize marginalize_dim since we have already
                # removed the output dim
                marginalize_dim = (
                    1 + normalize_indices([marginalize_dim], d=infeas.ndim)[0]
                )
            # Majority vote across the marginalized dim for feasibility.
            infeas = infeas.float().mean(dim=marginalize_dim).round().bool()
        # set infeasible points to be the ref point
        obj_vals[infeas] = ref_point
    # A point "counts" in a sample iff it is non-dominated AND strictly better
    # than the reference point in every objective.
    pareto_mask = is_non_dominated(obj_vals, deduplicate=False) & (
        obj_vals > ref_point
    ).all(dim=-1)
    # Empirical probability of Pareto-optimality per point (mean over samples).
    probs = pareto_mask.to(dtype=X.dtype).mean(dim=0)
    idcs = probs.nonzero().view(-1)
    if idcs.shape[0] > max_points:
        # Keep only the max_points most-probable points.
        counts, order_idcs = torch.sort(probs, descending=True)
        idcs = order_idcs[:max_points]
    # The objective may emit several rows per input point (obj rows / X rows);
    # map row indices back to input indices and de-duplicate.
    effective_n_w = obj_vals.shape[-2] // X.shape[-2]
    idcs = (idcs / effective_n_w).long().unique()
    return X[idcs]
def compute_sample_box_decomposition(
    pareto_fronts: Tensor,
    partitioning: BoxDecomposition = DominatedPartitioning,
    maximize: bool = True,
    num_constraints: Optional[int] = 0,
) -> Tensor:
    r"""Computes the box decomposition associated with some sampled optimal
    objectives. This also supports the single-objective and constrained optimization
    setting. An objective `y` is feasible if `y <= 0`.

    To take advantage of batch computations, we pad the hypercell bounds with a
    `2 x (M + K)`-dim Tensor of zeros `[0, 0]`.

    Args:
        pareto_fronts: A `num_pareto_samples x num_pareto_points x M` dim Tensor
            containing the sampled optimal set of objectives.
        partitioning: A `BoxDecomposition` module that is used to obtain the
            hyper-rectangle bounds for integration. In the unconstrained case, this
            gives the partition of the dominated space. In the constrained case, this
            gives the partition of the feasible dominated space union the infeasible
            space.
        maximize: If true, the box-decomposition is computed assuming maximization.
        num_constraints: The number of constraints `K`.

    Returns:
        A `num_pareto_samples x 2 x J x (M + K)`-dim Tensor containing the bounds for
        the hyper-rectangles. The number `J` is the smallest number of boxes needed
        to partition all the Pareto samples.
    """
    tkwargs = {"dtype": pareto_fronts.dtype, "device": pareto_fronts.device}
    # We will later compute `norm.log_prob(NEG_INF)`, this is `-inf` if `NEG_INF` is
    # too small.
    NEG_INF = -1e10

    if pareto_fronts.ndim != 3:
        raise UnsupportedError(
            "Currently this only supports Pareto fronts of the shape "
            "`num_pareto_samples x num_pareto_points x num_objectives`."
        )

    num_pareto_samples = pareto_fronts.shape[0]
    M = pareto_fronts.shape[-1]
    K = num_constraints
    ref_point = torch.ones(M, **tkwargs) * NEG_INF
    # `weight` flips signs so the computation below can always assume
    # maximization internally.
    weight = 1.0 if maximize else -1.0

    if M == 1:
        # Only consider a Pareto front with one element.
        extreme_values = weight * torch.max(weight * pareto_fronts, dim=-2).values
        ref_point = weight * ref_point.expand(extreme_values.shape)

        if maximize:
            hypercell_bounds = torch.stack(
                [ref_point, extreme_values], axis=-2
            ).unsqueeze(-1)
        else:
            hypercell_bounds = torch.stack(
                [extreme_values, ref_point], axis=-2
            ).unsqueeze(-1)
    else:
        # One box decomposition per Pareto-front sample.
        bd_list = []
        for i in range(num_pareto_samples):
            bd_list = bd_list + [
                partitioning(ref_point=ref_point, Y=weight * pareto_fronts[i, :, :])
            ]

        # `num_pareto_samples x 2 x J x (M + K)`
        hypercell_bounds = (
            BoxDecompositionList(*bd_list).get_hypercell_bounds().movedim(0, 1)
        )

        # If minimizing, then the bounds should be negated and flipped
        if not maximize:
            hypercell_bounds = weight * torch.flip(hypercell_bounds, dims=[1])

    # Add an extra box for the inequality constraint.
    if K > 0:
        # `num_pareto_samples x 2 x (J - 1) x K`
        feasible_boxes = torch.zeros(
            hypercell_bounds.shape[:-1] + torch.Size([K]), **tkwargs
        )

        feasible_boxes[..., 0, :, :] = NEG_INF
        # `num_pareto_samples x 2 x (J - 1) x (M + K)`
        hypercell_bounds = torch.cat([hypercell_bounds, feasible_boxes], dim=-1)

        # `num_pareto_samples x 2 x 1 x (M + K)`
        # The extra box covers the whole infeasible region (constraint > 0).
        infeasible_box = torch.zeros(
            hypercell_bounds.shape[:-2] + torch.Size([1, M + K]), **tkwargs
        )
        infeasible_box[..., 1, :, M:] = -NEG_INF
        infeasible_box[..., 0, :, 0:M] = NEG_INF
        infeasible_box[..., 1, :, 0:M] = -NEG_INF

        # `num_pareto_samples x 2 x J x (M + K)`
        hypercell_bounds = torch.cat([hypercell_bounds, infeasible_box], dim=-2)

    # `num_pareto_samples x 2 x J x (M + K)`
    return hypercell_bounds
def random_search_optimizer(
    model: GenericDeterministicModel,
    bounds: Tensor,
    num_points: int,
    maximize: bool,
    pop_size: int = 1024,
    max_tries: int = 10,
) -> Tuple[Tensor, Tensor]:
    r"""Optimize a function via random search.

    Args:
        model: The model.
        bounds: A `2 x d`-dim Tensor containing the input bounds.
        num_points: The number of optimal points to be outputted.
        maximize: If true, we consider a maximization problem.
        pop_size: The number of function evaluations per try.
        max_tries: The maximum number of tries.

    Returns:
        A two-element tuple containing

        - A `num_points x d`-dim Tensor containing the collection of optimal inputs.
        - A `num_points x M`-dim Tensor containing the collection of optimal
            objectives.
    """
    tkwargs = {"dtype": bounds.dtype, "device": bounds.device}
    # `weight` flips signs so the non-dominated filter always maximizes.
    weight = 1.0 if maximize else -1.0
    optimal_inputs = torch.tensor([], **tkwargs)
    optimal_outputs = torch.tensor([], **tkwargs)
    num_tries = 0
    # Any value > 1 forces at least one loop iteration.
    ratio = 2
    while ratio > 1 and num_tries < max_tries:
        # Evaluate a quasi-random batch and merge it with the points kept so far.
        X = draw_sobol_samples(bounds=bounds, n=pop_size, q=1).squeeze(-2)
        Y = model.posterior(X).mean
        X_aug = torch.cat([optimal_inputs, X], dim=0)
        Y_aug = torch.cat([optimal_outputs, Y], dim=0)
        # Keep only the non-dominated subset of the union.
        pareto_mask = is_non_dominated(weight * Y_aug)
        optimal_inputs = X_aug[pareto_mask]
        optimal_outputs = Y_aug[pareto_mask]
        num_found = len(optimal_inputs)
        # ratio <= 1 once at least `num_points` Pareto points were found.
        ratio = ceil(num_points / num_found)
        num_tries = num_tries + 1
    # If maximum number of retries exceeded throw out a runtime error.
    if ratio > 1:
        error_text = f"Only found {num_found} optimal points instead of {num_points}."
        raise RuntimeError(error_text)
    else:
        return optimal_inputs[:num_points], optimal_outputs[:num_points]
def sample_optimal_points(
    model: Model,
    bounds: Tensor,
    num_samples: int,
    num_points: int,
    optimizer: Callable[
        [GenericDeterministicModel, Tensor, int, bool, Any], Tuple[Tensor, Tensor]
    ] = random_search_optimizer,
    num_rff_features: int = 512,
    maximize: bool = True,
    optimizer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[Tensor, Tensor]:
    r"""Compute a collection of optimal inputs and outputs from samples of a Gaussian
    Process (GP).

    Steps:
    (1) The samples are generated using random Fourier features (RFFs).
    (2) The samples are optimized sequentially using an optimizer.

    TODO: We can generalize the GP sampling step to accommodate for other sampling
        strategies rather than restricting to RFFs e.g. decoupled sampling.

    TODO: Currently this defaults to random search optimization, might want to
        explore some other alternatives.

    Args:
        model: The model. This does not support models which include fantasy
            observations.
        bounds: A `2 x d`-dim Tensor containing the input bounds.
        num_samples: The number of GP samples.
        num_points: The number of optimal points to be outputted.
        optimizer: A callable that solves the deterministic optimization problem.
        num_rff_features: The number of random Fourier features.
        maximize: If true, we consider a maximization problem.
        optimizer_kwargs: The additional arguments for the optimizer.

    Returns:
        A two-element tuple containing

        - A `num_samples x num_points x d`-dim Tensor containing the collection of
            optimal inputs.
        - A `num_samples x num_points x M`-dim Tensor containing the collection of
            optimal objectives.
    """
    tkwargs = {"dtype": bounds.dtype, "device": bounds.device}
    M = model.num_outputs
    d = bounds.shape[-1]
    if M == 1:
        # A single objective has a single optimum, not a Pareto set.
        if num_points > 1:
            raise UnsupportedError(
                "For single-objective optimization `num_points` should be 1."
            )
    if optimizer_kwargs is None:
        optimizer_kwargs = {}
    pareto_sets = torch.zeros((num_samples, num_points, d), **tkwargs)
    pareto_fronts = torch.zeros((num_samples, num_points, M), **tkwargs)
    for i in range(num_samples):
        # Draw one deterministic GP sample via RFFs and optimize it to obtain
        # one Pareto set / front.
        sample_i = get_gp_samples(
            model=model, num_outputs=M, n_samples=1, num_rff_features=num_rff_features
        )
        ps_i, pf_i = optimizer(
            model=sample_i,
            bounds=bounds,
            num_points=num_points,
            maximize=maximize,
            **optimizer_kwargs,
        )
        pareto_sets[i, ...] = ps_i
        pareto_fronts[i, ...] = pf_i
    return pareto_sets, pareto_fronts
|
from setuptools import setup
from setuptools import find_packages
def readme():
    """Return the contents of README.md (used as the package long description)."""
    # Explicit encoding: the platform default (e.g. cp1252 on Windows) can
    # fail on non-ASCII characters in the README.
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata. Fixes: `classifiers` must be a list (a tuple is rejected
# by current setuptools), and `long_description` now reuses the readme()
# helper above instead of re-opening README.md without closing it.
setup(
    name='arcticdem',
    version='0.2.0',
    packages=find_packages(),
    package_data={'arcticdem': ['*.*']},
    url='https://github.com/samapriya/ArcticDEM-Batch-Pipeline',
    license='Apache 2.0',
    install_requires=['requests>=2.21.1',
                      'progressbar2>=3.38.0',
                      'beautifulsoup4',
                      'retrying>=1.3.3',
                      'Rtree-linux>=0.9.4;platform_system=="Linux"',
                      'pyproj>=1.9.5.1;platform_system!="Windows"',
                      'shapely>=1.6.4;platform_system!="Windows"',
                      'fiona>=1.8.6;platform_system!="Windows"',
                      'geopandas>=0.5.0;platform_system!="Windows"',
                      ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
        'Topic :: Scientific/Engineering :: GIS',
    ],
    long_description=readme(),
    long_description_content_type='text/markdown',
    python_requires='>=3.4',
    author='Samapriya Roy',
    author_email='samapriya.roy@gmail.com',
    description='ArcticDEM Batch Download & Processing Tools',
    entry_points={
        'console_scripts': [
            'arcticdem=arcticdem.arcticdem:main',
        ],
    },
)
|
from flask import Flask,url_for,request,render_template,jsonify,send_file
from flask_bootstrap import Bootstrap
import json
import extract
import highlight
# NLP Pkgs
import spacy
from textblob import TextBlob
# Load the spaCy English pipeline once at import time; reused by /analyze.
nlp = spacy.load('en_core_web_sm')
# WordCloud & Matplotlib Packages
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from io import BytesIO
import random
import time
# Module-level holder for the most recent raw input text.
rawtext=''''''
# Initialize App
app = Flask(__name__)
Bootstrap(app)  # registers flask-bootstrap assets on the app
@app.route('/')
def index():
    """Serve the landing page."""
    page = render_template('index.html')
    return page
@app.route('/doc')
def doc():
    """Serve the highlighted-document page generated by /analyze."""
    page = render_template('doc.html')
    return page
@app.route('/analyze',methods=['GET','POST'])
def analyze():
    """Run the full NLP pipeline on text submitted through the form.

    On POST: reads ``rawtext`` from the form, extracts key phrases plus
    "important" and "bold" terms, writes a highlighted HTML copy of the
    text to ``templates/doc.html`` and the raw text to ``text.txt``,
    builds a per-token JSON breakdown, and re-renders ``index.html`` with
    the results and the elapsed processing time.

    On GET: renders the empty form. (Previously a GET fell off the end of
    the function and returned None, which made Flask raise a 500 error.)
    """
    start = time.time()
    if request.method == 'POST':
        rawtext = request.form['rawtext']
        # Phrase/term extraction; de-duplicate the two term lists.
        phrases=extract.phrases(rawtext)
        important=list(set(extract.important(rawtext)))
        bold=list(set(extract.bold(rawtext)))
        text=highlight.highlight(rawtext,phrases,bold,important)
        initial="""
<html>
<head>
<title>Text Highlighted</title>
<style>
a { color: #ff6600; transition: .5s; -moz-transition: .5s; -webkit-transition: .5s; -o-transition: .5s; font-family: 'Muli', sans-serif; }
a:hover { text-decoration: underline }
h1 { padding-bottom: 15px }
h1 a { font-family: 'Open Sans Condensed', sans-serif; font-size: 48px; color: #333; }
h1 a:hover { color: #ff6600; text-decoration: none; }
p { color: #333; font-family: 'Muli', sans-serif; margin-bottom: 15px; }
a.more-link { color: white; font-weight: bold; font-size: 14px; font-family: Arial, Helvetica, sans-serif; padding: 3px 10px; background-color: #ff6600; border-radius: 5px; float: right; }
a.more-link:hover { text-decoration: none; background-color: #666; border-radius: 0px; }
</style>
</head>
<body>
<p>"""
        # Bug fix: the closing tag was previously written '</html' (missing '>').
        towrite=initial+text+"""</p></body></html>"""
        # 'with' guarantees the files are closed even if a write fails.
        with open('templates/doc.html','w',encoding="utf8") as f:
            f.write(towrite)
        with open('text.txt','w') as f:
            f.write(rawtext)
        # Sentiment: only subjectivity is consumed by the template below.
        blob = TextBlob(rawtext)
        blob_subjectivity = blob.sentiment.subjectivity
        # Per-token grammatical details, serialized as pretty-printed JSON.
        docx = nlp(rawtext)
        allData = [('"Token":"{}","Tag":"{}","POS":"{}","Dependency":"{}","Lemma":"{}","Shape":"{}","Alpha":"{}","IsStopword":"{}"'.format(token.text,token.tag_,token.pos_,token.dep_,token.lemma_,token.shape_,token.is_alpha,token.is_stop)) for token in docx ]
        result_json = json.dumps(allData, sort_keys = False, indent = 2)
        final_time = time.time() - start
        # NOTE(review): blob_sentiment is fed the 'bold' term list, not the
        # polarity score — kept as-is to preserve existing template behavior;
        # verify against index.html.
        return render_template('index.html',ctext=rawtext,custom_tokens=phrases,custom_postagging=bold,custom_namedentities=important,custom_wordinfo=text,blob_sentiment=bold,blob_subjectivity=blob_subjectivity,final_time=final_time,result_json=result_json)
    # Plain GET: just show the input form.
    return render_template('index.html')
# API ROUTES
@app.route('/api')
def basic_api():
    """Serve the REST API documentation page."""
    api_docs_page = 'restfulapidocs.html'
    return render_template(api_docs_page)
# API FOR TOKENS
@app.route('/api/tokens/<string:mytext>',methods=['GET'])
def api_tokens(mytext):
    """Return the input text and its spaCy token strings as JSON."""
    parsed = nlp(mytext)
    token_texts = [tok.text for tok in parsed]
    return jsonify(mytext, token_texts)
# API FOR LEMMA
@app.route('/api/lemma/<string:mytext>',methods=['GET'])
def api_lemma(mytext):
    """Return 'Token:…,Lemma:…' pairs for the (whitespace-stripped) input."""
    parsed = nlp(mytext.strip())
    lemma_pairs = ['Token:{},Lemma:{}'.format(tok.text, tok.lemma_) for tok in parsed]
    return jsonify(mytext, lemma_pairs)
# API FOR NAMED ENTITY
@app.route('/api/ner/<string:mytext>',methods=['GET'])
def api_ner(mytext):
    """Return named entities (text, label) found in the input as JSON."""
    parsed = nlp(mytext)
    entities = [(ent.text, ent.label_) for ent in parsed.ents]
    return jsonify(mytext, entities)
# API FOR NAMED ENTITY
@app.route('/api/entities/<string:mytext>',methods=['GET'])
def api_entities(mytext):
    """Alias of /api/ner: return (text, label) named entities as JSON."""
    parsed = nlp(mytext)
    entities = [(ent.text, ent.label_) for ent in parsed.ents]
    return jsonify(mytext, entities)
# API FOR SENTIMENT ANALYSIS
@app.route('/api/sentiment/<string:mytext>',methods=['GET'])
def api_sentiment(mytext):
    """Return the text, its word list and its TextBlob sentiment as JSON."""
    analysis = TextBlob(mytext)
    payload = [mytext, analysis.words, analysis.sentiment]
    return jsonify(payload)
# API FOR MORE WORD ANALYSIS
@app.route('/api/nlpiffy/<string:mytext>',methods=['GET'])
def nlpifyapi(mytext):
    """Return a full per-token grammatical breakdown of the input as JSON."""
    parsed = nlp(mytext.strip())
    breakdown = [
        'Token:{},Tag:{},POS:{},Dependency:{},Lemma:{},Shape:{},Alpha:{},IsStopword:{}'.format(tok.text,tok.tag_,tok.pos_,tok.dep_,tok.lemma_,tok.shape_,tok.is_alpha,tok.is_stop)
        for tok in parsed
    ]
    return jsonify(mytext, breakdown)
# IMAGE WORDCLOUD
@app.route('/images')
def imagescloud():
    """Explain how to request a word-cloud image (usage hint only)."""
    usage_hint = "Enter text into url eg. /fig/yourtext "
    return usage_hint
@app.route('/images/<mytext>')
def images(mytext):
    """Render the index page with the URL text as the page title."""
    return render_template("index.html", title=mytext)
@app.route('/fig/<string:mytext>')
def fig(mytext):
    """Render *mytext* as a word-cloud PNG and stream it back.

    Generates a WordCloud image into an in-memory buffer and returns it
    with an image/png mimetype.
    """
    plt.figure(figsize=(20,10))
    wordcloud = WordCloud(background_color='white', mode = "RGB", width = 2000, height = 1000).generate(mytext)
    plt.imshow(wordcloud)
    plt.axis("off")
    img = BytesIO()
    plt.savefig(img)
    # Bug fix: close the figure — previously each request leaked a live
    # matplotlib figure, growing memory across requests.
    plt.close()
    img.seek(0)
    return send_file(img, mimetype='image/png')
@app.route('/about')
def about():
    """Serve the about page."""
    about_page = 'about.html'
    return render_template(about_page)
# Run the Flask development server when executed directly.
# NOTE(review): debug=True enables the interactive debugger — not safe for
# production deployment.
if __name__ == '__main__':
    app.run(debug=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.