text stringlengths 38 1.54M |
|---|
# Example observation sequence and HMM parameters.
seq = 'ACTGTC'
# Transition probabilities; each state also has an 'End' exit probability,
# and 'Begin' holds the initial distribution.
T = {'Y': {'Y': 0.7, 'N': 0.2, 'End': 0.1},
     'N': {'Y': 0.1, 'N': 0.8, 'End': 0.1},
     'Begin': {'Y': 0.2, 'N': 0.8}}
# Emission probabilities per state.  BUG FIX: E['N']['T'] was 0.1, which
# made state N's emissions sum to 0.85 instead of 1.0; the uniform 0.25
# matches the other three symbols.
E = {'Y': {'A': 0.1, 'G': 0.4, 'C': 0.4, 'T': 0.1},
     'N': {'A': 0.25, 'G': 0.25, 'C': 0.25, 'T': 0.25}}
states = ['Y', 'N']
def viterbi(seq, T, E, states):
    """Viterbi decoding for an HMM with explicit Begin/End transitions.

    Parameters
    ----------
    seq : str
        Observation sequence; symbols must be keys of the emission dicts.
    T : dict
        Transition probabilities, including 'Begin' and per-state 'End'.
    E : dict
        Emission probabilities per state.
    states : list
        Hidden state names.

    Prints the dynamic-programming matrix and the best state path, then
    returns the probability of the single best path including the End
    transition.  BUG FIX: the original returned whichever end score was
    computed last (not the maximum) and built its "traceback" by
    appending states during the forward pass, which does not describe
    any single path; both are corrected here with real backpointers.
    """
    n = len(seq)
    matrix = [[0.0] * n for _ in states]
    back = [[0] * n for _ in states]

    # Initialisation: Begin -> state, emit the first symbol.
    for i in range(len(states)):
        matrix[i][0] = T['Begin'][states[i]] * E[states[i]][seq[0]]

    # Recursion: remember the best predecessor of each cell.
    for t in range(1, n):
        for j in range(len(states)):
            best_k, best_score = 0, -1.0
            for k in range(len(states)):
                score = matrix[k][t - 1] * T[states[k]][states[j]]
                if score > best_score:
                    best_k, best_score = k, score
            matrix[j][t] = best_score * E[states[j]][seq[t]]
            back[j][t] = best_k

    # Termination: best state -> End transition (take the max over states).
    end_k, endscore = 0, -1.0
    for k in range(len(states)):
        score = matrix[k][n - 1] * T[states[k]]['End']
        if score > endscore:
            end_k, endscore = k, score

    # Trace the single best path back from the end.
    traceback = [states[end_k] + 'E']
    k = end_k
    for t in range(n - 1, 0, -1):
        k = back[k][t]
        traceback.insert(0, states[k])

    for i in range(len(states)):
        print(matrix[i])
    print(traceback)
    return endscore
# Run the decoder on the example sequence and print the best end score.
print(viterbi(seq, T, E, states))
|
import math
Matrix=[] # 10 x 28 x 28 per-digit pixel likelihoods, filled by train()
Freq=[] # per-digit counts of the training labels, filled by train()
laplace=1 # Laplace smoothing constant (see genMatrix)
V=3 # smoothing denominator multiplier — presumably the 3 pixel classes (' ', '+', '#'); TODO confirm
_lines=28 # rows per image in the image text files
#
#returns array of frequencies of the numbers
#
def frequencies(values):
    """Count occurrences of each digit 0-9 in `values` (digit strings)."""
    counts = [0] * 10
    for digit in map(int, values):
        counts[digit] += 1
    return counts
#
#reads the files with the images of text
#returns a list will all the images as given to us
def read_images(filename):
    """Read a text file of stacked images into a list of images.

    Every `_lines` rows form one image; each image is a list of rows and
    each row is the list of characters of one line (the trailing newline
    included).  BUG FIX: the file handle was never closed; it is now
    managed with a context manager.
    """
    global _lines
    images = []
    ctr = 0
    curr_image = []
    with open(filename, "r") as file:
        for line in file:
            # flush the previous image once _lines rows have been collected
            if ctr == _lines:
                ctr = 0
                images.append(curr_image)
                curr_image = []
            ctr += 1
            curr_image.append(list(line))
    images.append(curr_image)
    return images
#
# returns values that are stored in file
#
def read_vals(filename):
    """Return the first character of every line in `filename` (one label per line)."""
    with open(filename, 'r') as handle:
        return [line[0] for line in handle]
#generating the likelihood matrix
def genMatrix( images , values , freq ):
    """Build a 10 x 28 x 28 matrix of smoothed per-pixel ink likelihoods.

    For each digit class and each pixel, ink is accumulated over that
    digit's training images ('+' counts 0.33, '#' counts 1.0), then
    Laplace-smoothed with the module globals `laplace` and `V`.
    BUG FIX: the sample loop was hard-coded to range(5000); it now uses
    len(values) so any training-set size works (identical for 5000).
    """
    matrix = []
    global V
    global laplace
    n_samples = len(values)
    for number in range(10):
        count = freq[number]
        row = []
        for i in range(0, 28):
            col = [0.0] * 28
            for j in range(0, 28):
                ctr = 0.0
                for k in range(n_samples):
                    if int(values[k]) == number and images[k][i][j] == '+':
                        ctr += 0.33
                    elif int(values[k]) == number and images[k][i][j] == '#':
                        ctr += 1
                # clamp away from zero so math.log never fails downstream
                col[j] = max(((float(ctr + laplace)) / float(count + V * laplace)), 0.000000001)
            row.append(col)
        matrix.append(row)
    return matrix
def getMax(poss):
    """Index of the largest score in `poss`."""
    best = max(poss)
    return poss.index(best)
def train():
    """Load the training images/labels and fill the module globals:
    Freq (per-digit label counts) and Matrix (pixel likelihoods)."""
    global Freq
    global Matrix
    print ('--------------TRAINING---------------')
    images = read_images('trainingimages')
    values = read_vals('traininglabels')
    Freq = frequencies(values)
    print(len(images), len(values))
    Matrix = genMatrix(images, values, Freq)
def test():
    """Classify every test image with the trained model, then write the
    best-scoring correctly-classified exemplar of each digit to
    results_max.txt.

    Relies on globals filled by train(): Matrix (per-digit 28x28 pixel
    likelihoods).  Ink weighting: '+' contributes 0.33 of a
    log-likelihood, '#' a full one, blank uses log(1 - p).
    """
    global Freq
    global Matrix
    print('--------------TESTING---------------')
    mat_len = len(Matrix)
    print(mat_len)
    testImages = read_images('testimages')
    testVals = read_vals("testlabels")
    test_results = []
    for curr_image in testImages:
        poss = [0.0] * mat_len
        for digit in range(mat_len):
            result = 0.0
            for i in range(28):
                for j in range(28):
                    if curr_image[i][j] == "+":
                        result += 0.33 * math.log(Matrix[digit][i][j])
                    elif curr_image[i][j] == '#':
                        result += math.log(Matrix[digit][i][j])
                    else:
                        result += math.log(1 - Matrix[digit][i][j])
            poss[digit] = result
        test_results.append(getMax(poss))
    print(len(test_results))
    ctr = 0
    d = open("results_max.txt", 'w+')
    fr = [0] * 10
    suc = [0] * 10
    idx_min = [0] * 10
    idx_values = [-99999999.0 for i in range(10)]
    # Re-score each correctly classified image against its own digit and
    # remember, per digit, the image with the highest score.
    for idx in range(len(test_results)):
        x = int(test_results[idx])
        if int(test_results[idx]) == int(testVals[idx]):
            # NOTE(review): unexplained 8.0 offset kept from the original.
            result = 8.0
            for i in range(28):
                for j in range(28):
                    if testImages[idx][i][j] == "+":
                        # BUG FIX: the original indexed Matrix[digit], a stale
                        # loop variable left over from the classification pass;
                        # the score must use this image's digit x.
                        result += 0.33 * math.log(Matrix[x][i][j])
                    elif testImages[idx][i][j] == '#':
                        result += math.log(Matrix[x][i][j])
                    else:
                        result += math.log(1 - Matrix[x][i][j])
            if idx_values[x] < result:
                idx_min[x] = idx
                idx_values[x] = result
    d.write("Image for 0\n")
    for m in range(10):
        for i in range(28):
            for j in range(28):
                d.write(testImages[idx_min[m]][i][j])
            d.write("\n")
        d.write(str(idx_min[m]))
        d.write("-----------------\n")
    d.close()
    '''
    d.write("Total Successes:\t"+str(ctr))
    d.write("\nPercent:\t"+str(float(ctr)/10))
    d.write("\n")
    for idx in range(10):
        d.write(str(idx)+ " Accuracy: "+ str(float(suc[idx])/fr[idx]))
        d.write("\n")'''
    #get_conf(test_results , testVals)
def get_conf(results, actual):
    """Build and print a 10x10 confusion matrix: rows are predictions,
    columns are the true labels.  Returns the matrix."""
    conf = [[0] * 10 for _ in range(10)]
    for i, predicted in enumerate(results):
        conf[int(predicted)][int(actual[i])] += 1
    print(conf)
    for row in conf:
        print(row)
    return conf
def main():
    # Train then evaluate; the two phases communicate via module globals.
    train()
    test()
# BUG FIX: the guard was misspelled '__main_' so main() never ran.
if __name__ == '__main__':
    main()
|
# The teacher's approach: stub the class first, use it elsewhere,
# and only come back to implement it at the end.
# What follows is that usage-first process.
# 1. First, two redprints (custom objects) are instantiated elsewhere.
class Redprint:
    """A "red print": a Flask-Blueprint-like grouping one level below a
    blueprint.  View functions queue themselves via route(); register()
    later replays the queue onto a real blueprint with add_url_rule.
    """
    def __init__(self,name):
        self.name = name
        self.mound =[] # queued view records: (func, rule, options) tuples

    # route() mimics Blueprint.route: it only records the view function
    # here; the real add_url_rule call happens later in register().
    def route(self,rule, **options):
        def decorator(f): # f is the decorated view function
            self.mound.append((f,rule,options))
            return f
        return decorator

    def register(self,bp,url_prefix=None):
        """Replay every queued view onto blueprint `bp`.

        The effective URL is: blueprint prefix / redprint prefix / rule.
        `url_prefix` defaults to '/<redprint name>'.
        """
        if url_prefix is None:
            url_prefix = '/' + self.name
        for f,rule,options in self.mound: # unpack the queued (func, rule, options) record
            endpoint = options.pop("endpoint", f.__name__) # view name is the default endpoint
            bp.add_url_rule(url_prefix + rule,endpoint,f,**options)
# Route registration is resolved url -> endpoint -> view function when a
# request comes in, and reversed (view function -> endpoint -> url) when
# building urls.  The redprint does the same job as a blueprint: it
# receives view-function registrations and forwards them.
|
import subprocess
""" To run a process and read all of its output, set the
stdout value to PIPE and call communicate().
the PIPE is just the common one-way IPC that we've been using: |
"""
print('read: ')
# Spawn a child process whose stdout is captured through a pipe;
# communicate() reads everything and waits for the process to exit.
proc = subprocess.Popen(
    ['echo', '"to stdout"'],
    stdout=subprocess.PIPE,
)
out_bytes, _ = proc.communicate()
stdout_value = out_bytes.decode('utf-8')
# repr() shows the captured text including its trailing newline.
print('stdout: ', repr(stdout_value))
|
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from page.base_page import BasePage
class Contact(BasePage):
    """Page object for the WeCom admin "contacts" frame: add and edit members.

    NOTE(review): the `find_element_by_class_name` calls below are the
    legacy Selenium 3 API (removed in Selenium 4) — confirm the pinned
    selenium version before upgrading.
    """
    _base_url = 'https://work.weixin.qq.com/wework_admin/frame#contacts'

    def add_member(self):
        """Click the "add member" entry (link text is the Chinese UI copy)."""
        self.driver.find_element(By.LINK_TEXT, "添加成员").click()

    def input_info(self):
        """Fill the whole "add member" form with fixed sample data and save."""
        time.sleep(2)  # NOTE(review): fixed sleep; an explicit wait would be more robust
        # name
        self.driver.find_element(By.ID, "username").click()
        self.driver.find_element(By.ID, "username").send_keys("Add_Name")
        # alias
        self.driver.find_element(By.ID, "memberAdd_english_name").click()
        self.driver.find_element(By.ID, "memberAdd_english_name").send_keys("AliasName")
        # account id
        self.driver.find_element(By.ID, "memberAdd_acctid").click()
        self.driver.find_element(By.ID, "memberAdd_acctid").send_keys("00002")
        # gender radio button (value 2); NOTE(review): not clicked — confirm intent
        self.driver.find_element(By.CSS_SELECTOR, '.ww_radio[value="2"]')
        # mobile number (country code 852 picked from the dropdown first)
        self.driver.find_element(By.CSS_SELECTOR, ".ww_telInput_zipCode_input").click()
        self.driver.find_element(By.CSS_SELECTOR, 'li[data-value="852"]').click()
        self.driver.find_element(By.ID, "memberAdd_phone").click()
        self.driver.find_element(By.ID, "memberAdd_phone").send_keys("13456789999")
        # landline
        self.driver.find_element(By.ID, "memberAdd_telephone").click()
        self.driver.find_element(By.ID, "memberAdd_telephone").send_keys("13456789999")
        # email
        self.driver.find_element(By.ID, "memberAdd_mail").click()
        self.driver.find_element(By.ID, "memberAdd_mail").send_keys("qq@qq.com")
        # address
        self.driver.find_element(By.ID, "memberEdit_address").send_keys("GD")
        # TODO: choose department
        # job title
        self.driver.find_element(By.ID, "memberAdd_title").click()
        self.driver.find_element(By.ID, "memberAdd_title").send_keys("CTO")
        # avatar upload: pick a local file, wait for the cropper, confirm
        self.driver.find_element(By.CSS_SELECTOR, '#js_upload_file').click()
        self.driver.find_element(By.CSS_SELECTOR, '.js_file').send_keys(r"C:\Users\Alice\Desktop\g.jpg")
        WebDriverWait(self.driver, 10).until(
            lambda x: x.find_element_by_class_name('cropper-face'))
        self.driver.find_element(By.CSS_SELECTOR, '.qui_btn.ww_btn.ww_btn_Blue.js_save').click()
        # save the member form
        self.driver.find_element(By.CSS_SELECTOR, ".js_btn_save").click()

    def modify_member(self, original_name, new_name):
        """Open the member whose row contains `original_name`, rename to `new_name`, save."""
        # wait for the member-list page
        WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_class_name('js_title'))
        xpath = "//td[contains(.," + '"' + original_name + '"' + ")]"
        self.driver.find_element(By.XPATH, str(xpath)).click()
        # self.driver.find_element(By.XPATH, '//td[contains(.,"只好")]').click()
        WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_class_name('js_edit'))
        self.driver.find_element(By.CSS_SELECTOR, '.js_edit').click()
        self.driver.find_element(By.NAME, 'username').clear()
        # enter the new name
        self.driver.find_element(By.NAME, 'username').send_keys(new_name)
        # save
        self.driver.find_element(By.CSS_SELECTOR, '.js_save').click()

    def get_member_msg(self):
        """After a save, wait for the success toast and return the member table texts."""
        # toast_loc = (By.XPATH, './/*[@class="ww_tip success"]')
        toast_loc = (By.CSS_SELECTOR, ".ww_tip.success")
        WebDriverWait(self.driver, 10).until(expected_conditions.presence_of_element_located(toast_loc))
        WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_class_name('js_title'))
        time.sleep(3)  # NOTE(review): fixed sleep to let the table refresh — confirm necessity
        result = []
        # collect every cell of the member table
        for element in self.driver.find_elements(By.CSS_SELECTOR, '.member_colRight_memberTable_td'):
            result.append(element.get_attribute('textContent'))
        print(result)
        return result

    def get_modify_member_info(self):
        """Return the display name shown on the member-detail cover."""
        WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_class_name(
            'member_display_cover_detail_name'))
        member_info = self.driver.find_element(By.CSS_SELECTOR, '.member_display_cover_detail_name').get_attribute(
            'textContent')
        return member_info
|
# Ask for three course grades, then report the average and the reward
# (30 euro per grade point per course).
icor = int(input('Wat is uw ICOR cijfer? '))
prog = int(input('Wat is uw PROG cijfer? '))
csn = int(input('Wat is uw CSN cijfer? '))
gemiddelde = (csn + icor + prog) / 3
beloning = 30 * csn + 30 * icor + 30 * prog  # int((300/10)) == 30
overzicht = 'Uw gemiddelde is {} en uw beloning daarvoor is {} euro.'.format(gemiddelde, beloning)
print('Uw gemiddelde is ', gemiddelde)
print('Uw beloning is ', beloning, 'euro.')
print(overzicht)
|
#!/usr/bin/env python3
import sqlite3

# Connect to the database file (created on first use).
dbconnect = sqlite3.connect("mydb.db")
# Access columns by name by switching the row factory to sqlite3.Row.
dbconnect.row_factory = sqlite3.Row
cursor = dbconnect.cursor()
# Make the script runnable against a fresh database.
cursor.execute('CREATE TABLE IF NOT EXISTS sensors (sensorID TEXT, type TEXT, zone TEXT)')
# Insert the sample rows.  BUG FIXES from the original: curly "smart
# quotes" around the zone literals were a syntax error, row 4 had the
# comma inside the id literal ('4,'), and `print data` was Python 2
# syntax in a python3 script.
rows = [
    ('1', 'door', 'kitchen'),
    ('2', 'temperature', 'kitchen'),
    ('3', 'door', 'garage'),
    ('4', 'motion', 'garage'),
    ('5', 'temperature', 'garage'),
]
cursor.executemany('INSERT INTO sensors VALUES (?, ?, ?)', rows)
dbconnect.commit()
# Print the door sensors plus everything in the kitchen.
cursor.execute('SELECT * FROM sensors')
for row in cursor:
    if row['type'] == "door" or row['zone'] == "kitchen":
        print(row['sensorID'], row['type'], row['zone'])
# close the connection
dbconnect.close()
|
from omtools.comps.vectorized_pnorm_comp import VectorizedPnormComp
from omtools.comps.vectorized_axiswise_pnorm_comp import VectorizedAxisWisePnormComp
from omtools.core.variable import Variable
from typing import List
import numpy as np
def pnorm(expr, pnorm_type=2, axis=None):
    '''
    This function computes the pnorm

    Parameters
    ----------
    expr: Variable
        The Variable(s) over which to take the pnorm
    pnorm_type: int
        This specifies what pnorm to compute. Values must be nonzero positive and even.
    axis: int
        Specifies the axis over which to take the pnorm
    '''
    if not isinstance(expr, Variable):
        raise TypeError(expr, " is not a Variable object")
    if axis is not None:
        if not isinstance(axis, int) and not isinstance(axis, tuple):
            raise TypeError("axis must be an integer or tuple of integers")
    out = Variable()
    out.add_dependency_node(expr)
    # pnorm is only defined here for positive even orders.
    # (ValueError is a subclass of Exception, so existing callers that
    # caught Exception still work.)
    if pnorm_type % 2 != 0 or pnorm_type <= 0:
        raise ValueError(pnorm_type, " is not positive OR is not even")
    if axis is None:  # norm over the whole (flattened) array
        out.build = lambda: VectorizedPnormComp(
            shape=expr.shape,
            in_name=expr.name,
            out_name=out.name,
            pnorm_type=pnorm_type,
            val=expr.val,
        )
    else:  # norm along the given axis/axes; the output drops those axes
        output_shape = np.delete(expr.shape, axis)
        out.shape = tuple(output_shape)
        out.build = lambda: VectorizedAxisWisePnormComp(
            shape=expr.shape,
            in_name=expr.name,
            out_shape=out.shape,
            out_name=out.name,
            pnorm_type=pnorm_type,
            axis=axis if isinstance(axis, tuple) else (axis, ),
            val=expr.val,
        )
    return out
|
# For each test case, three angles form a valid triangle iff they sum to 180.
t = int(input())
for x in range(t):
    a, b, c = (int(part) for part in input().split())
    d = 180
    if a + b + c == d:
        print("YES")
    else:
        print("NO")
|
"""
Data: different labels must be in folders starting with 0, then 1
"""
config = {
"data_path":"../data", # don't include a "slash" on the end
"batch_size":40,
"num_epoch":30,
"learning_rate":0.0001,
# Resize input images
"image_width":224,
"image_height":224
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySecurityRiskVerifyidentityApplyModel(object):
    """Request model for the Alipay security "verify identity apply" API.

    Generated-style data holder: each field is a trivial property/setter
    pair; serialization is handled by to_alipay_dict / from_alipay_dict.
    """
    def __init__(self):
        # All fields default to None and are skipped when falsy during
        # serialization (see to_alipay_dict).
        self._account_id = None
        self._account_name = None
        self._account_type = None
        self._biz_id = None
        self._biz_params = None
        self._scene_code = None
        self._validate_product_group = None

    # --- trivial accessors (generated style) ---
    @property
    def account_id(self):
        return self._account_id
    @account_id.setter
    def account_id(self, value):
        self._account_id = value
    @property
    def account_name(self):
        return self._account_name
    @account_name.setter
    def account_name(self, value):
        self._account_name = value
    @property
    def account_type(self):
        return self._account_type
    @account_type.setter
    def account_type(self, value):
        self._account_type = value
    @property
    def biz_id(self):
        return self._biz_id
    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value
    @property
    def biz_params(self):
        return self._biz_params
    @biz_params.setter
    def biz_params(self, value):
        self._biz_params = value
    @property
    def scene_code(self):
        return self._scene_code
    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value
    @property
    def validate_product_group(self):
        return self._validate_product_group
    @validate_product_group.setter
    def validate_product_group(self, value):
        self._validate_product_group = value

    def to_alipay_dict(self):
        """Serialize to a dict, recursing into values that are themselves models.

        NOTE(review): truthiness (`if self.x:`) also skips legitimate falsy
        values such as 0 or "" — confirm that is acceptable upstream.
        """
        params = dict()
        if self.account_id:
            if hasattr(self.account_id, 'to_alipay_dict'):
                params['account_id'] = self.account_id.to_alipay_dict()
            else:
                params['account_id'] = self.account_id
        if self.account_name:
            if hasattr(self.account_name, 'to_alipay_dict'):
                params['account_name'] = self.account_name.to_alipay_dict()
            else:
                params['account_name'] = self.account_name
        if self.account_type:
            if hasattr(self.account_type, 'to_alipay_dict'):
                params['account_type'] = self.account_type.to_alipay_dict()
            else:
                params['account_type'] = self.account_type
        if self.biz_id:
            if hasattr(self.biz_id, 'to_alipay_dict'):
                params['biz_id'] = self.biz_id.to_alipay_dict()
            else:
                params['biz_id'] = self.biz_id
        if self.biz_params:
            if hasattr(self.biz_params, 'to_alipay_dict'):
                params['biz_params'] = self.biz_params.to_alipay_dict()
            else:
                params['biz_params'] = self.biz_params
        if self.scene_code:
            if hasattr(self.scene_code, 'to_alipay_dict'):
                params['scene_code'] = self.scene_code.to_alipay_dict()
            else:
                params['scene_code'] = self.scene_code
        if self.validate_product_group:
            if hasattr(self.validate_product_group, 'to_alipay_dict'):
                params['validate_product_group'] = self.validate_product_group.to_alipay_dict()
            else:
                params['validate_product_group'] = self.validate_product_group
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Deserialize from a dict; returns None for an empty/missing dict."""
        if not d:
            return None
        o = AlipaySecurityRiskVerifyidentityApplyModel()
        if 'account_id' in d:
            o.account_id = d['account_id']
        if 'account_name' in d:
            o.account_name = d['account_name']
        if 'account_type' in d:
            o.account_type = d['account_type']
        if 'biz_id' in d:
            o.biz_id = d['biz_id']
        if 'biz_params' in d:
            o.biz_params = d['biz_params']
        if 'scene_code' in d:
            o.scene_code = d['scene_code']
        if 'validate_product_group' in d:
            o.validate_product_group = d['validate_product_group']
        return o
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-14 14:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine the `cancelado` choice field on servico_item."""

    dependencies = [
        ('chica_agenda', '0005_auto_20181214_1139'),
    ]
    operations = [
        migrations.AlterField(
            model_name='servico_item',
            name='cancelado',
            # b'1' = Nao (not cancelled), b'2' = Sim (cancelled)
            field=models.CharField(choices=[(b'1', b'Nao'), (b'2', b'Sim')], default=b'1', max_length=1),
        ),
    ]
|
import re
from os import path
import csv
from string import punctuation
import pandas as pd
import pymorphy2
from nltk.corpus import stopwords
CURRENT_DIR = path.dirname(path.abspath(__file__))
CITIES = set()
# Collect known city names (column 3 of regions.csv) in lowercase.
with open(path.join(CURRENT_DIR, 'regions.csv'), 'r', encoding='UTF-8') as csvfile:
    rows = list(csv.reader(csvfile, delimiter=';', quotechar='|'))
    for row in rows:
        city = str(row[3]).strip().lower()
        if city:
            CITIES.add(city)
morph = pymorphy2.MorphAnalyzer()  # Russian morphological analyzer (lemmatizer)
# Russian stopwords plus the single digits 0-8.
# NOTE(review): range(9) omits '9' — confirm whether that is intentional.
STOP = set(stopwords.words('russian')) | set([str(i) for i in range(9)])
stops = set(stopwords.words('russian'))  # duplicate of the stopword set; kept as-is
def to_normal_form(words):
    """Strip non-word characters from each token and lemmatize the survivors."""
    lemmas = []
    for raw in words:
        cleaned = re.sub(r'\W+', '', raw)
        if cleaned:
            lemmas.append(morph.parse(cleaned)[0].normal_form)
    return lemmas
def singular_words(sentence):
    """Tokenize `sentence`; stopwords are kept verbatim, the remaining
    tokens are lemmatized, and the combined list is returned.

    BUG FIX: the original joined the non-stopword tokens back into one
    string before calling to_normal_form, which iterates its argument —
    so lemmatization ran per *character*.  The token list is now passed
    directly.
    """
    cleaned = ''.join([c for c in sentence if c not in punctuation])
    tokens = cleaned.lower().split()
    with_stop = [w for w in tokens if w in STOP]
    without_stop = [w for w in tokens if w not in STOP]
    with_stop.extend(to_normal_form(without_stop))
    return with_stop
def normalize(text):
    """Strip punctuation, lowercase, split into tokens and lemmatize them."""
    stripped = ''.join(ch for ch in text if ch not in punctuation)
    tokens = stripped.lower().split()
    return to_normal_form(tokens)
def prepare(row):
    # Thin wrapper around normalize(); used per-row via DataFrame.apply
    # in the (currently commented-out) FAQ loading below.
    return normalize(row)
# df = pd.read_csv(path.join(CURRENT_DIR, 'faq.csv'),
# names=['category', 'title', 'text'])
# FAQ_DF = df
# FAQ_RAW = df['title']
# FAQ = df['title'].apply(lambda x: prepare(x))
|
class Attrs:
    """Delegating wrapper: attribute reads fall through to the wrapped
    object, attribute writes are traced and stored on the wrapper."""

    def __init__(self, start):
        self.wrapped = start  # this assignment is itself traced by __setattr__

    def __getattr__(self, attr):
        # Only invoked when normal lookup fails: delegate to the wrapped object.
        return getattr(self.wrapped, attr)

    def __setattr__(self, attr, value):
        print("set", attr, value)
        # Store directly in the instance dict to avoid infinite recursion.
        object.__setattr__(self, attr, value)
|
import re
def parse_to_tuple(s):
    """Parse '(area)tel@addr' into a (area, tel, addr) tuple.

    Returns a usage-hint string when the input does not match.
    BUG FIX: the pattern used `(\\d+)+` — a repeated capture group, which
    makes the second group keep only the *last* digit of the phone
    number.  It is now a single `(\\d+)` group, matching parse_to_dict.
    """
    partten = re.compile(r'\((\d+)\)(\d+)@(.+)')
    match = partten.search(s)
    if match is None:
        return '正确格式为 (023)68001111@office '
    return match.groups()
def parse_to_dict(s):
    """Parse '(area)tel@addr' into {'area', 'tel', 'addr'}; returns a
    usage-hint string when the input does not match."""
    match = re.search(r'\(([\d]+)\)(\d+)@(.+)', s)
    if match is None:
        return '正确格式为 (023)68001111@office '
    return dict(zip(('area', 'tel', 'addr'), match.groups()))
# Demo: run both parsers on a well-formed input.
print(parse_to_tuple('(023)68001111@office'))
print(parse_to_dict('(023)68001111@office'))
|
import redis
import json
import h5py
import pickle
import numpy as np
import random
import jieba
import multiprocessing
# Shared state filled in by the worker callbacks below.
word2idx, idx2word ,allwords, corpus = None, None,{},[]
DUMP_FILE = 'data/basic_data_700k_v2.pkl'  # pickle output path
check_sample_size = 10  # number of samples printed by check_dump()
TF_THRES = 5  # keep a term if its tf exceeds this...
DF_THRES = 2  # ...or its df exceeds this (see build_idx_for_words_tf_df)
r0 = redis.StrictRedis(host='localhost', port=6379, db=0)
r1 = redis.StrictRedis(host='localhost', port=6379, db=1)
# reserved token ids
id_beg = 0
id_eos = 1
id_emp = 2
id_unk = 3
r = None  # NOTE(review): never used below — confirm before removing
class Word:
    """A vocabulary term together with its corpus statistics."""

    def __init__(self, val, tf, df):
        self.val = val  # the term itself
        self.tf = tf    # term frequency
        self.df = df    # document frequency

    def __repr__(self):
        # BUG FIX: was `pass`, i.e. returned None, which makes repr()
        # raise TypeError.  Return a proper debug string instead.
        return "Word(val=%r, tf=%r, df=%r)" % (self.val, self.tf, self.df)
def parse_all_crawled_data(keys, idx):
    # Fetch the JSON documents for `keys` from redis db 0 or 1 (selected
    # by `idx`) and return (group_id, title, abstract) triples.  Tabs are
    # flattened to spaces; an empty abstract falls back to the title.
    res = []
    if idx == 0:
        conn = r0
    else:
        conn = r1
    for data in conn.mget(keys):
        data = json.loads(data)
        key = data.get("group_id")
        title = data.get("title","").replace('\t',' ')
        abstract = data.get("abstract","").replace('\t',' ')
        if abstract == "":
            abstract = title
        res.append((key,title,abstract))
    return res
def cal_word_tf_df(corpus):
    """Tokenize every title/abstract pair with jieba and accumulate each
    term's tf (total occurrences) and df (documents containing it).

    Returns (term -> Word stats, list of (title_tokens, abstract_tokens)).
    """
    words = {}
    title_abstract_pairs = []
    for doc in corpus:
        title_tokens = list(jieba.cut(doc[1].lower(), cut_all=False))
        abstract_tokens = list(jieba.cut(doc[2].lower(), cut_all=False))
        title_abstract_pairs.append((title_tokens, abstract_tokens))
        all_tokens = title_tokens + abstract_tokens
        # accumulate term frequency
        for token in all_tokens:
            if not words.get(token):
                words[token] = Word(val=token, tf=1, df=0)
            else:
                words[token].tf += 1
        # accumulate document frequency (each doc counts once per term)
        for token in set(all_tokens):
            words[token].df += 1
    return words, title_abstract_pairs
def build_idx_for_words_tf_df(chars,tf_thres = TF_THRES, df_thres = DF_THRES):
    """Assign integer ids to terms whose tf OR df clears its threshold.

    Ids 0-3 are reserved for the special tokens; kept terms start at 4.
    Returns (term -> id, id -> term).
    """
    char2idx = {'<beg>': id_beg, '<eos>': id_eos, '<emp>': id_emp, '<unk>': id_unk}
    start_idx = id_unk + 1
    # drop rare terms that clear neither threshold
    kept = [c for c in chars if c.tf > tf_thres or c.df > df_thres]
    for offset, char in enumerate(kept):
        char2idx[char.val] = start_idx + offset
    idx2char = dict((idx, char) for char, idx in char2idx.items())
    return char2idx, idx2char
def prt(label, x):
    # Print "label: tok tok ..." for a list of token ids, skipping the
    # <emp> padding id and looking words up in the global idx2word map.
    # NOTE: Python 2 print statements; the trailing comma suppresses
    # the newline between tokens.
    print label+':',
    for w in x:
        if w == id_emp:
            continue
        print idx2word[w],
    print
def worker(i,keys,idx):
    # Pool worker: fetch the raw documents for this slice of keys from
    # redis db `idx` and compute per-term tf/df over them.  The returned
    # (words, sub_corpus) pair is merged by combine_results().
    print "worker [%2d] started with keys:[%d]!"%(i,len(keys))
    corpus = parse_all_crawled_data(keys, idx)
    print "worker [%2d] get docs :[%d]!"%(i,len(corpus))
    words,sub_corpus = cal_word_tf_df(corpus)
    return words,sub_corpus
def combine_results(res):
    """Pool callback: merge one worker's (words, sub_corpus) result into
    the module-level corpus/vocabulary and rebuild the index maps.

    BUG FIX: the global statement misspelled `corpus` as `copurs`.  The
    extend() below mutates the real global either way (no rebinding),
    but the declaration now names the right variable.
    """
    global corpus, word2idx, idx2word
    words, sub_corpus = res[0], res[1]
    corpus.extend(sub_corpus)
    for word in words:
        if word not in allwords:
            allwords[word] = Word(val = word, tf = 0, df = 0)
        allwords[word].tf += words[word].tf
        allwords[word].df += words[word].df
    word2idx, idx2word = build_idx_for_words_tf_df(allwords.values())
def dump_all_results():
    """Map every corpus token to its id and pickle the vocabulary plus
    the encoded titles/abstracts to DUMP_FILE.

    BUG FIX: the output file was opened but never closed; it is now
    managed with a context manager.
    """
    titles, abstracts = [], []
    for ts_, as_ in corpus:
        titles.append([word2idx.get(word, id_unk) for word in ts_])
        abstracts.append([word2idx.get(word, id_unk) for word in as_])
    with open(DUMP_FILE, 'wb') as datafile:
        pickle.dump((allwords, word2idx, idx2word, titles, abstracts), datafile, -1)
def check_dump():
    # Reload the pickle and print a few random title/abstract samples to
    # eyeball that the dump round-trips.
    # NOTE(review): the local idx2word bound here shadows the module
    # global that prt() actually reads — confirm they are consistent.
    allwords, word2idx, idx2word, titles, abstracts = pickle.load(open(DUMP_FILE))
    print "allwords size is:",len(allwords)
    print "word2idx size is:",len(word2idx)
    print "titles size is:",len(titles)
    for k in range(check_sample_size):
        k = random.randint(0,len(titles) - 1)
        print "[%s]th Example"%(k)
        prt('title',titles[k])
        prt('abstract',abstracts[k])
worker_size = 10
pool = multiprocessing.Pool()
# Shard the keys of each redis db across `worker_size` jobs; each
# worker's partial results are merged into the module globals via the
# combine_results callback.
for idx,conn in enumerate([r0,r1]):
    keys = conn.keys()
    batch = len(keys) / worker_size  # Python 2 integer division
    for i in range(worker_size):
        if i == worker_size - 1:
            sub_keys = keys[i * batch : ]  # last worker takes the remainder
        else:
            sub_keys = keys[i * batch : i * batch + batch]
        pool.apply_async(worker,(idx * 10 + i,sub_keys,idx,),callback=combine_results)
pool.close()
pool.join()
dump_all_results()
check_dump()
print "all job finished!"
|
from tkinter import *
from tkinter.filedialog import *
import os
es ="" # 편집을 위한 전역변수 선언
def newFile():
top.title("제목없음-메모장")
file = None
ta.delete(1.0,END)
def openFile():
    # Show an open dialog, load the chosen file into the text area and
    # retitle the window after the file's base name.
    # BUG FIX: the file handle leaked if the read raised; it is now
    # closed deterministically via a with-statement.
    file = askopenfilename(title="파일 선택",filetypes=(("텍스트 파일","*.txt"),("모든 파일","*.*")))
    top.title(os.path.basename(file)+"- 메모장")
    ta.delete(1.0,END)
    with open(file,"r") as f:
        ta.insert(1.0,f.read())
def saveFile():
    # Ask for a destination file and write out the entire text area.
    f = asksaveasfile(mode ="w", defaultextension=".txt")
    if f is None:
        return # dialog was cancelled
    ts = str(ta.get(1.0,END))
    f.write(ts)
    f.close()
def cut():
    # Move the current selection into the global buffer, then remove it.
    global es
    es=ta.get(SEL_FIRST,SEL_LAST)
    ta.delete(SEL_FIRST,SEL_LAST)
def copy():
    # Copy the current selection into the global buffer.
    global es
    es = ta.get(SEL_FIRST,SEL_LAST)
def paste():
    # Insert the buffered text at the current cursor position.
    global es
    ta.insert(INSERT,es)
def delete():
    # Remove the current selection without buffering it.
    ta.delete(SEL_FIRST,SEL_LAST)
# Build the notepad window: text area with scrollbar plus a menu bar.
top = Tk()
top.title("메모장")
top.geometry("400x400")
ta = Text(top)
sb = Scrollbar(ta)
sb.config(command=ta.yview)
sb.pack(side=RIGHT, fill=Y)
# NOTE(review): ta is never given yscrollcommand=sb.set, so the
# scrollbar will not track the text widget — confirm.
top.grid_rowconfigure(0,weight=1)
top.grid_columnconfigure(0,weight=1)
ta.grid(sticky = N +E+S+W) # make the text area fill all four edges
file=None
mb = Menu(top)
fi = Menu(mb, tearoff=0)
fi.add_command(label="새 파일",command=newFile)
fi.add_command(label="열기",command=openFile)
fi.add_command(label="저장") # NOTE(review): no command= — saveFile is never wired up
fi.add_separator() # separator line
fi.add_command(label="종료") # NOTE(review): no command= for quitting
mb.add_cascade(label="파일",menu=fi) # attach the File menu to the menu bar
e = Menu(mb,tearoff=0)
# NOTE(review): the edit entries below also lack command= bindings to
# cut/copy/paste/delete.
e.add_command(label="잘라내기")
e.add_command(label="복사")
e.add_command(label="붙이기")
e.add_command(label="삭제")
mb.add_cascade(label="편집",menu=e) # attach the Edit menu
h = Menu(mb,tearoff=0)
h.add_command(label="메모장 정보")
mb.add_cascade(label="도움말",menu=h) # attach the Help menu
top.config(menu=mb)
top.mainloop()
|
# id=5150
# The following code should print out to the screen the number 1232 in its binary, octal and hexadecimal representations. You should complete the missing code.
# number=1232
# print(number)
# number_bin = _____
# number_hex = _____
# number_oct = _____
# print(number_bin)
# print(number_hex)
# print(number_oct)
# Show 1232 in decimal, then binary / hex / octal via format specs.
number = 1232
print("Number:{0}".format(number))
# The same integer feeds every print; the format spec does the base conversion.
number_bin = number
number_hex = number
number_oct = number
print("number_bin= {0:b}".format(number_bin))
print("number_hex= {0:x}".format(number_hex))
print("number_oct= {0:o}".format(number_oct))
|
print("What is the first number?")
value1 = input()
number1 = int(value1)
print("What is the second number?")
number2 = int(input())
if number1 == number2:
print("THEY ARE THE SAME!!!!")
else:
if number1 > number2:
print("The biggest is " + str(number1))
else:
print("The biggest is " + str(number2))
|
"""
Some functions to make testing slightly easier.
"""
from typing import List
def assert_all_equal(left: List, right: List):
    """
    Assert that each element of left is == each element of right in order.
    """
    assert len(left) == len(right)
    for a, b in zip(left, right):
        assert a == b
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 13:34:49 2018
@author: orlando
"""
import pandas as pd
import numpy as np
import collections
import csv
from sklearn import linear_model
from sklearn import metrics
#load the datasets
train=pd.read_csv('dataset.csv', sep=';') # history of all matches between national teams at the World Cup
test=pd.read_csv('datasetTest.csv', sep=';') #group-stage table for World Cup Russia 2018
teams=pd.read_csv('teams.csv', sep=';') #teams, their FIFA ranking and whether they qualified for Russia
copa=pd.read_csv('./Datasets/Copa.csv', sep=';') #history of World Cup champions and host countries
grupoTime=pd.read_csv('grupoTime.csv', sep=';') #information on the Russia 2018 World Cup groups
jogadores=pd.read_csv('jogadores.csv', sep=';') #players called up by each team for Russia 2018
cartoes=pd.read_csv('cartoes.csv', sep=';') #historical card counts per team
cartoesTest=pd.read_csv('cartoesTest.csv', sep=';') #test card data per match
######################################################################
### Team-related helper functions used throughout the program
######################################################################
# top 10 teams by FIFA ranking
list10BestTeams=['Germany', 'Brazil', 'Belgium', 'Portugal', 'Argentina', 'Switzerland',
'France', 'Poland', 'Peru', 'Spain']
listCabecaChave=['Germany', 'Brazil', 'Belgium', 'Portugal', 'Argentina', 'Switzerland', 'France', 'Russia'] # seeded teams
def getRankFifa(team_id, teams):
    """FIFA rank (column 3) of the team with this id."""
    row = teams[teams['id'] == team_id].values[0]
    return int(row[3])
def getTeamName(team_id, teams):
    """Team name (column 1) of the team with this id."""
    match = teams[teams['id'] == team_id]
    return match.values[0][1]
def getTeamID(team_name, teams):
    """Team id (column 0) of the team with this name."""
    match = teams[teams['nome'] == team_name]
    return match.values[0][0]
def checkBestTeam(team_id, teams):
    """1 if the team is among the FIFA top-10 (list10BestTeams), else 0."""
    name = getTeamName(team_id, teams)
    return 1 if name in list10BestTeams else 0
def checkCabecaChave(team_id, teams):
    """1 if the team is one of the seeded teams (listCabecaChave), else 0."""
    name = getTeamName(team_id, teams)
    return 1 if name in listCabecaChave else 0
def checkChampion(team_id, year, copa, teams):
    """1 if this team is recorded as champion for `year`, else 0."""
    champ = copa[copa['Ano'] == year]['Campeao'].values
    # champ is an array; the comparison mirrors the original semantics
    if getTeamName(team_id, teams) == champ:
        return 1
    return 0
# Number of world titles a team holds
def getNumMundiaisTeam(team_id, teams, copa):
    """Count how many times this team appears in the champions column."""
    name = getTeamName(team_id, teams)
    return copa['Campeao'].tolist().count(name)
######################################################################
### END - of the team helper functions
######################################################################
###################################################################################
######## Pre-processing and preparation of the data for the analysis
###################################################################################
#teams without a FIFA ranking get 0
teams['rnk'].fillna(0, inplace=True)
#teams without World Cup appearances get 0
teams['participacoes_copa'].fillna(0, inplace=True)
#builds a historical goals base for train and test;
#it is later used to decide which team will score the most goals at the Cup
def createGols(dataset, typeDataset):
    # 'Train': aggregate goals per (team, year) from both sides of every match.
    if typeDataset == 'Train':
        golsTimeCasa=dataset.filter(items=['ano', 'id_casa', 'gols_casa'])
        golsTimeCasa=golsTimeCasa.rename(columns={'id_casa': 'time', 'gols_casa': 'gols'})
        golsTimeFora=dataset.filter(items=['ano', 'id_fora', 'gols_fora'])
        golsTimeFora=golsTimeFora.rename(columns={'id_fora': 'time', 'gols_fora': 'gols'})
        golsPartidas=pd.concat([golsTimeCasa, golsTimeFora])
        golsPartidas=golsPartidas.groupby(['time', 'ano'], as_index=False).gols.sum()
    # 'Test': every team qualified for Russia 2018, with the year fixed to 2018.
    if typeDataset == 'Test':
        golsPartidas=teams[teams['classificado_russia'] == 1].filter(items=['id']).rename(columns={'id': 'time'})
        golsPartidas['ano']=2018
    golsTrain = pd.DataFrame()
    golsTrain=golsPartidas
    # per-position strength columns, filled from the squad data below
    golsTrain['goleiro']=0
    golsTrain['defesa']=0
    golsTrain['meio']=0
    golsTrain['ataque']=0
    for index, row in golsTrain.iterrows():
        time=row['time']
        defesa, ataque, goleiro, meio = desempenhoPosicao(time, teams)
        # NOTE(review): DataFrame.ix was removed in pandas >= 1.0; switch to .loc if upgrading.
        golsTrain.ix[(golsTrain['time'] == time), 'goleiro'] = goleiro
        golsTrain.ix[(golsTrain['time'] == time), 'defesa'] = defesa
        golsTrain.ix[(golsTrain['time'] == time), 'meio'] = meio
        golsTrain.ix[(golsTrain['time'] == time), 'ataque'] = ataque
    return golsTrain
#Returns each team's performance by position: defensive, offensive,
#goalkeeping and midfield (creative) potential, averaged over its squad.
def desempenhoPosicao(team_id, teams):
    # player attribute columns averaged per position code
    desempenho=['posicao', 'acceleration', 'aggression', 'agility', 'balance', 'ball_control', 'composure',
    'crossing', 'curve', 'dribbling', 'finishing', 'gk_diving', 'gk_handling', 'gk_kicking', 'gk_positioning', 'gk_reflexes',
    'heading_accuracy', 'interceptions', 'jumping', 'long_passing', 'penalties', 'positioning']
    jogadoresTeam=jogadores[jogadores['team'] == getTeamName(team_id, teams)]
    jogadoresTeam=jogadoresTeam[list(desempenho)]
    # mean over players per position, then mean over the attribute columns
    media=jogadoresTeam.groupby(['posicao']).mean().mean(1)
    try:
        defesa = media['DF']
        ataque = media['FW']
        goleiro = media['GK']
        meio = media['MF']
    except KeyError:
        # the team is missing one of the position codes: zero everything
        defesa = 0
        ataque = 0
        goleiro = 0
        meio = 0
    lista=[defesa, ataque, goleiro, meio]
    return lista
# Funcao responsavel por definir em colunas no proprio dataframe de treinamento
# quem foi o ganhador de uma partida, perdedor ou se houve empate,
# quantos gols o vencedor, perdedor ou os times que empataram fez
# define tambem se time jogou em casa ou fora, neste caso, se o time esta do lado esquerdo ou direito da chave
def resultGame(train):
    """Annotate *train* in place with match-outcome columns.

    Adds:
      'vencedor'/'perdedor'        winner/loser team ids (-1 on a draw)
      'gols_vencedor'/'gols_perdedor'  their goal counts (the common score
                                        fills both sides on a draw)
      'vencedor_onde'              1 = winner played home/left side of the
                                   bracket, -1 = away/right, 0 = draw

    Fix: the original filled NaNs via chained
    `train[col].fillna(..., inplace=True)`, which is unreliable under pandas
    copy-on-write; the filled series is now assigned directly.
    """
    home_wins = train['gols_casa'] > train['gols_fora']
    away_wins = train['gols_fora'] > train['gols_casa']
    conds = [home_wins, away_wins]

    def _select(choices, draw_value):
        # vectorized pick with an explicit fallback for draws (neither cond)
        picked = np.select(conds, choices, default=np.nan)
        return pd.Series(picked, index=train.index).fillna(draw_value)

    train['vencedor'] = _select([train['id_casa'], train['id_fora']], -1)
    train['perdedor'] = _select([train['id_fora'], train['id_casa']], -1)
    # on a draw both teams scored the same, so the home count fills in
    train['gols_vencedor'] = _select([train['gols_casa'], train['gols_fora']], train['gols_casa'])
    train['gols_perdedor'] = _select([train['gols_fora'], train['gols_casa']], train['gols_casa'])
    winner_side = [train['vencedor'] == train['id_casa'],
                   train['vencedor'] == train['id_fora']]
    train['vencedor_onde'] = pd.Series(
        np.select(winner_side, [1, -1], default=np.nan),
        index=train.index).fillna(0)
# Funcao responsavel por gerar os insights de cada time, ou seja, a quantidade de vitorias que time teve,
# quantos titulos ela tem em copas, quantidade de derrotas, quantidade de participacoes em copa,
# total de jogos e suas respectivas medias, seu ranking na fifa, se um time e cabeca de chave e etc
def getParticipacaoGeral(team_id):
    """Build the historical feature vector for one team.

    Returns [wins, mean goals scored, mean goals conceded, win rate,
    loss rate, draw rate, World Cup participations, best-team flag,
    FIFA rank, seeded flag, defence, attack, goalkeeper, midfield].
    Reads the module-level `train` and `teams` dataframes.

    Fix: desempenhoPosicao() was called four times with identical
    arguments just to index its result; it is now called once and unpacked.
    """
    gamesWon = train[train.vencedor == team_id]
    totalGolsPartidaWon = gamesWon['gols_vencedor'].sum()
    gamesLost = train[train.perdedor == team_id]
    totalGolsPartidaLost = gamesLost['gols_perdedor'].sum()
    gamesDraw = train[(train.vencedor == -1) & ((train.id_casa == team_id) | (train.id_fora == team_id))]
    participacoes = teams[teams.id == team_id].values[0][6]  # column 6: nr of WC participations
    nVitorias = len(gamesWon.index)
    nDerrotas = len(gamesLost.index)
    nEmpate = len(gamesDraw.index)
    numGames = nVitorias + nDerrotas + nEmpate
    if numGames == 0:
        # team never played a World Cup match: all averages are 0
        mediaGolsMarcados = 0
        mediaGolsSofridos = 0
        mediaVitorias = 0
        mediaDerrotas = 0
        mediaEmpate = 0
    else:
        mediaGolsMarcados = totalGolsPartidaWon / numGames
        mediaGolsSofridos = totalGolsPartidaLost / numGames
        mediaVitorias = nVitorias / numGames
        mediaDerrotas = nDerrotas / numGames
        mediaEmpate = nEmpate / numGames
    # positional ratings computed once (originally repeated four times)
    defesa, ataque, goleiro, meio = desempenhoPosicao(team_id, teams)
    return [nVitorias, mediaGolsMarcados, mediaGolsSofridos, mediaVitorias,
            mediaDerrotas, mediaEmpate, participacoes,
            checkBestTeam(team_id, teams), getRankFifa(team_id, teams),
            checkCabecaChave(team_id, teams), defesa, ataque, goleiro, meio]
# Gera uma "dicionarizacao" de todos os times em memoria
def createDic():
    """Map every team id to its feature vector (see getParticipacaoGeral)."""
    dicFase = collections.defaultdict(list)
    for nome in teams['nome'].tolist():
        tid = teams[teams['nome'] == nome].values[0][0]  # column 0: team id
        dicFase[tid] = getParticipacaoGeral(tid)
    return dicFase
# funcao ira criar toda a base de treinamento para a execucao do modelo de dados.
def createTrainings():
    """Build the (xTrain, yTrain) arrays from the historical `train` dataframe.

    Each row is the winner-minus-loser feature difference plus the home
    indicator. Every other row is sign-flipped and labelled 0 so the two
    classes are balanced.
    """
    totalNumGames = len(train.index)
    # Brazil's (id 447) vector just fixes the feature count
    numFeatures = len(getParticipacaoGeral(447))
    xTrain = np.zeros((totalNumGames, numFeatures + 1))
    yTrain = np.zeros(totalNumGames)
    team_dic = createDic()
    for count, (index, row) in enumerate(train.iterrows()):
        w_vector = team_dic[row['vencedor']]
        l_vector = team_dic[row['perdedor']]
        diff = [a - b for a, b in zip(w_vector, l_vector)]
        home = row['vencedor_onde']
        if count % 2 == 0:
            diff.append(home)
            xTrain[count] = diff
            yTrain[count] = 1
        else:
            # negate the whole row (the appended -home becomes home again)
            diff.append(-home)
            xTrain[count] = [-p for p in diff]
            yTrain[count] = 0
    return xTrain, yTrain
# Funcao cria efetivamente o modelo de predicao de acordo com a base de teste
# lembrando que a base de teste e a tabele da copa da russia 2018
def createPrediction():
    """Predict every fixture in the module-level `test` dataframe and write
    the results to result.csv.

    Output columns: Id, Time1, Porcentagem (Time1 win score * 100), Time2,
    Fase. Relies on the module-level `test`, `model` and `teams`.

    Fix: `pd.np.array` replaced with `np.array` — the `pandas.np` alias is
    deprecated and was removed in pandas 2.0.
    """
    results = [[0 for _ in range(5)] for _ in range(len(test.index))]
    for index, row in test.iterrows():
        team1_id = row['id_casa']
        team2_id = row['id_fora']
        team1_vector = getParticipacaoGeral(int(team1_id))
        team2_vector = getParticipacaoGeral(int(team2_id))
        # home indicator 0: fixtures are neutral-ground
        pred = predictGame(team1_vector, team2_vector, 0, model)
        results[index][0] = row['partida_id']
        results[index][1] = getTeamName(team1_id, teams)
        results[index][2] = pred[0] * 100  # model score scaled to a percentage
        results[index][3] = getTeamName(team2_id, teams)
        results[index][4] = row['fase']
    results = np.array(results)
    header = ['Id', 'Time1', 'Porcentagem', 'Time2', 'Fase']
    with open("result.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(header)
        writer.writerows(results)
# Funcao para fazer a predicao de desempenho entre dois times
def predictGame(team_1_vector, team_2_vector, home, model):
diff = [a - b for a, b in zip(team_1_vector, team_2_vector)]
diff.append(home)
return model.predict([diff])
#return model.predict_proba([diff])
def prepareDatasetCards(dataset):
    """Join per-team mean discipline-related player stats onto *dataset*.

    Averages positioning/interceptions/heading/ball-control/aggression per
    team from the module-level `jogadores` and left-joins them on the
    'nome' column; teams without player rows get 0 for every stat.
    """
    stat_cols = ['positioning', 'interceptions', 'heading_accuracy',
                 'ball_control', 'aggression']
    media = jogadores[['team'] + stat_cols].groupby(['team'], as_index=False).mean()
    dataset = dataset.join(media.set_index('team'), on='nome', how='left')
    for col in stat_cols:
        dataset[col].fillna(0, inplace=True)  # no player data -> 0
    return dataset
##########################################################################
###### Fim da definicao de funcoes
###########################################################################
##################################################################################################
###### Execucao do modelo de data mining no conjunto de dados ja preprocessados
##################################################################################################
# Pre-process the training base: label each historical match with its winner,
# loser, goal counts and the side of the bracket the winner played on.
resultGame(train)
xTrain, yTrain = createTrainings()  # feature matrix built from World Cup history
# Split into train/test partitions (30% held out).
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# newer versions import train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(xTrain, yTrain, test_size=0.30, random_state = 0)
# Fit the model. Several algorithms were tried and linear_model.BayesianRidge()
# gave the best accuracy on this training base.
model = linear_model.BayesianRidge()
model.fit(X_train, Y_train)
preds = model.predict(X_test)
# Print the model accuracy (regression output rounded to 0/1 labels).
accuracy = metrics.accuracy_score(preds.round(),Y_test)
print ("Accuracy : %s" % "{0:.3%}".format(accuracy))
# Confusion matrix for the held-out split.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Y_test, preds.round())
# Predict the fixtures currently bound to `test` and write result.csv.
createPrediction()
##################################################################################################
###### End of the data-mining model execution on the pre-processed data
##################################################################################################
#######################################################################################
############ Build the group-stage standings table from the prediction output
#######################################################################################
jogos=pd.read_csv('result.csv')
tabela=grupoTime[grupoTime['ano'] == 2018]
tabela=tabela.filter(items=['id', 'team', 'grupo'])
tabela['Pontos']=0
tabela['Vitorias']=0
# Award points per predicted game: >=55% -> Time1 wins (3 pts),
# 45-55% -> draw (1 pt each), <45% -> Time2 wins (3 pts).
# 'Vitorias' (historical win count) acts as the tie-breaker.
# NOTE(review): DataFrame.ix is deprecated (removed in pandas 1.0);
# .loc is the modern equivalent.
for index, row in jogos.iterrows():
    if row['Porcentagem'] >= 55:
        tabela.ix[(tabela.team == row['Time1']), 'Pontos'] += 3
        tabela.ix[(tabela.team == row['Time1']), 'Vitorias'] = getParticipacaoGeral(getTeamID(row['Time1'], teams))[0]
    if (row['Porcentagem'] < 55) & (row['Porcentagem'] >= 45):
        tabela.ix[(tabela.team == row['Time1']), 'Pontos'] += 1
        tabela.ix[(tabela.team == row['Time2']), 'Pontos'] += 1
        tabela.ix[(tabela.team == row['Time1']), 'Vitorias'] = getParticipacaoGeral(getTeamID(row['Time1'], teams))[0]
        tabela.ix[(tabela.team == row['Time2']), 'Vitorias'] = getParticipacaoGeral(getTeamID(row['Time2'], teams))[0]
    if row['Porcentagem'] < 45:
        tabela.ix[(tabela.team == row['Time2']), 'Pontos'] += 3
        tabela.ix[(tabela.team == row['Time2']), 'Vitorias'] = getParticipacaoGeral(getTeamID(row['Time2'], teams))[0]
tabela = tabela.sort_values(['grupo', 'Pontos', 'Vitorias'], ascending=[1, 0, 0])
tabela = tabela.reset_index(drop=True)
#######################################################################################
############ Predict the round of 16 from the qualified teams
#######################################################################################
oitavas = []
# Pair group winners against runners-up of the neighbouring group to form
# the round-of-16 bracket.
for i in range(0, 32, 8):
    if (i % 2 == 0):
        oitavas.append((int(tabela.loc[i][0]), int(tabela.loc[i+5][0]), i, 'oitavas'))
        oitavas.append((int(tabela.loc[i+1][0]), int(tabela.loc[i+4][0]), i+1, 'oitavas'))
oitavas = pd.DataFrame(oitavas)
oitavas=oitavas.rename(columns={0: 'id_casa', 1: 'id_fora', 2: 'partida_id', 3: 'fase'})
test=oitavas  # swap in the new fixture base
createPrediction()  # predict and write the results to result.csv
#######################################################################################
############ Resolve the round-of-16 winners and build the quarter-final fixtures
#######################################################################################
vencedorOitavas = pd.read_csv('result.csv')
quartas = []
for i in range(0, 8, 2):
    # winners of adjacent games i and i+1 meet in the next round
    if( vencedorOitavas.loc[i]['Porcentagem'] > 50.0):
        time1=vencedorOitavas.loc[i]['Time1']
    else:
        time1=vencedorOitavas.loc[i]['Time2']
    if( vencedorOitavas.loc[i+1]['Porcentagem'] > 50.0):
        time2=vencedorOitavas.loc[i+1]['Time1']
    else:
        time2=vencedorOitavas.loc[i+1]['Time2']
    quartas.append((int(getTeamID(time1, teams)), int(getTeamID(time2, teams)), i, 'quartas'))
quartas = pd.DataFrame(quartas)
quartas=quartas.rename(columns={0: 'id_casa', 1: 'id_fora', 2: 'partida_id', 3: 'fase'})
test=quartas  # swap in the new fixture base
createPrediction()  # predict and write the results to result.csv
#################################################################################################
############ Resolve the quarter-final winners and build the semi-final fixtures
#################################################################################################
vencedorQuartas = pd.read_csv('result.csv')
semi = []
for i in range(0, 4, 2):
    # winners of adjacent games i and i+1 meet in the next round
    if( vencedorQuartas.loc[i]['Porcentagem'] > 50.0):
        time1=vencedorQuartas.loc[i]['Time1']
    else:
        time1=vencedorQuartas.loc[i]['Time2']
    if( vencedorQuartas.loc[i+1]['Porcentagem'] > 50.0):
        time2=vencedorQuartas.loc[i+1]['Time1']
    else:
        time2=vencedorQuartas.loc[i+1]['Time2']
    semi.append((int(getTeamID(time1, teams)), int(getTeamID(time2, teams)), i, 'semi'))
semi = pd.DataFrame(semi)
semi=semi.rename(columns={0: 'id_casa', 1: 'id_fora', 2: 'partida_id', 3: 'fase'})
test=semi  # swap in the new fixture base
createPrediction()  # predict and write the results to result.csv
#################################################################################################
############ Resolve the semi-final winners and build the final
#################################################################################################
vencedorSemi = pd.read_csv('result.csv')
final = []
for i in range(0, 2, 2):
    if( vencedorSemi.loc[i]['Porcentagem'] > 50.0):
        time1=vencedorSemi.loc[i]['Time1']
    else:
        time1=vencedorSemi.loc[i]['Time2']
    if( vencedorSemi.loc[i+1]['Porcentagem'] > 50.0):
        time2=vencedorSemi.loc[i+1]['Time1']
    else:
        time2=vencedorSemi.loc[i+1]['Time2']
    final.append((int(getTeamID(time1, teams)), int(getTeamID(time2, teams)), i, 'final'))
final = pd.DataFrame(final)
final=final.rename(columns={0: 'id_casa', 1: 'id_fora', 2: 'partida_id', 3: 'fase'})
# Third-place play-off: the two semi-final LOSERS (note the inverted < 50 tests).
terceiroQuarto = []
for i in range(0, 2, 2):
    if( vencedorSemi.loc[i]['Porcentagem'] < 50.0):
        time1=vencedorSemi.loc[i]['Time1']
    else:
        time1=vencedorSemi.loc[i]['Time2']
    if( vencedorSemi.loc[i+1]['Porcentagem'] < 50.0):
        time2=vencedorSemi.loc[i+1]['Time1']
    else:
        time2=vencedorSemi.loc[i+1]['Time2']
    terceiroQuarto.append((int(getTeamID(time1, teams)), int(getTeamID(time2, teams)), i, 'terceiro'))
terceiroQuarto = pd.DataFrame(terceiroQuarto)
terceiroQuarto=terceiroQuarto.rename(columns={0: 'id_casa', 1: 'id_fora', 2: 'partida_id', 3: 'fase'})
test=terceiroQuarto
createPrediction()
vencedorTerceiroQuarto = pd.read_csv('result.csv')
test=final  # swap in the final as the fixture base
createPrediction()  # predict and write the results to result.csv
vencedorFinal = pd.read_csv('result.csv')
# Consolidated table of every knock-out round prediction.
# NOTE(review): this dataframe binding is shadowed by the resultadosFinais()
# function defined right below; only the print here uses the dataframe.
resultadosFinais=pd.concat([vencedorOitavas, vencedorQuartas, vencedorSemi, vencedorFinal])
print('***** Resultados Finais dos jogos ******')
print(resultadosFinais)
def resultadosFinais(dataframe):
    """Return [winner, loser] team names from a one-row prediction dataframe.

    *dataframe* must carry 'Porcentagem', 'Time1' and 'Time2' columns; the
    first row decides: Porcentagem > 50 means Time1 won.

    Fixes: the original truth-tested `dataframe['Porcentagem'].values > 50`,
    which is an ndarray comparison and raises "truth value is ambiguous"
    for more than one row; scalars via .iloc[0] are used instead.
    NOTE(review): this function shadows the `resultadosFinais` dataframe
    created just above its definition — consider renaming one of the two.
    """
    porcentagem = dataframe['Porcentagem'].iloc[0]
    time1 = dataframe['Time1'].iloc[0]
    time2 = dataframe['Time2'].iloc[0]
    if porcentagem > 50:
        return [time1, time2]
    return [time2, time1]
##############################################################################################
# Predict which team scores the most goals in the tournament, using each
# team's positional potential plus its goal history in past World Cups.
#############################################################################################
golsTrain = createGols(train, 'Train')
golsTest = createGols(teams, 'Test')
fieldsTrain=['time', 'ano', 'goleiro', 'defesa', 'meio', 'ataque']
target=['gols']
xTrainGols=golsTrain[list(fieldsTrain)].values
yTrainGols=golsTrain[target].values
xTestGols=golsTest[list(fieldsTrain)].values
y = yTrainGols.ravel()
yTrainGols = np.array(y).astype(int)  # classifier requires integer labels
from sklearn.cross_validation import train_test_split
X_Golstrain, X_Golstest, y_Golstrain, y_test = train_test_split(xTrainGols, yTrainGols, test_size=0.30, random_state = 0)
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 100, criterion = 'entropy', random_state = 0, max_features=4)
classifier.fit(X_Golstrain, y_Golstrain)
# Predict on the held-out split (result unused: overwritten just below).
y_pred = classifier.predict(X_Golstest)
# Predict for the teams actually at the 2018 World Cup in Russia.
y_pred = classifier.predict(xTestGols)
gols=pd.DataFrame(y_pred).rename(columns={0: 'Qtde'})
xTestGols=pd.DataFrame(xTestGols)
golsSelecoes=pd.concat([xTestGols, gols], axis=1).rename(columns={0: 'time'})
# NOTE(review): .ix is deprecated (removed in pandas 1.0); use .loc/.iloc.
maiorGoleador=int(golsSelecoes.ix[golsSelecoes['Qtde'].idxmax()][0])
print('Time goleador da Copa Russia 2018: ' + getTeamName(maiorGoleador, teams))
#####################################################################################
# Build the predictive model for cards: which country collects the most
# yellow/red cards, from aggression/positioning/ball-control player stats.
#####################################################################################
# enrich the cards base with per-team aggression, positioning, ball control...
cartoes=prepareDatasetCards(cartoes)
cartoesApp=cartoes.drop(['nome'], axis=1)
target=['qtde']
# NOTE(review): set() makes the feature column order non-deterministic
# between runs; a stable ordering would make results reproducible.
features=list(set(list(cartoesApp.columns))-set(target))
xCartoesTrain=cartoesApp[list(features)].values
yCartoesTrain=cartoesApp[target].values
# load the test base used for the cards-model prediction
xCartoesTest=prepareDatasetCards(cartoesTest)
xCartoesTest=xCartoesTest.drop(['nome'], axis=1).values
X_Cartoestrain, X_Cartoestest, y_Cartoestrain, y_test = train_test_split(xCartoesTrain, yCartoesTrain, test_size=0.30, random_state = 0)
#classifier = RandomForestClassifier(n_estimators = 100, criterion = 'entropy', random_state = 0, max_features=4)
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_Cartoestrain, y_Cartoestrain)
# Predict on the held-out split (result unused: overwritten just below).
y_pred = classifier.predict(X_Cartoestest)
# Predict on the tournament squads.
y_pred = classifier.predict(xCartoesTest)
cartoesQuantidade=pd.DataFrame(y_pred).rename(columns={0: 'Qtde'})
xCartoesTest=pd.DataFrame(xCartoesTest)
predictCartoes=pd.concat([xCartoesTest, cartoesQuantidade], axis=1).rename(columns={0: 'time'})
maisCartoes=predictCartoes.groupby(['time'], as_index=False).Qtde.sum()
maisCartoes=int(maisCartoes.ix[maisCartoes['Qtde'].idxmax()][0])
print('Pais com mais cartoes amarelos e vermelhos: ' + getTeamName(maisCartoes, teams))
print('**** Quarto Lugar: ' + resultadosFinais(vencedorTerceiroQuarto)[1])
print('*** Terceiro Lugar: ' + resultadosFinais(vencedorTerceiroQuarto)[0])
print('** Vice-Campeao: ' + resultadosFinais(vencedorFinal)[1])
print('* Campeao: ' + resultadosFinais(vencedorFinal)[0])
# Generated by Django 3.1.5 on 2021-02-13 10:24
from django.db import migrations
import easy_thumbnails.fields
import users.models
class Migration(migrations.Migration):
    """Swap Course.image to a thumbnailer-backed field whose uploads get a
    unique file name via users.models.unique_file_name."""

    dependencies = [
        ('courses', '0010_auto_20210213_1302'),
    ]
    operations = [
        migrations.AlterField(
            model_name='course',
            name='image',
            field=easy_thumbnails.fields.ThumbnailerImageField(default='course/image.png', upload_to=users.models.unique_file_name, verbose_name='Изображение'),
        ),
    ]
|
"""
Author
--------
Best regards,
Sungjin (James) Kim, PhD
Postdoc, CCB in Harvard
sungjinkim@fas.harvard.edu
[Web] http://aspuru.chem.harvard.edu/james-sungjin-kim/
[Linkedin] https://www.linkedin.com/in/jamessungjinkim
[Facebook] https://www.facebook.com/jamessungjin.kim
[alternative email] jamessungjin.kim@gmail.com
Licence
---------
MIT License
"""
from __future__ import print_function
# I started to use __future__ so as to be compatible with Python3
import numpy as np
from sklearn import linear_model
from sklearn import cross_validation
from sklearn import metrics
import pandas as pd
from collections import OrderedDict
# To improve the speed, I using pyx.
import jpyx
import jutil
from jsklearn import codes
def mld( r_l, mod_l = [-0.70710678, 0.70710678]):
    """Maximum-likelihood detection.

    r_l: received signals after reception processing
    mod_l: list of all modulation signals (read-only here);
           BPSK default: [-0.70710678, 0.70710678]
    Returns the demodulated symbol indices (0, 1, ...) as an ndarray.

    Improvement: the nested Python loops are replaced by a broadcast
    distance matrix + argmin, giving identical results at C speed.
    """
    r_a = np.asarray(r_l)
    mod_a = np.asarray(mod_l)
    # squared distance from every received sample to every constellation point
    d2 = np.abs(r_a[:, np.newaxis] - mod_a[np.newaxis, :]) ** 2
    return np.argmin(d2, axis=1)
def calc_BER( r_l, x_l):
    """Calculate the bit error rate (BER).

    r_l: demodulated signals (ndarray, 1D)
    x_l: transmitted signals (ndarray, 1D)
    Returns (BER, Nerr). Raises ZeroDivisionError on empty input (as before).

    Fix: the original computed np.where(err != 0) twice; the index array is
    now computed once and reused.
    """
    err_l = np.asarray(r_l) - np.asarray(x_l)
    errs = np.where(err_l != 0)[0]
    Nerr = len(errs)
    return float(Nerr) / len(err_l), Nerr
def db2var(SNRdB):
    """Convert a decibel value to linear scale: 10**(dB/10)."""
    return 10.0 ** (SNRdB / 10.0)
def gen_BPSK(Nx, Nt):
"""
Generate BPSK modulated signals
"""
BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
s_a = np.random.randint( 0, 2, Nx * Nt)
x_flat_a = BPSK[ s_a]
x_a = np.reshape( x_flat_a, (Nx, Nt))
return BPSK, s_a, x_flat_a, x_a
def gen_H( Nr, Nt):
    """Draw an Nr x Nt channel matrix with i.i.d. standard-normal entries."""
    return np.random.randn( Nr, Nt)
def gen_Rx( Nr, Nx, SNR, H_a, x_a):
"""
The received signals are modeled.
"""
n_a = np.random.randn( Nr, Nx) / np.sqrt( SNR)
y_a = np.dot( H_a, x_a.T) + n_a
return y_a
def normalize( W_a):
    """Normalize every row of W_a to unit L2 norm, in place, and return it.

    Improvement: the per-row Python loop is replaced by one broadcast
    division; results and the in-place semantics are unchanged.
    A zero-norm row still produces a division-by-zero, as before.
    """
    nW_a = np.linalg.norm(W_a, axis=1)
    W_a[:, :] = W_a / nW_a[:, np.newaxis]
    return W_a
class MIMO(object):
"""
Modeling for a MIMO wireless communication system.
"""
def __init__(self, Nt = 2, Nr = 4, Nx = 10, SNRdB = 10, model = "Ridge", Npilot = 10, Nloop = 10):
"""
The parameter of 'model' determines the regression method.
"""
self.set_param( (Nt, Nr, Nx, SNRdB))
self.model = model
self.Npilot = Npilot
self.Nloop = Nloop
# The function of test_ridge_all() uses 3 cases for testing.
# self.N_test_ridge_all = 3
def set_param( self, param_NtNrNxSNRdB):
Nt, Nr, Nx, SNRdB = param_NtNrNxSNRdB
# The antenna configuration is conducted.
self.Nt = Nt
self.Nr = Nr
# No of streams is fixed.
self.Nx = Nx
# Initial SNR is defined
self.SNRdB = SNRdB
self.SNR = db2var(SNRdB)
    def _gen_BPSK_r0(self):
        """Legacy in-place variant of gen_BPSK(): draws self.Nx*self.Nt bits
        and stores the constellation, bits and symbols on the instance."""
        self.BPSK = np.array( [1, -1]) / np.sqrt( 2.0)
        self.s_a = np.random.randint( 0, 2, self.Nx * self.Nt)
        self.x_flat_a = self.BPSK[ self.s_a]
        self.x_a = np.reshape( self.x_flat_a, (self.Nx, self.Nt))
    def gen_BPSK( self):
        """Generate data bits and BPSK symbols via the module-level gen_BPSK().
        The same helper is reused elsewhere to generate pilot blocks."""
        self.BPSK, self.s_a, self.x_flat_a, self.x_a = gen_BPSK( self.Nx, self.Nt)
    def gen_H(self):
        """Draw a fresh Nr x Nt MIMO channel realization."""
        self.H_a = gen_H( self.Nr, self.Nt)
    def _gen_Rx_r0(self):
        """Legacy in-place variant of gen_Rx(): y = H x^T plus AWGN scaled by
        1/sqrt(SNR); also keeps the noise sample in self.n_a."""
        self.n_a = np.random.randn( self.Nr, self.Nx) / np.sqrt( self.SNR)
        self.y_a = np.dot( self.H_a, self.x_a.T) + self.n_a
    def gen_Rx(self):
        """Model the received signal for the current channel and symbols."""
        self.y_a = gen_Rx( self.Nr, self.Nx, self.SNR, self.H_a, self.x_a)
    def gen_WR_ideal(self):
        """Receive with perfect channel knowledge: W = pinv(H) (zero forcing),
        then decode. Each row of W_a is normalized inside gen_Decoding()."""
        self.W_a = np.linalg.pinv( self.H_a)
        # decoding applies the filter, demodulates and computes BER
        self.gen_Decoding()
    def gen_WR_pilot(self, pilot_SNRdB):
        """Receive with a noisy channel estimate: perturb H with AWGN sized by
        pilot_SNRdB and take the pseudo-inverse of the perturbed channel."""
        pilot_SNR = db2var(pilot_SNRdB)
        # channel-estimation error modeled as additive noise on H itself
        N_a = np.random.randn( *self.H_a.shape) / np.sqrt( pilot_SNR)
        Hp_a = self.H_a + N_a
        self.W_a = np.linalg.pinv( Hp_a)
        self.gen_Decoding()
    def gen_WR_pilot_channel(self, pilot_SNRdB):
        """Estimate the receive filter from actual pilot transmissions.

        Npilot random BPSK vectors are sent through the current channel at
        pilot_SNRdB; a least-squares fit from the received pilots back to the
        known symbols yields W.
        """
        Npilot = self.Npilot
        SNRpilot = db2var( pilot_SNRdB)
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        lm = linear_model.LinearRegression()
        lm.fit( yT_a, x_a)
        """
        Power normalization should be considered
        unless it is multiplied with both signal and noise.
        In this case, an MMSE-like weight is calculated, while
        pinv() would give the ZF filter.
        """
        self.W_a = lm.coef_
        self.gen_Decoding()
    def gs_pilot_reg_only(self, alpha_l):
        """Grid-search the regularization strengths in alpha_l.

        Returns one cross-validation summary row per alpha, concatenated into
        a single DataFrame; the caller later selects the best alpha from it.
        """
        pdo = pd.DataFrame()
        for alpha in alpha_l:
            pdi = self.cv_pilot_reg_only( alpha)
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # pd.concat is the modern replacement.
            pdo = pdo.append( pdi, ignore_index = True)
        return pdo
    def gs_pilot_reg_full(self, alpha_l):
        """Grid search over alpha_l after regenerating everything.

        "Full" means data, channel, received signal and pilots are all
        generated here before delegating to gs_pilot_reg_only().
        """
        self.gen_BPSK()
        self.gen_H()
        self.gen_Rx()
        self.rx_pilot()
        return self.gs_pilot_reg_only( alpha_l)
    def gs_pilot_reg_best(self, alpha_l):
        """Return the alpha in alpha_l with the lowest mean CV score.

        The "E[scores]" column is MSE-like, so smaller is better.
        """
        pdi = self.gs_pilot_reg_only( alpha_l)
        i_max = np.argmin( pdi["E[scores]"])
        alpha_best = pdi["alpha"][i_max]
        return alpha_best
    def gs_pilot_reg_best_full(self, alpha_l):
        """Find the best alpha after regenerating everything.

        "Full" means data, channel, received signal and pilots are all
        generated here before delegating to gs_pilot_reg_best().
        """
        self.gen_BPSK()
        self.gen_H()
        self.gen_Rx()
        self.rx_pilot()
        return self.gs_pilot_reg_best( alpha_l)
    def rx_pilot(self):
        """Transmit Npilot BPSK pilots over the current channel and stash the
        (received, sent) training pair in self.rx_p for later fitting."""
        Npilot = self.Npilot
        SNRpilot = self.SNR  # pilot SNR equals the data SNR here
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        self.rx_p = dict()
        self.rx_p["yT_a"] = yT_a
        self.rx_p["x_a"] = x_a
    def cv_pilot_only(self):
        """Leave-one-out cross-validation of plain linear regression on the
        stored pilot pair (self.rx_p); returns a one-row summary DataFrame.

        The pilot SNR is equal to the data SNR (see rx_pilot()).
        """
        yT_a = self.rx_p["yT_a"]
        x_a = self.rx_p["x_a"]
        lm = linear_model.LinearRegression()
        scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        # summarize into a single-row frame for easy concatenation later
        pdi = pd.DataFrame()
        pdi["model"] = ["LinearRegression"]
        pdi["alpha"] = [0]
        pdi["metric"] = ["mean_squared_error"]
        pdi["E[scores]"] = [np.mean(scores)]
        pdi["std[scores]"] = [np.std(scores)]
        pdi["scores"] = [scores]
        return pdi
    def cv_pilot( self):
        """Regenerate the pilot block, then run cv_pilot_only()."""
        self.rx_pilot()
        return self.cv_pilot_only()
    def _cv_pilot_reg_only_r0(self, alpha = 0):
        """Legacy variant of cv_pilot_reg_only(): returns the raw LOO
        cross-validation scores instead of a summary DataFrame."""
        model = self.model
        yT_a = self.rx_p["yT_a"]
        x_a = self.rx_p["x_a"]
        # alpha == 0 degenerates to ordinary least squares
        if alpha == 0:
            lm = linear_model.LinearRegression()
        else:
            lm = getattr( linear_model, model)(alpha)
        scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        return scores
    def cv_pilot_reg_only(self, alpha = 0):
        """Leave-one-out cross-validation of the configured regularized
        regressor on the stored pilot pair; returns a one-row summary.

        alpha: regularization strength; 0 falls back to plain linear
        regression. self.model names the linear_model class used otherwise.
        """
        model = self.model
        yT_a = self.rx_p["yT_a"]
        x_a = self.rx_p["x_a"]
        if alpha == 0:
            lm = linear_model.LinearRegression()
        else:
            lm = getattr( linear_model, model)(alpha)
        scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        # summarize into a single-row frame for easy concatenation later
        pdi = pd.DataFrame()
        pdi["model"] = [model]
        pdi["alpha"] = [alpha]
        pdi["metric"] = ["mean_squared_error"]
        pdi["E[scores]"] = [np.mean(np.power(scores,2))] # MSE
        pdi["std[scores]"] = ["t.b.d."]
        pdi["scores"] = [scores]
        return pdi
    def cv_pilot_reg( self, alpha = 0):
        """Regenerate the pilot block, then run cv_pilot_reg_only(alpha)."""
        self.rx_pilot()
        return self.cv_pilot_reg_only( alpha)
    def _cv_pilot_reg_r0(self, alpha = 0):
        """Legacy variant: generate fresh pilots and return the raw LOO
        cross-validation scores.

        The pilot SNR is equal to the data SNR (SNRdata).
        """
        Npilot = self.Npilot
        SNRpilot = self.SNR
        model = self.model
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        # alpha == 0 degenerates to ordinary least squares
        if alpha == 0:
            lm = linear_model.LinearRegression()
        else:
            lm = getattr( linear_model, model)(alpha)
        scores = codes.cross_val_score_loo( lm, yT_a, x_a)
        return scores
    def _gen_WR_pilot_ch_r0(self, pilot_SNRdB, alpha = 0):
        """Legacy pilot-based estimation fixed to Ridge regression, with a
        hard-coded pilot count of 10 (gen_WR_pilot_ch supersedes this)."""
        Npilot = 10
        SNRpilot = db2var( pilot_SNRdB)
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        lm = linear_model.Ridge( alpha)
        lm.fit( yT_a, x_a)
        self.W_a = lm.coef_
        self.gen_Decoding()
    def _gen_WR_pilot_ch_r1(self, pilot_SNRdB, alpha = 0, model = "Ridge"):
        """Legacy pilot-based estimation with a selectable regressor (Ridge or
        Lasso), still with a hard-coded pilot count of 10."""
        Npilot = 10
        SNRpilot = db2var( pilot_SNRdB)
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        # either Ridge or Lasso, looked up by name
        lm = getattr( linear_model, model)(alpha)
        lm.fit( yT_a, x_a)
        self.W_a = lm.coef_
        self.gen_Decoding()
    def gen_WR_pilot_ch(self, pilot_SNRdB, alpha_l1r = 0, model = "Ridge"):
        """Pilot-based channel estimation with a regularized regressor.

        alpha_l1r: regularization strength; for ElasticNet it is a pair
        (alpha, l1_ratio). model names the linear_model class.
        """
        Npilot = self.Npilot
        SNRpilot = db2var( pilot_SNRdB)
        BPSK, s_a, x_flat_a, x_a = gen_BPSK( Npilot, self.Nt)
        y_a = gen_Rx( self.Nr, Npilot, SNRpilot, self.H_a, x_a)
        yT_a = y_a.T
        # ElasticNet takes two parameters; Ridge/Lasso take one
        if model == "ElasticNet":
            lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
        else:
            lm = getattr( linear_model, model)(alpha_l1r)
        lm.fit( yT_a, x_a)
        self.W_a = lm.coef_
        self.gen_Decoding()
    def gen_WR_pilot_only(self, alpha_l1r = 0):
        """Fit the receive filter from the already-stored pilot pair (rx_p)
        and decode.

        alpha_l1r: regularization strength; 0 forces plain linear regression;
        for ElasticNet it is a pair (alpha, l1_ratio).
        """
        yT_a = self.rx_p["yT_a"]
        x_a = self.rx_p["x_a"]
        # alpha == 0 downgrades whatever self.model is to linear regression
        if alpha_l1r == 0:
            model = "LinearRegression"
        else:
            model = self.model
        if model == "LinearRegression":
            lm = linear_model.LinearRegression()
        elif model == "ElasticNet":
            lm = linear_model.ElasticNet( alpha_l1r[0], alpha_l1r[1])
        else: # This is either Ridge or Lasso
            lm = getattr( linear_model, model)(alpha_l1r)
        lm.fit( yT_a, x_a)
        self.W_a = lm.coef_
        self.gen_Decoding()
def gen_WR( self, pilot_SNRdB = None):
if pilot_SNRdB:
gen_WR_pilot( pilot_SNRdB)
else:
gen_WR_ideal()
    def gen_Decoding(self):
        """Filter, demodulate and count errors for the current received block.

        Applies the (row-normalized) receive filter, flattens to symbol order,
        demodulates with the compiled helper jpyx.mld_fast and stores
        self.BER / self.Nerr.
        """
        self.W_a = normalize( self.W_a) # not important (useless at this moment)
        self.rT_a = np.dot( self.W_a, self.y_a)
        self.r_flat_a = self.rT_a.T.flatten()
        self.sd_a = jpyx.mld_fast( self.r_flat_a, self.BPSK)
        # error count of demodulated vs transmitted bits (difference-based,
        # so the argument order is symmetric)
        self.BER, self.Nerr = calc_BER( self.s_a, self.sd_a)
    def run_ideal( self, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
        """Monte-Carlo BER with ideal CSI: run_pilot() without a pilot SNR."""
        return self.run_pilot( param_NtNrNxSNRdB = param_NtNrNxSNRdB, Nloop = Nloop, disp = disp)
    def run_pilot( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
        """End-to-end Monte-Carlo run; returns the mean BER over Nloop rounds.

        A new channel and noise are drawn each round; the same data bits are
        reused. pilot_SNRdB selects noisy (perturbed-H) channel knowledge;
        None means ideal CSI. param_NtNrNxSNRdB optionally reconfigures first.
        """
        if param_NtNrNxSNRdB:
            self.set_param( param_NtNrNxSNRdB)
        self.gen_BPSK()
        BER_l = list()
        Nerr_total = 0
        for nloop in range( Nloop):
            self.gen_H()
            self.gen_Rx()
            if pilot_SNRdB is not None:
                self.gen_WR_pilot( pilot_SNRdB)
            else:
                self.gen_WR_ideal()
            BER_l.append( self.BER)
            Nerr_total += self.Nerr
        self.BER = np.mean( BER_l)
        if disp:
            # total transmitted bits across all rounds
            Ntot = self.Nt * self.Nx * Nloop
            print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
        return self.BER
    def run_pilot_channel( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, disp = False):
        """Monte-Carlo BER using pilot-transmission channel estimation.

        Like run_pilot(), but a given pilot_SNRdB estimates the filter from
        actual pilot transmissions (gen_WR_pilot_channel); None means ideal CSI.
        """
        if param_NtNrNxSNRdB:
            self.set_param( param_NtNrNxSNRdB)
        self.gen_BPSK()
        BER_l = list()
        Nerr_total = 0
        for nloop in range( Nloop):
            self.gen_H()
            self.gen_Rx()
            if pilot_SNRdB is not None:
                self.gen_WR_pilot_channel( pilot_SNRdB)
            else:
                self.gen_WR_ideal()
            BER_l.append( self.BER)
            Nerr_total += self.Nerr
        self.BER = np.mean( BER_l)
        if disp:
            # total transmitted bits across all rounds
            Ntot = self.Nt * self.Nx * Nloop
            print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
        return self.BER
    def run_pilot_ch( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
        """Monte-Carlo BER using regularized pilot estimation (gen_WR_pilot_ch)."""
        if param_NtNrNxSNRdB:
            self.set_param( param_NtNrNxSNRdB)
        self.gen_BPSK()
        BER_l = list()
        Nerr_total = 0
        for nloop in range( Nloop):
            self.gen_H()
            self.gen_Rx()
            # NOTE(review): truthiness means pilot_SNRdB == 0 dB takes the
            # ideal branch here; sibling run_pilot* methods use `is not None`
            # — confirm this difference is intended.
            if pilot_SNRdB:
                self.gen_WR_pilot_ch( pilot_SNRdB, alpha)
            else:
                self.gen_WR_ideal()
            BER_l.append( self.BER)
            Nerr_total += self.Nerr
        self.BER = np.mean( BER_l)
        if disp:
            # total transmitted bits across all rounds
            Ntot = self.Nt * self.Nx * Nloop
            print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
        return self.BER
    def test_ridge_iter( self, alpha_l):
        """
        Generator yielding an integer ID per receiver configuration after
        running it: 0 = ideal ZF, 1..len(alpha_l) = Ridge with each alpha,
        len(alpha_l)+1 = Ridge with the grid-search best alpha.

        Side effects before each yield: sets self.method / self.model /
        self.alpha and computes W/R via the gen_WR_* helpers.
        """
        # Ideal ZF(H): perfect channel knowledge as the baseline.
        ID = 0
        self.method = "Ideal ZF(H)"
        self.model = "ZF"
        self.alpha = 0
        self.gen_WR_ideal()
        yield ID
        # Multiple Ridge regressions, one per candidate alpha in alpha_l.
        for alpha in alpha_l:
            ID += 1
            self.method = "Ridge each"
            self.model = "Ridge"
            self.alpha = alpha
            self.gen_WR_pilot_only( self.alpha)
            yield ID
        # Ridge regression with the best alpha among alpha_l.
        ID += 1
        self.method = "Ridge best"
        self.model = "Ridge"
        self.alpha = self.gs_pilot_reg_best( alpha_l)
        self.gen_WR_pilot_only( self.alpha)
        yield ID
    def test_ridge_all( self, pdi_d_prev, alpha_l):
        """
        Run one Monte-Carlo round of:
        1. LinearRegression (ideal ZF)
        2. multiple Ridge regression with each alpha in alpha_l
        3. Ridge regression with the best alpha among alpha_l
        and accumulate per-method results into a dict of one-row DataFrames
        keyed by the ID yielded from test_ridge_iter().

        pdi_d_prev: accumulator from the previous round, or None on round one.
        Returns the updated accumulator dict.
        """
        # The accumulator dict is created only once (first round).
        if pdi_d_prev is None:
            pdi_d = dict()
        else:
            pdi_d = pdi_d_prev
        for ID in self.test_ridge_iter(alpha_l):
            # First round: seed the per-ID accumulators with initial values.
            # Later rounds: extend the error count and BER sample list.
            if pdi_d_prev is None:
                pdi = pd.DataFrame()
                pdi["Nerr_total"] = [0]
                pdi["BER_l"] = [[self.BER]]
            else:
                pdi = pdi_d[ ID]
                pdi["Nerr_total"] = [ pdi["Nerr_total"][0] + self.Nerr]
                pdi["BER_l"] = [pdi["BER_l"][0] + [self.BER]]
            pdi["method"] = [self.method]
            pdi["model"] = [self.model]
            pdi["alpha"] = [self.alpha]
            # Running mean over all BER samples accumulated so far.
            pdi["BER"] = [np.mean( pdi["BER_l"][0])]
            pdi_d[ ID] = pdi
        return pdi_d
def run_gs_pilot_Ridge( self, alpha_l):
"""
Search the best alpha using Ridge.
I focus on Ridge for simplicity at this moment.
Other regularization modes will be used later on.
"""
Nloop = self.Nloop
pdi_d = None
for nloop in range( Nloop):
self.gen_BPSK()
self.gen_H()
self.gen_Rx()
# For fair comparision, pilot is also generated commonly for all methods.
self.rx_pilot()
pdi_d = self.test_ridge_all( pdi_d, alpha_l)
pdo = pd.DataFrame()
for pdi in pdi_d.values():
pdo = pdo.append( pdi, ignore_index = True)
return pdo
def run_pilot_ch_model( self, pilot_SNRdB = None, param_NtNrNxSNRdB = None, Nloop = 10, alpha = 0, disp = False):
"""
A system is run from the transmitter to the receiver.
self.model is used to determine the regression model such as Ridge and Lasso
"""
if param_NtNrNxSNRdB:
self.set_param( param_NtNrNxSNRdB)
self.gen_BPSK()
BER_l = list()
Nerr_total = 0
for nloop in range( Nloop):
self.gen_H()
self.gen_Rx()
if pilot_SNRdB is not None: # 'is' needed for checking None
# self.gen_WR_pilot( pilot_SNRdB)
# self.gen_WR_pilot_channel( pilot_SNRdB)
self.gen_WR_pilot_ch( pilot_SNRdB, alpha, self.model)
else:
self.gen_WR_ideal()
BER_l.append( self.BER)
Nerr_total += self.Nerr
self.BER = np.mean( BER_l)
if disp:
Ntot = self.Nt * self.Nx * Nloop
print( "BER is {} with {}/{} errors at {} SNRdB ".format( self.BER, Nerr_total, Ntot, self.SNRdB))
return self.BER
def get_BER_pilot_ch_model_eqsnr(
self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_ch = False,
alpha = 0,
model = "Ridge"):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
"""
Nt, Nr, Nx = param_NtNrNx
BER_pilot = list()
for SNRdB in SNRdB_l:
# if pilot channel is used, SNRdB is given
# Otherwise, ideal channel estimation is assumed.
if pilot_ch:
pilot_SNRdB = SNRdB
else:
pilot_SNRdB = None
if alpha > 0:
"""
Ridge or Lasso is used.
"""
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER_pilot_ch_model( self,
SNRdB_l = [5,6,7],
param_NtNrNx = (2,4,100),
Nloop = 1000,
pilot_SNRdB = None,
alpha = 0,
model = "Ridge"):
"""
Ridge regression will be using to estimate channel.
If alpha is zero, linear regression will be applied.
If alpha is more than zero, Ridge regression will be applied.
The default value of alpha is zero.
This function becomes a member function of class MIMO.
"""
BER_pilot = list()
Nt, Nr, Nx = param_NtNrNx
if alpha > 0:
"""
Ridge or Lasso is used.
"""
for SNRdB in SNRdB_l:
self.model = model
ber = self.run_pilot_ch_model( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, alpha = alpha, disp = True)
BER_pilot.append( ber)
else:
"""
LinearRegression is used.
"""
for SNRdB in SNRdB_l:
ber = self.run_pilot_channel( pilot_SNRdB = pilot_SNRdB,
param_NtNrNxSNRdB =(Nt, Nr, Nx, SNRdB), Nloop = Nloop, disp = True)
BER_pilot.append( ber)
# print( "List of average BERs =", BER_pilot)
return BER_pilot
def get_BER(SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None):
    """
    Sweep SNRdB_l and collect the mean BER of a fresh MIMO system per point.

    pilot_SNRdB: pilot SNR in dB, or None for ideal channel estimation.
    Returns the list of mean BERs, one per entry of SNRdB_l.
    """
    Nt, Nr, Nx = param_NtNrNx
    return [
        MIMO().run_pilot(pilot_SNRdB=pilot_SNRdB,
                         param_NtNrNxSNRdB=(Nt, Nr, Nx, SNRdB), Nloop=Nloop, disp=True)
        for SNRdB in SNRdB_l
    ]
def get_BER_pilot_ch(SNRdB_l = [5,6,7], param_NtNrNx = (2,4,100), Nloop = 1000, pilot_SNRdB = None, alpha = 0):
    """
    Sweep SNRdB_l and collect the mean BER with pilot-based channel estimation.

    If alpha is zero, plain linear regression is applied; if alpha is greater
    than zero, Ridge regression is applied (the original branch comments said
    the opposite — they were swapped relative to get_BER_pilot_ch_model()).
    Returns the list of mean BERs, one per entry of SNRdB_l.
    """
    BER_pilot = list()
    Nt, Nr, Nx = param_NtNrNx
    if alpha > 0:
        # Ridge-regularized channel estimation.
        for SNRdB in SNRdB_l:
            ber = MIMO().run_pilot_ch(pilot_SNRdB=pilot_SNRdB,
                param_NtNrNxSNRdB=(Nt, Nr, Nx, SNRdB), Nloop=Nloop, alpha=alpha, disp=True)
            BER_pilot.append(ber)
    else:
        # Plain LinearRegression channel estimation.
        for SNRdB in SNRdB_l:
            ber = MIMO().run_pilot_channel(pilot_SNRdB=pilot_SNRdB,
                param_NtNrNxSNRdB=(Nt, Nr, Nx, SNRdB), Nloop=Nloop, disp=True)
            BER_pilot.append(ber)
    return BER_pilot
def get_BER_pilot_ch_model(
        SNRdB_l = [5,6,7],
        param_NtNrNx = (2,4,100),
        Nloop = 1000,
        pilot_SNRdB = None,
        alpha = 0,
        model = "Ridge"):
    """
    Sweep SNRdB_l and collect mean BERs using a regression model for channel
    estimation: Ridge/Lasso when alpha > 0, plain linear regression otherwise.
    A fresh MIMO instance is built for every SNR point.
    """
    Nt, Nr, Nx = param_NtNrNx
    ber_out = []
    for SNRdB in SNRdB_l:
        if alpha > 0:
            # Regularized estimation with the requested model.
            ber = MIMO(model=model).run_pilot_ch_model(pilot_SNRdB=pilot_SNRdB,
                param_NtNrNxSNRdB=(Nt, Nr, Nx, SNRdB), Nloop=Nloop, alpha=alpha, disp=True)
        else:
            # Plain LinearRegression estimation.
            ber = MIMO().run_pilot_channel(pilot_SNRdB=pilot_SNRdB,
                param_NtNrNxSNRdB=(Nt, Nr, Nx, SNRdB), Nloop=Nloop, disp=True)
        ber_out.append(ber)
    return ber_out
def pd_gen_4_snr_pilot(Method, BER_l, alpha = None, Npilot = 10,
                       sim_task = "Fixed SNRpilot", pilot_SNRdB = 7,
                       param_NtNrNx = (2,10,100), SNRdB_l = range(-5, 5, 5)):
    """
    Build one result DataFrame shared by fixed_snr_pilot() and snr_snr_pilot().

    pilot_SNRdB may be a scalar (broadcast to every row) or a per-SNR sequence
    (list/tuple/range) aligned with BER_l.
    alpha: regularization strength, or None when not applicable.
    Returns a DataFrame with one row per entry of BER_l.
    """
    n = len(BER_l)
    pdi = pd.DataFrame()
    pdi["Simulation task"] = [sim_task] * n
    pdi["Method"] = [Method] * n
    # Fix: snr_snr_pilot() passes a range here; the original `type(...) is list`
    # test missed it and broadcast the range object as a single scalar value.
    if isinstance(pilot_SNRdB, (list, tuple, range)):
        pdi["SNRpilot"] = list(pilot_SNRdB)
    else:
        pdi["SNRpilot"] = [pilot_SNRdB] * n
    pdi["#pilots"] = [Npilot] * n
    pdi["Nt,Nr,Nx"] = [param_NtNrNx] * n
    if alpha is None:
        pdi["alpha"] = ["Not defined"] * n
    else:
        pdi["alpha"] = [alpha] * n
    pdi["SNR"] = list(SNRdB_l)
    pdi["BER"] = BER_l
    return pdi
def fixed_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100), pilot_SNRdB = 7,
                     alpha_l = [0.01, 0.1, 1, 10, 100], Nloop = 5000):
    """
    Simulate BER across SNRdB_l with the pilot SNR held fixed at pilot_SNRdB.

    Collects results for the ideal ZF receiver, plain MMSE pilot estimation
    (alpha = 0), and Ridge estimation with each alpha in alpha_l into one
    pandas DataFrame.
    """
    def pd_gen(Method, BER_l, alpha = None, Npilot = 10):
        """
        Thin wrapper around pd_gen_4_snr_pilot() baking in this simulation's
        constants (task name, pilot SNR, antenna configuration, SNR sweep).
        """
        return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, Npilot = Npilot, alpha = alpha,
                                   sim_task = "Fixed SNRpilot", pilot_SNRdB = pilot_SNRdB,
                                   param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
    pdi_l = list()
    # Baseline: ideal channel knowledge (no pilot).
    BER_l = get_BER( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = None)
    pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
    # Pilot-based estimation without regularization (alpha = 0, i.e. MMSE).
    BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop, pilot_SNRdB = pilot_SNRdB)
    pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
    # Ridge-regularized estimation, one full sweep per candidate alpha.
    for alpha in alpha_l:
        BER_l = get_BER_pilot_ch( SNRdB_l, param_NtNrNx = param_NtNrNx, Nloop = Nloop,
                                  pilot_SNRdB = pilot_SNRdB, alpha = alpha)
        pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
    pdo = pd.concat( pdi_l, ignore_index = True)
    return pdo
def snr_snr_pilot( SNRdB_l = range(-5, 5, 1), param_NtNrNx = (2,10,100),
                   alpha_l = [0.01, 0.1, 1, 10, 100], Npilot = 15, Nloop = 5000):
    """
    Simulate BER with the pilot SNR tracking the data SNR (SNRpilot = SNR).

    Collects results for the ideal ZF receiver, plain MMSE pilot estimation
    (alpha = 0), and Ridge estimation with each alpha in alpha_l into one
    pandas DataFrame.
    """
    def pd_gen(Method, BER_l, alpha = None):
        """
        Thin wrapper around pd_gen_4_snr_pilot() baking in this simulation's
        constants; note pilot_SNRdB is the whole SNR sweep (per-row values).
        """
        return pd_gen_4_snr_pilot( Method = Method, BER_l = BER_l, alpha = alpha,
                                   Npilot = Npilot, sim_task = "SNRpilot = SNR", pilot_SNRdB = SNRdB_l,
                                   param_NtNrNx = param_NtNrNx, SNRdB_l = SNRdB_l)
    pdi_l = list()
    # One MIMO instance is reused for all sweeps below.
    mlm = MIMO( Npilot = Npilot)
    print( "Ideal channel estimation without considering noise: ZF decoding with perfect H")
    BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
                                              Nloop = Nloop, pilot_ch = False)
    pdi_l.append( pd_gen( "Ideal, ZF Rx", BER_l))
    print( "General channel estimation: MMSE decoding with H and noise")
    BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
                                              Nloop = Nloop, pilot_ch = True)
    pdi_l.append( pd_gen( r"Pilot, $\alpha$=0 (MMSE)", BER_l, alpha = 0))
    print( "Ridge channel estimation: MMSE decoding with H and noise")
    for alpha in alpha_l:
        print( "Ridge with alpha =", alpha)
        BER_l = mlm.get_BER_pilot_ch_model_eqsnr( SNRdB_l, param_NtNrNx = param_NtNrNx,
                                                  Nloop = Nloop, pilot_ch = True, alpha = alpha, model = "Ridge")
        pdi_l.append( pd_gen( r"Pilot, $\alpha$={}".format(alpha),BER_l, alpha))
    pdo = pd.concat( pdi_l, ignore_index = True)
    return pdo
|
import webapp2
import os
import jinja2
import cgi
import re
import random
import string
import hashlib #various hashing algorithms
import hmac #hash-based message authentication code - for generating secret keys
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
# use autoescape to opt into escaping variables. Saves you from security holes if you don't filter
# variables in your templates with '| safe' or '| escape'
#
#####
##### Password Validation
#####
# Regular-expression constants for form validation. Raw strings silence the
# Python 3 "invalid escape sequence" warnings without changing the patterns.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASSWORD_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
COOKIE_RE = re.compile(r'.+=;\s*Path=/')
def valid_name(text):
    """Return the match object when text is a valid username (3-20 chars of [a-zA-Z0-9_-]), else None."""
    return USER_RE.match(text)
def valid_password(passw):
    """Return the match object when passw is 3-20 characters long, else None."""
    return PASSWORD_RE.match(passw)
def valid_verify(passw, verify):
    """Return True when both password fields are filled in and match.

    Returns False otherwise. (The original fell off the end and returned None
    when either field was empty; callers only test truthiness, so returning
    an explicit False is backward compatible.)
    """
    return bool(passw and verify and passw == verify)
def valid_email(email):
    """Return True for an empty email (optional field), otherwise the regex
    match object when the address looks valid, else None."""
    if email == "":
        return True
    return EMAIL_RE.match(email)
def escape_html(s):
    """HTML-escape s, including quote characters.

    NOTE(review): cgi.escape was removed in Python 3.8; a modern runtime
    would need html.escape instead. This file targets Python 2 (see the
    `print` statement in LogoutHandler).
    """
    return cgi.escape(s, quote = True)
class Handler(webapp2.RequestHandler):
    """Base request handler with shorthand helpers for writing responses and
    rendering Jinja templates from the module-level jinja_env."""
    # *a / **kw collect positional and keyword arguments in the definition
    # and unpack them again in the call, forwarding everything verbatim.
    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        """Render the named Jinja template with **params and return the string."""
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        """Render the named template and write the result to the response."""
        self.write(self.render_str(template,**kw))
class SignupHandler(Handler):
    """Signup form: validates the submitted fields, sets the login cookie
    ('name|salt|hash'), and redirects to /welcome on success."""
    def render_form(self, name_error="", username="", pass_error="", verify_error="", email_error="", email=""):
        # Re-render the form, echoing previous input plus any error messages.
        self.render("signup.html",
                    name_error=name_error,
                    username=username,
                    pass_error=pass_error,
                    verify_error=verify_error,
                    email_error=email_error,
                    email=email)
    def get(self):
        self.render_form()
    def post(self):
        # Raw values from the form fields.
        user_username = self.request.get('username')
        user_password = self.request.get('password')
        user_verify = self.request.get('verify')
        user_email = self.request.get('email')
        # Validators return truthy data on success, or None/False on failure.
        username = valid_name(user_username)
        password = valid_password(user_password)
        verify = valid_verify(user_password, user_verify)
        email = valid_email(user_email)
        user_cookie_str = self.request.cookies.get('name')
        name_error = ""
        if not username:
            name_error = "That's not a valid username."
        if user_cookie_str:
            # Already-signed-up detection: the cookie's name part matches the
            # submitted username.
            if user_username == get_user(user_cookie_str):
                name_error = "You've already signed up"
        pass_error = "That's not a valid password." if not password else ""
        verify_error = "Passwords do not match." if not verify else ""
        email_error = "That's not a valid email address." if not email else ""
        # Any error: re-render the form with messages and the echoed input.
        if (name_error or pass_error or verify_error or email_error):
            self.render_form(
                name_error, user_username,
                pass_error,
                verify_error,
                email_error, user_email
            )
        # Otherwise store 'name|salt|hash' as the login cookie and redirect.
        else:
            self.response.headers['Content-Type'] = 'text/plain'
            user_id = str(make_pw_hash(user_username, user_password))
            self.response.headers.add_header('Set-Cookie', 'name=%s; Path=/' % user_id)
            self.redirect('/welcome')
class LoginHandler(Handler):
    """Login form: checks the submitted credentials against the stored
    login cookie ('name|salt|hash')."""
    def render_form(self, invalid_login=""):
        self.render("login.html", invalid_login=invalid_login)
    def get(self):
        self.render_form()
    def post(self):
        # Raw values from the form fields.
        user_username = self.request.get('username')
        user_password = self.request.get('password')
        user_cookie_str = self.request.cookies.get('name')
        invalid_login = ""
        if user_cookie_str:
            # Re-hash the submitted credentials with the cookie's salt and compare.
            if not valid_pw(user_username, user_password, user_cookie_str):
                invalid_login = "Invalid Login"
        # NOTE(review): a missing cookie leaves invalid_login empty and falls
        # through to the welcome redirect — confirm whether that is intended.
        if (invalid_login):
            self.render_form(invalid_login)
        else:
            self.redirect('/welcome')
class LogoutHandler(Handler):
    """Logout: clears the login cookie and redirects to the signup page."""
    def get(self):
        user_cookie_str = self.request.cookies.get('name')
        if user_cookie_str:
            self.response.delete_cookie('name')
            # Removed: a leftover Python-2 `print valid_cookie(...)` debug
            # statement that dumped the auth cookie check to stdout.
        self.redirect('/signup')
class WelcomeHandler(Handler):
    """Welcome page: greets the user named in the login cookie, or sends
    visitors without a cookie back to signup."""
    def get(self):
        # The cookie holds 'name|salt|hash'; get_user() extracts the name part.
        user_cookie_str = self.request.cookies.get('name')
        if user_cookie_str:
            user = get_user(user_cookie_str)
            if user:
                msg = "Welcome, %s!" % user
            else:
                msg = "Whoops, something went wrong"
            self.render("welcome.html", msg=msg)
        else:
            self.redirect('/signup')
#####
##### Hashing Functions
#####
SECRET = 'mysecret'
def make_salt():
    """Return a 5-character random alphabetic salt.

    Uses random.SystemRandom (OS entropy) instead of the default Mersenne
    Twister, which is not suitable for security-sensitive values, and uses
    string.ascii_letters / range so the helper runs on both Python 2 and 3
    (the original string.letters / xrange are Python-2-only).
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(string.ascii_letters) for _ in range(5))
def hash_str(s):
    """Return the hex HMAC digest of s keyed with the module-level SECRET.

    NOTE(review): on Python 3, hmac.new requires bytes and an explicit
    digestmod; this form only runs on Python 2, like the rest of this file.
    """
    return hmac.new(SECRET, s).hexdigest()
# def make_secure_val(s):
# return str(s) + '|' + hash_str(s)
# def check_secure_val(h):
# val = h.split('|')[0]
# s = h.split('|')[2]
# print "val " + val
# print "s " + s
# if s == make_secure_val(val):
# return val
def make_pw_hash(name, pw, salt = None):
    """Return 'name|salt|hash' where hash = sha256(name + pw + salt).

    A fresh random salt is generated when none is supplied; passing the salt
    back in (see valid_pw) reproduces the same hash for verification.
    NOTE(review): a single unstretched sha256 is weak for password storage —
    PBKDF2/bcrypt/scrypt would be preferable.
    """
    if not salt:
        salt = make_salt()
    h = hashlib.sha256(name + pw + salt).hexdigest()
    return '%s|%s|%s' % (name, salt, h)
def valid_pw(name, pw, h):
    """Return h when (name, pw) re-hashes to h using the salt embedded in h.

    h has the 'name|salt|hash' form produced by make_pw_hash(); returns None
    on mismatch (callers test truthiness).
    """
    # Extract the salt from the stored value.
    salt = h.split('|')[1]
    # compare_digest avoids leaking the length of the matching prefix via
    # timing (hmac is already imported at the top of this file).
    if hmac.compare_digest(h, make_pw_hash(name, pw, salt)):
        return h
def get_user(cookie):
    """Return the username portion (text before the first '|') of a
    'name|salt|hash' login cookie."""
    return cookie.partition('|')[0]
def valid_cookie(cookie):
    """Return a truthy match when cookie is non-empty and looks like an
    emptied 'name=; Path=/' cookie (per COOKIE_RE), else a falsy value."""
    return cookie and COOKIE_RE.match(cookie)
#####
##### Blog Pages
#####
class Entry(db.Model):
    """A blog post stored in the App Engine datastore."""
    subject = db.StringProperty(required = True)
    content = db.TextProperty(required = True)
    # Set automatically when the entity is first saved.
    created = db.DateTimeProperty(auto_now_add = True)
class NewPostForm(Handler):
    """Form for creating a new blog post."""
    def render_form(self, subject="", content="", error=""):
        self.render("newpost.html", subject=subject, content=content, error=error)
    def get(self):
        self.render_form()
    def post(self):
        subject = self.request.get("subject")
        content = self.request.get("content")
        if(subject and content):
            # Persist the entry, then redirect to its permalink so a browser
            # reload does not resubmit the form.
            e = Entry(subject = subject, content = content)
            e.put()
            post_id = str(e.key().id())
            self.redirect('/' + post_id)
        else:
            # Missing field(s): re-render the form with an error message.
            error = "please enter some values"
            self.render_form(subject, content, error=error)
# inherits from Handler
class BlogPage(Handler):
    """Front page: lists all blog entries, newest first."""
    def render_front(self):
        # GQL query over the Entry kind, ordered by creation time descending.
        entries = db.GqlQuery("SELECT * FROM Entry ORDER BY created DESC")
        self.render("recentposts.html", entries=entries)
    def get(self):
        self.render_front()
class Single(Handler):
    """Permalink page for a single entry, addressed by its numeric datastore id."""
    def render_single(self, post_id):
        # The router captures the id as a string; the datastore wants an int.
        post_id = int(post_id)
        entry = Entry.get_by_id(post_id)
        self.render("single.html", entry=entry)
    def get(self, post_id):
        self.render_single(post_id)
# URL router: maps request paths to their handler classes.
app = webapp2.WSGIApplication([
    ('/', BlogPage),
    ('/signup', SignupHandler),
    ('/login', LoginHandler),
    ('/logout', LogoutHandler),
    ('/welcome', WelcomeHandler),
    ('/newpost', NewPostForm),
    ('/(\d+)', Single)  # the captured numeric post id is passed to Single.get()
], debug=True)
|
from unittest import mock
from testbase import *
from live_client.events import raw
DEFAULT_EVENT = {}
A_TIMESTAMP = 1582927835001
DEFAULT_TIMESTAMP = 0
class TestCreateEvent:
    """Tests for raw.create(): debug logging and event dispatch."""
    @mock.patch("live_client.connection.autodetect.build_sender_function", lambda _: no_action)
    @mock.patch("live_client.utils.logging.debug")
    def test_message_logged(self, debug_mock):
        # create() should log the event type and payload at debug level.
        event_type = "__event_type__"
        event_data = {}
        settings = {"live": "__live__"}
        raw.create(event_type, event_data, settings)
        debug_mock.assert_called_with(f'Creating raw event of type "{event_type}": {event_data}')
    @mock.patch("live_client.utils.logging.debug", no_action)
    def test_event_sent(self):
        # Capture whatever create() hands to the sender function.
        buffer = []
        def collect(event):
            nonlocal buffer
            buffer.append(event)
        with mock.patch(
            "live_client.connection.autodetect.build_sender_function", lambda _: collect
        ):
            event_type = "__event_type__"
            event_data = {"data": {}}
            settings = {"live": "__live__"}
            raw.create(event_type, event_data, settings)
        # The payload's "data" dict must be forwarded untouched (same object).
        assert len(buffer) > 0
        assert buffer[0]["data"] is event_data["data"]
class TestFormatEvent:
    """Tests for raw.format_event(): copy semantics and injected fields."""
    def test_formatted_event_is_not_the_original(self):
        # format_event must return a new dict, not mutate its input.
        event = DEFAULT_EVENT
        formatted_event = raw.format_event(event, 0, "_")
        assert event is not formatted_event
    def test_formatted_event_has_event_type(self):
        # The event type lands in the "__type" key.
        event_type = "__event_type__"
        event = raw.format_event(DEFAULT_EVENT, event_type, "_")
        assigned_type = event.get("__type")
        assert assigned_type is not None and assigned_type == event_type
    def test_formatted_event_has_timestamp(self):
        # The timestamp lands in the "liverig__index__timestamp" key.
        timestamp = get_timestamp()
        event = raw.format_event(DEFAULT_EVENT, "_", timestamp)
        assigned_timestamp = event.get("liverig__index__timestamp")
        assert assigned_timestamp is not None and assigned_timestamp == timestamp
class TestFormatAndSend:
    """Tests for raw.format_and_send(): timestamp normalization."""
    # Class-level default; mock_connection_func shadows it with an instance
    # attribute holding the most recently "sent" event.
    event_data = None
    def mock_connection_func(self, event):
        # Stands in for the live connection; just records the outgoing event.
        self.event_data = event
    def test_timestamp_prop_is_removed(self):
        # The raw "timestamp" key must not survive formatting.
        raw.format_and_send({"timestamp": "_"}, "_", self.mock_connection_func)
        assert self.event_data.get("timestamp") is None
    def test_timestamp_is_formatted(self):
        # An existing timestamp is moved to "liverig__index__timestamp".
        raw.format_and_send({"timestamp": "_"}, "_", self.mock_connection_func)
        assert self.event_data.get("liverig__index__timestamp") is not None
    def test_timestamp_is_inserted(self):
        # Even without a "timestamp" key, an index timestamp is added.
        raw.format_and_send({}, "_", self.mock_connection_func)
        assert self.event_data.get("liverig__index__timestamp") is not None
|
import unittest
import numpy as np
from OptimalSpline import OptimalSpline
class SplineTest(unittest.TestCase):
    """Unit tests for OptimalSpline: evaluation, derivatives, coefficient layout.

    The 3-segment cubic test spline uses knots ts = [0, 1, 2, 3]; after the
    transpose, each column of `c` holds one segment's polynomial coefficients.
    """
    def test_at_waypoints(self):
        # The spline must reproduce its control values at every knot.
        c = np.array([[-1, 3, 0, 2], [-1, 0, 3, 4], [-1, -3, 0, 6]]).transpose()
        ts = [0, 1, 2, 3]
        s = OptimalSpline(c, ts)
        self.assertEqual(s.val(0, 0), 2)
        self.assertEqual(s.val(0, 1), 4)
        self.assertEqual(s.val(0, 2), 6)
        self.assertEqual(s.val(0, 3), 2)
    def test_interpolation(self):
        # Values between knots and outside the knot range (extrapolation).
        c = np.array([[-1, 3, 0, 2], [-1, 0, 3, 4], [-1, -3, 0, 6]]).transpose()
        ts = [0, 1, 2, 3]
        s = OptimalSpline(c, ts)
        self.assertAlmostEqual(s.val(0, 4), -14)
        self.assertAlmostEqual(s.val(0, -1), 6)
        self.assertAlmostEqual(s.val(0, 0.8), 3.408)
        self.assertAlmostEqual(s.val(0, 2.8), 3.568)
    def test_derivatives(self):
        # First, second and third derivatives at knots and midpoints; the third
        # derivative of a cubic is constant.
        c = np.array([[-1, 3, 0, 2], [-1, 0, 3, 4], [-1, -3, 0, 6]]).transpose()
        ts = [0, 1, 2, 3]
        s = OptimalSpline(c, ts)
        self.assertEqual(s.val(1, 0), 0)
        self.assertEqual(s.val(1, 1), 3)
        self.assertEqual(s.val(1, 2), 0)
        self.assertEqual(s.val(1, 3), -9)
        self.assertAlmostEqual(s.val(1, 0.5), 2.25)
        self.assertAlmostEqual(s.val(1, 2.5), -3.75)
        self.assertEqual(s.val(2, 0), 6)
        self.assertEqual(s.val(2, 1), 0)
        self.assertEqual(s.val(2, 2), -6)
        self.assertEqual(s.val(2, 3), -12)
        self.assertAlmostEqual(s.val(2, 0.5), 3)
        self.assertAlmostEqual(s.val(2, 2.5), -9)
        self.assertEqual(s.val(3, 0), -6)
        self.assertEqual(s.val(3, 1), -6)
        self.assertEqual(s.val(3, 2), -6)
        self.assertEqual(s.val(3, 3), -6)
        self.assertEqual(s.val(3, 0.5), -6)
        self.assertEqual(s.val(3, 2.5), -6)
    def test_coefficients(self):
        # Round-trip: the flattened internal coefficient vector must match the
        # randomly generated input layout.
        num_segments = 4
        order = 5
        ts = [0, 1, 3, 4, 5]
        coefficients = np.random.rand(num_segments * (order+1))
        c = np.fliplr(coefficients.reshape((num_segments, order+1)))
        s = OptimalSpline(c.transpose(), ts)
        self.assertTrue((s._get_coeff_vector() == coefficients).all())
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
"""Crie um programa onde o user possa digitar 5 valores numéricos e cadastre-os em uma lista_geral.
já na posição correta de inserção (sem usar o sort()).
No final, mostre a lista_geral ordenada na tela."""
lista = []
for c in range(0, 5):
print('-' * 30)
num = int(input(f'Digite o {c+1}° valor: '))
if c == 0 or num > lista[-1]: # Se ele é primeir ou maior que o último
lista.append(num)
print('Adicionei no \033[2:33mFINAL\033[m da lista_geral')
else:
pos = 0
while pos < len(lista):
if num <= lista[pos]:
lista.insert(pos, num) # Faz uma varredura na lista_geral afim de verificar se os valores inseridos são
# maiores que o user digitou
print(f'Adicionei na posição \033[33m{pos}\033[m da lista_geral')
break
pos += 1
print('='*30)
print(f'Os valores digitados em ordem foram {lista}')
|
from django.db import models
from django.contrib.auth.models import AbstractUser
import django.utils.timezone as timezone
from DjangoUeditor.models import UEditorField
from rest_framework.authtoken.models import Token
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Automatically create a DRF auth Token for every newly created user."""
    if created:
        Token.objects.create(user=instance)
# 数据库表结构
class ForumUser(AbstractUser):
    '''
    The default django.contrib.auth.models.User has too few fields, so we
    extend AbstractUser with extra profile fields.
    '''
    nickname = models.CharField(max_length=200, null=True, blank=True)
    avatar = models.CharField(max_length=200, null=True, blank=True)
    signature = models.CharField(max_length=500, null=True, blank=True)  # signature line
    location = models.CharField(max_length=200, null=True, blank=True)
    website = models.URLField(null=True, blank=True)
    company = models.CharField(max_length=200, null=True, blank=True)
    role = models.IntegerField(null=True, blank=True)
    balance = models.IntegerField(null=True, blank=True)
    reputation = models.IntegerField(null=True, blank=True)
    self_intro = models.CharField(max_length=500, null=True, blank=True)  # self introduction
    updated = models.DateTimeField(null=True, blank=True)
class File(models.Model):
    '''
    An uploaded file record.
    '''
    user_id = models.IntegerField(null=True,blank=True)  # uploader's user id (plain int, not a FK)
    file_name = models.CharField(max_length=200,null=True, blank=True)
    file = models.FileField(upload_to='upload/')
    class Meta:
        verbose_name_plural = '文件'
class Blog(models.Model):
    '''
    An authored article (rich-text content edited via UEditor).
    '''
    CLASSIFTY_TYPE = (
        ('iOS', 'iOS'),
        ('javascript', 'Javascript'),
        ('other', 'Other'),
    )
    creatdate = models.DateTimeField('创建时间', auto_now=True)
    # Fix: pass the callable itself, not timezone.now(). Calling it here
    # evaluates once at import time and bakes the server start time in as a
    # fixed default for every row ever created.
    update = models.DateTimeField('更新时间', default=timezone.now)
    classify = models.CharField('类型',max_length=15,null=True,choices=CLASSIFTY_TYPE)
    count = models.IntegerField('点击次数', null=True, blank=True,default=0)
    name = models.CharField('文章名字',max_length=100, blank=True)
    content = UEditorField(u'内容 ', width=600, height=300, toolbars="full", imagePath="upload/images/",
                           filePath="upload/images/",
                           upload_settings={"imageMaxSize": 1204000},
                           settings={}, command=None, blank=True)
    class Meta:
        verbose_name_plural='编辑的文章'
class UserCenter(models.Model):
    '''
    A user-center menu entry.
    '''
    name = models.CharField('标题名称',max_length=10,null=True,blank=True)
    # Ordering weight, validated to the range 1-100.
    weight = models.IntegerField('1-100',default=1,validators=[MaxValueValidator(100),MinValueValidator(1)])
    icon = models.CharField('标题的icon',max_length=50,null=True,blank=True)
    is_super = models.BooleanField('是否是超级用户所有',default=False)
    class Meta:
        verbose_name_plural='用户中心'
|
#!/usr/bin/env python3
#
#########################################################################
# Copyright 2017 René Frieß rene.friess(a)gmail.com
#########################################################################
#
# This file is part of SmartHomeNG.
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
import MVGLive
from lib.model.smartplugin import SmartPlugin
class MVG_Live(SmartPlugin):
    """SmartHomeNG plugin exposing Munich MVG live departure data."""
    ALLOW_MULTIINSTANCE = False
    PLUGIN_VERSION = "1.5.1"
    def __init__(self, sh, *args, **kwargs):
        """
        Initializes the plugin.

        NOTE(review): super().__init__() is not called here — confirm whether
        SmartPlugin requires it for proper plugin registration.
        """
        self.logger = logging.getLogger(__name__)
        self._mvg_live = MVGLive.MVGLive()
    def run(self):
        # Framework hook: mark the plugin alive when it starts.
        self.alive = True
    def stop(self):
        # Framework hook: mark the plugin stopped on shutdown.
        self.alive = False
    def get_station_departures(self, station, timeoffset=0, entries=10, ubahn=True, tram=True, bus=True, sbahn=True):
        """Return up to `entries` live departures for `station`, starting
        `timeoffset` minutes from now, filtered by transport type flags."""
        return self._mvg_live.getlivedata(station, timeoffset, entries, ubahn, tram, bus, sbahn)
|
def execute_instructions(instructions):
    """Run a handheld-console program (list of [opcode, argument] pairs).

    Returns (acc, True) when execution steps past the last instruction
    (index >= len(instructions)), or (acc, False) as soon as any instruction
    is about to run a second time (infinite loop).

    Changes vs the original: visited indices are kept in a set (O(1) lookup
    instead of scanning a list), the sign-aware argument parsing is replaced
    by int() which accepts "+5"/"-3" directly, and the leftover debug prints
    (including the "PING" marker) are removed.
    """
    acc = 0
    index = 0
    seen = set()
    while index not in seen:
        if index >= len(instructions):
            # Ran off the end: normal termination.
            return acc, True
        seen.add(index)
        rule = instructions[index]
        op, arg = rule[0], rule[1]
        offset = int(arg)
        if op == "acc":
            acc += offset
            index += 1
        elif op == "jmp":
            index += offset
        else:
            # "nop" (and anything unrecognized) just advances.
            index += 1
    return acc, False
def day_8_part_1(instructions):
    """Part 1: run the program once and print the accumulator at the point
    the first instruction repeats (plus whether the program terminated)."""
    acc, reached_end = execute_instructions(instructions)
    print(acc, reached_end)
    print()
def day_8_part_2(instructions):
    """Part 2: find the single jmp<->nop swap that lets the program terminate.

    Tries flipping each jmp to nop AND each nop to jmp (the original only
    tried jmp->nop, which cannot fix inputs whose corrupted instruction is a
    nop, and walked `index` past the end of the list when no swap worked).
    Each candidate instruction is restored before trying the next one.
    """
    acc = None
    execution_ended = False
    for instruction in instructions:
        if instruction[0] in ("jmp", "nop"):
            original_op = instruction[0]
            instruction[0] = "nop" if original_op == "jmp" else "jmp"
            acc, execution_ended = execute_instructions(instructions)
            instruction[0] = original_op
            if execution_ended:
                break
    print("finished executing instructions")
    print(acc, execution_ended)
    print()
def parse_instructions(filename):
    """Read an instruction file into a list of [opcode, argument] pairs,
    e.g. the line 'jmp +4' becomes ['jmp', '+4']."""
    with open(filename, "r") as instruction_file:
        return [line.strip().split(" ") for line in instruction_file]
if __name__ == "__main__":
instructions = parse_instructions("inputs/instructions.txt")
day_8_part_1(instructions)
day_8_part_2(instructions) |
# 使用类模块对鸢尾花进行训练
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
# Load iris features and labels, then shuffle both with the same seed so the
# (feature, label) pairs stay aligned after shuffling.
x_train = datasets.load_iris().data
y_train = datasets.load_iris().target
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
# Fix TensorFlow's own RNG for reproducible weight initialization.
tf.random.set_seed(116)
# 总觉得这样做好蠢啊
class IrisModel(Model):
    """Single-layer softmax classifier for the 3 iris classes."""
    def __init__(self):
        super(IrisModel, self).__init__()
        # One Dense layer: 3 units (one per class), softmax activation,
        # L2 regularization on the kernel weights.
        self.d1 = Dense(3, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())
    def call(self, x):
        # Forward pass: features -> class probabilities.
        y = self.d1(x)
        return y
# 搭建网络结构
# Build the model and configure the training procedure.
model = IrisModel()
model.compile(
    # 'learning_rate' replaces the deprecated 'lr' keyword, which newer
    # Keras optimizer releases no longer accept.
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
    # Labels are integer class ids; the model already outputs probabilities,
    # hence from_logits=False.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['sparse_categorical_accuracy']
)
# Train: batches of 32, 500 epochs, last 20% of the data held out for
# validation, validated once every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500, validation_split=0.2, validation_freq=20)
# Print the network structure and parameter counts.
model.summary()
|
from pyfcm import FCMNotification
'''
# Send a message to devices subscribed to a topic.
result = push_service.notify_topic_subscribers(topic_name="news", message_body=message)
# Conditional topic messaging
topic_condition = "'TopicA' in topics && ('TopicB' in topics || 'TopicC' in topics)"
result = push_service.notify_topic_subscribers(message_body=message, condition=topic_condition)
# FCM first evaluates any conditions in parentheses, and then evaluates the expression from left to right.
# In the above expression, a user subscribed to any single topic does not receive the message. Likewise,
# a user who does not subscribe to TopicA does not receive the message. These combinations do receive it:
# TopicA and TopicB
# TopicA and TopicC
# Conditions for topics support two operators per expression, and parentheses are supported.
# For more information, check: https://firebase.google.com/docs/cloud-messaging/topic-messaging
'''
# SECURITY(review): this FCM server key is hard-coded and committed to source
# control — it should be revoked and loaded from an environment variable or a
# secret store instead.
SERVER_KEY = 'AAAAlf4IDNc:APA91bGYUv2ULlv5eoljoNcFgy9bhvdrlhaHW564P3HPs69i_htNJUCQs8JQGrkr3MGCHkRDj9cqwW4zAHDU7F9bwUP69UWkRBJ_HM2TMEdcYGXvyA8WKnbGybMELGSoio3y1LVFEvz7'
push_service = FCMNotification(api_key=SERVER_KEY)
topic = "MBC_pdnote"
message = 'TEST %s' % topic
# result = push_service.notify_topic_subscribers(topic_name=topic, message_body=message)
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/2/2021 12:48 PM
# @File:demo2
# 以图片为圆心,根据像素点与圆心的距离来进行不同程度的光照增强。
# coding:utf-8
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
def stronglight(img, rows, cols, strength=200):
    """Apply a radial light-enhancement effect centred on the image.

    img: BGR pixel array (modified in place and also returned)
    rows, cols: image height and width in pixels
    strength: peak brightness boost at the centre (default 200)

    Pixels inside the circle inscribed at the image centre are brightened
    by an amount that falls off linearly with distance from the centre
    (clamped to [0, 255]); pixels outside the circle are left unchanged.
    """
    # Light centre and effect radius
    centerX = rows / 2
    centerY = cols / 2
    radius = min(centerX, centerY)
    # Compare squared distances so the sqrt is only taken inside the circle
    radius_sq = radius * radius
    for i in range(rows):
        for j in range(cols):
            # Squared distance from the current pixel to the light centre
            distance = math.pow((centerY - j), 2) + math.pow((centerX - i), 2)
            if distance < radius_sq:
                # Boost falls off linearly with true distance from the centre
                result = (int)(strength * (1.0 - math.sqrt(distance) / radius))
                # Cast channels to Python int BEFORE adding: uint8 + result
                # would wrap around modulo 256 under NumPy integer promotion
                # instead of saturating, producing dark artifacts.
                B = min(255, max(0, int(img[i, j][0]) + result))
                G = min(255, max(0, int(img[i, j][1]) + result))
                R = min(255, max(0, int(img[i, j][2]) + result))
                img[i, j] = np.uint8((B, G, R))
            # else: pixel unchanged (original wrote the same value back; no-op)
    return img
if __name__ == '__main__':
    # Read the source image (OpenCV loads it in BGR channel order)
    img = cv2.imread('2020063010140796.jpg')
    # Image height (rows) and width (cols)
    rows, cols = img.shape[:2]
    img = stronglight(img, rows, cols)
    # Save, then display the result
    cv2.imwrite('test.jpg', img)
    # NOTE(review): plt.imshow receives BGR data, so on-screen colours will
    # look channel-swapped -- confirm whether that is acceptable here.
    plt.imshow(img)
    plt.show()
|
import argparse
import logging
from src.data.arpa.make_arpa import make_arpa_dataset
from src.data.make_dataset import make_dataset
from src.data.weather.make_weather import make_weather_dataset
from src.models.normalize_weather import predict_normalized_pollutant
def parse_args():
    """Read the CLI flags and map them onto the pipeline's parameter names."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "-b", "--build_history",
        action="store_true", default=False, required=False,
        help="[False] historical data are read from zipped csv instead of prebuilt pickle")
    cli.add_argument(
        "-d", "--daily",
        action="store_true", default=False, required=False,
        help="[False] daily data are considered instead of hourly")
    ns = cli.parse_args()
    # Rename flags to the keyword names expected by main()
    return {
        "build_historical": ns.build_history,
        "use_daily": ns.daily,
    }
def main(build_historical: bool, use_daily: bool, **kwargs) -> None:
    """Run the full data-update and normalization pipeline.

    Steps run strictly in order: ARPA data, weather data, combined
    dataset build, then pollutant normalization prediction.

    build_historical: rebuild historical data from zipped csv instead of pickle
    use_daily: aggregate at daily rather than hourly resolution
    """
    logging.info("Updating data and executing the normalization pipeline")
    make_arpa_dataset(build_historical=build_historical)
    make_weather_dataset()
    make_dataset(use_daily=use_daily)
    predict_normalized_pollutant()
if __name__ == '__main__':
    # Configure root logging before the pipeline starts emitting messages
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main(**parse_args())
|
"""Interactive Programming Mini-Project 4: Live Wallpaper
Authors: Hwei-Shin Harriman and Jessie Potter
References: http://programarcadegames.com/python_examples/en/sprite_sheets/"""
import pygame
import constants
import random
import math
class SpriteSheet(object):
    """Helper that extracts individual sprite images from one sheet file."""

    def __init__(self, file_name):
        # Load the sheet once, keeping per-pixel alpha for transparency
        self.sprite_sheet = pygame.image.load(file_name).convert_alpha()

    def get_image(self, x, y, width, height):
        """Return the width-by-height sprite located at (x, y) on the sheet.

        x, y: top-left corner of the sprite within the sheet
        width, height: dimensions of the sprite
        """
        sprite = pygame.Surface([width, height], pygame.SRCALPHA)
        # Copy just the requested rectangle from the big sheet
        sprite.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
        return sprite
class Sprite(pygame.sprite.Sprite):
    """Base sprite built from a single image on a sprite sheet.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet to load
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__()
        sheet = SpriteSheet(sheet_name)
        x = sprite_sheet_data[0]
        y = sprite_sheet_data[1]
        w = sprite_sheet_data[2]
        h = sprite_sheet_data[3]
        self.image = sheet.get_image(x, y, w, h)
        self.rect = self.image.get_rect()

    def draw(self, surface):
        """Blit this sprite onto the given surface at its current rect."""
        surface.blit(self.image, self.rect)

    def update(self, shift_x):
        """Default update: a static sprite that always survives."""
        return True
class Balloon(Sprite):
    """A hot-air balloon that bobs sinusoidally while drifting left.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)
        self.change_x = 0
        self.change_y = 1
        self.boundary_top = self.rect.y + 2
        self.boundary_bottom = self.rect.y - 2
        # Age drives the bobbing phase; rate gives each balloon its own tempo
        self.age = 0
        self.rate = random.randint(10, 20)

    def update(self, shift_x):
        """Bob up/down and drift left; despawn once far off-screen left."""
        self.age += 1
        # Vertical bob: small sine wave keyed on age/rate
        self.rect.y += 2 * math.sin(self.age / self.rate)
        # Horizontal drift with the world scroll
        self.rect.x += shift_x
        return self.rect.x >= -200
class BlackCloud(Sprite):
    """A large dark cloud that scrolls slightly faster than the world.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)

    def update(self, shift_x):
        """Scroll left; wrap to a random x on the right once off-screen."""
        self.rect.x += shift_x - 1
        if self.rect.x < -2100:
            self.rect.x = 100 * random.randint(20, 38)
        return True
class PurpleCloud(Sprite):
    """A mid-layer cloud that scrolls faster than the black clouds.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)

    def update(self, shift_x):
        """Scroll left; wrap to a random x on the right once off-screen."""
        self.rect.x += shift_x - 2
        if self.rect.x < -1500:
            self.rect.x = 100 * random.randint(20, 38)
        return True
class SmallCloud(Sprite):
    """A foreground cloud: the fastest-scrolling cloud layer.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)

    def update(self, shift_x):
        """Scroll left; wrap to a random x on the right once off-screen."""
        self.rect.x += shift_x - 3
        if self.rect.x < -1000:
            self.rect.x = 100 * random.randint(20, 38)
        return True
class Cactus(Sprite):
    """A cactus that scrolls with the background and re-enters on the right.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)

    def update(self, shift_x):
        """Scroll at world speed; respawn at its fixed x once off-screen."""
        self.rect.x += shift_x
        if self.rect.x < -1000:
            self.rect.x = 2958
        return True
class Flower(Sprite):
    """A flower that scrolls with the background and despawns off-screen.

    sprite_sheet_data: (xpos, ypos, width, height) of the sprite on the sheet
    sheet_name: filename of the sprite sheet
    """

    def __init__(self, sprite_sheet_data, sheet_name):
        super().__init__(sprite_sheet_data, sheet_name)

    def update(self, shift_x):
        """Scroll left; survive only while still near the visible area."""
        self.rect.x += shift_x
        return self.rect.x >= -200
class Scene():
    """Generic scrolling-landscape base class; subclasses populate sprites."""
    def __init__(self):
        # Background image, drawn twice side by side for seamless looping
        self.background = pygame.image.load("summerbackground.png").convert()
        self.background.set_colorkey(constants.WHITE)
        self.background_size = self.background.get_size()
        self.background_rect = self.background.get_rect()
        self.w, self.h = self.background_size
        # x / x1 track the scroll positions of the two background copies
        self.x = 0
        self.x1 = self.w
        # How far this world has been scrolled left/right in total
        self.world_shift = 0
        # All sprites that currently need updating/drawing
        self.active_sprites = []
    def update(self, shift_x):
        """Update every sprite; drop the ones whose update() returns False."""
        sprites = self.active_sprites
        self.active_sprites = []
        for s in sprites:
            survive = s.update(shift_x)
            if survive:
                self.active_sprites.append(s)
    def draw(self, screen):
        """Draw every active sprite onto the screen."""
        for s in self.active_sprites:
            s.draw(screen)
    def shift_world(self, shift_x, screen):
        """Scroll the looping background by shift_x and redraw both copies."""
        # Keep track of the total shift amount
        self.world_shift += shift_x
        # Advance both background copies
        self.x += shift_x
        self.x1 += shift_x
        screen.blit(self.background, (self.x, 0))
        screen.blit(self.background, (self.x1, 0))
        # Once a copy has fully left the screen on the left, jump it back
        # to the right edge so the two copies keep alternating
        if self.x < -self.w:
            self.x = self.w
        if self.x1 < -self.w:
            self.x1 = self.w
    def spawnballoon(self, xpos, ypos):
        """Add a random hot-air balloon sprite at (xpos, ypos)."""
        currentballoon = constants.balloons[random.randint(0, 10)]
        block = Balloon(currentballoon, "hotairballoons.png")
        block.rect.x = xpos
        block.rect.y = ypos
        self.active_sprites.append(block)
    def spawnflower(self, xpos, ypos):
        """Add a flower sprite at (xpos, ypos)."""
        block = Flower(constants.FLOWER, "flower.png")
        block.rect.x = xpos
        block.rect.y = ypos
        self.active_sprites.append(block)
class Summer(Scene):
    """Summer live background: clouds and a cactus placed from constants.summer."""

    def __init__(self):
        # Parent constructor loads the looping background
        Scene.__init__(self)
        # Each entry: (sprite sheet data, x position, y position)
        enviro = constants.summer
        for i in range(len(enviro)):
            # Index ranges select which sprite sheet an entry belongs to
            if 0 <= i <= 3:
                block = BlackCloud(enviro[i][0], "blackclouds.png")
            elif 4 <= i <= 7:
                block = PurpleCloud(enviro[i][0], "purpleclouds.png")
            elif 8 <= i <= 11:
                block = SmallCloud(enviro[i][0], "smallclouds.png")
            elif i == 12:
                block = Cactus(enviro[i][0], "bigcactus.png")
            else:
                continue
            block.rect.x = enviro[i][1]
            block.rect.y = enviro[i][2]
            self.active_sprites.append(block)
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes for the search app.
# NOTE(review): patterns() and string-based view references were removed in
# Django 1.10 -- confirm this project is pinned to an older Django.
urlpatterns = patterns('',
    url(r'^$', 'search.views.index', name='home'),
    url(r'^direct_targets', 'search.views.direct_search',
        name='direct_search'),
    url(r'^enrichment_analysis', 'search.views.enrichment_analysis',
        name='enrichment_analysis'),
    url(r'^query_db', 'search.views.query_db', name='query_db'),
    # size captures "all" or "page" to select full vs paged download
    url(r'^download/(?P<size>(all|page))',
        'search.views.download', name='download'),
    url(r'^download_file/(?P<fileid>\d+)',
        'search.views.download_file', name='download_file'),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
# Day_03_04_sigmoid.py
import math
import matplotlib.pyplot as plt
def show_sigmoid():
    """Print a few sigmoid samples, then scatter-plot sigmoid over [-10, 10)."""
    def sigmoid(z):
        # Logistic function: 1 / (1 + e^-z)
        return 1 / (1 + math.e ** -z)
    print(math.e)
    for z in (-10, -1, 0, 1, 10):
        print(sigmoid(z))
    for i in range(-10, 10):
        plt.plot(i, sigmoid(i), 'ro')
    plt.show()
def select_ab(y):
    """Print 'A' when y is 1 and 'B' when y is 0, twice, via two techniques."""
    def A():
        return 'A'
    def B():
        return 'B'
    # Arithmetic selection: y acts as a 0/1 switch on string repetition
    print(y * A() + (1 - y) * B())
    # Equivalent explicit branch
    print(A() if y == 1 else B())
# Demo driver: the plot demo is left disabled; exercise the A/B selector
# show_sigmoid()
select_ab(0)
select_ab(1)
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
import xgboost as xgb
from datetime import datetime
# import judge_result
import io_operation
import sys
def read_file_for_local_test():  # Local-test file I/O (translated from Chinese comment)
    """Read the label/id csv named by sys.argv[4].

    Expects comma-separated lines of "label,sampleid,...". The header line
    (containing 'sampleid') and malformed short lines are skipped.
    Returns (test_id_list, test_data_y): sample ids and integer labels.
    """
    f1 = open(sys.argv[4])
    test_id_list = []
    test_data_y = []
    for eachline in f1 :
        # Skip the header row
        if eachline.__contains__('sampleid'):
            continue
        # Skip lines that do not contain at least "label,id"
        if len(eachline.split('\n')[0].split('\r')[0].split(',')) < 2:
            print eachline
            continue
        # Strip trailing newline/CR, then take column 1 = id, column 0 = label
        current_id = eachline.split('\n')[0].split('\r')[0].split(',')[1]
        test_data_y.append(int(eachline.split('\n')[0].split('\r')[0].split(',')[0]))
        test_id_list.append(current_id)
    f1.close();
    return test_id_list, test_data_y
def xgboost_main(deep,rate,times):
    """Load a pre-trained xgboost model and predict on the DMatrix file argv[5].

    The model path is assembled from argv[6] and argv[1..3].
    NOTE(review): `param` is built but never used (the model is loaded, not
    trained here) and `times` is unused -- confirm this is intentional.
    Returns the raw prediction array.
    """
    dtest = xgb.DMatrix(sys.argv[5])
    param = {'max_depth':deep, 'eta':rate, 'silent':0, 'objective':'binary:logistic' }
    bst = xgb.Booster()
    bst.load_model('model_auto/xgboost_model_'+sys.argv[6]+'_'+ sys.argv[1] +'_'+ sys.argv[2] +'_'+ sys.argv[3]+".model")
    preds = bst.predict(dtest)
    return preds
# Script driver: read labels/ids, predict, sanity-check lengths, write results.
test_id_list,label = read_file_for_local_test()
prediction = xgboost_main(int(sys.argv[1]), float(sys.argv[2]), int(sys.argv[3]))
# Each prediction must correspond to exactly one sample id
if len(test_id_list) != len(prediction):
    exit(-1)
predict_sample = {}
test_sample = {}
for i in range(len(test_id_list)):
    # Model predicts the positive class; store 1 - p as the score of interest
    predict_sample.setdefault(test_id_list[i], 1 - prediction[i])
    test_sample.setdefault(test_id_list[i], label[i])
    # Write one "id,score,label" row per sample
    io_operation.write_to_file('result/result_'+sys.argv[6]+'_'+sys.argv[7]+'_'+ sys.argv[1] +'_'+ sys.argv[2] +'_'+ sys.argv[3] +'_.csv',str(test_id_list[i]) + ',' + str(1 - prediction[i]) + ',' + str(label[i]) )
exit(0)
|
# Work with Python 3.6
import discord
from discord.ext import commands
from xml.dom import minidom
import random
import os
import asyncio
import globals
# Bot configuration lives in CoOpBotParameters.xml next to this script
try:
    mydoc = minidom.parse("CoOpBotParameters.xml")
except:
    print("Unable to open file CoOpBotParameters.xml")
    raise
# Unique Bot Token
token = mydoc.getElementsByTagName('BotToken')[0].firstChild.data
# Command prefix character
prefixChar = mydoc.getElementsByTagName('PrefixChar')[0].firstChild.data
description = '''Bot for the Friendly CoOp Discord server'''
bot = commands.Bot(command_prefix = str(prefixChar), description = description)
# Record the bot start time (used elsewhere for uptime reporting)
globals.setStartTime()
@bot.event
async def on_ready():
    """Log identifying info once the bot has connected to Discord."""
    print('Logged in as')
    print(bot.user.name)
    print(bot.user.id)
    print("Prefix char: "+prefixChar)
    print('------')
    #await bot.change_presence(game=discord.Game(name='Resurrecting CoOpBot'))
#######################################################################
#
# Background task
# runs every 5 minutes to automatically assign game roles to users
#
#######################################################################
async def assign_game_roles():
    """Background task: every 5 minutes, give each member the role whose name
    matches the game they are currently playing (if such a role exists).

    NOTE(review): this uses pre-1.0 discord.py APIs (bot.is_closed as an
    attribute, bot.connection.servers, user.game, bot.add_roles) -- confirm
    the installed library version before re-enabling this task.
    """
    await bot.wait_until_ready()
    while not bot.is_closed:
        # Do for each server
        for server in bot.connection.servers:
            # Do for each user in that server
            for user in server.members:
                # Check if they are playing a game
                if user.game is None:
                    pass
                else:
                    # Loop through the roles in the current server
                    for role in server.roles:
                        # Check if the game name exists as a role
                        if user.game.name == role.name:
                            # Assign role to user if they are not a member of that role already
                            if role in user.roles:
                                pass
                            else:
                                await bot.add_roles(user, role)
                            # TODO - check if a string translation exists from the game being played to a valid role name
        await asyncio.sleep(300)
#######################################################################
#
# Anti-spam background task
# runs every time a message is sent and mutes people if they spam too many messages
#
#######################################################################
async def antiSpam(message):
    """Anti-spam hook: bump the author's spam counter and schedule its decay.

    Runs on every message; the counter logic itself lives in the globals module.
    """
    bot.loop.create_task(globals.addSpamCounter(user = message.author, bot = bot, server = message.guild, channel = message.channel))
    bot.loop.create_task(globals.reduceSpamCounter(user = message.author, bot = bot))
#######################################################################
#
# Commands that aren't explictly called
# checks for certain text in each message
#
#######################################################################
@bot.event
async def on_message(message):
    """Reply to a handful of trigger phrases; otherwise defer to command processing.

    Messages from bots (including this one) are ignored so the bot never
    replies to itself. Every message also feeds the anti-spam counters.
    """
    messageStrLower = message.content.lower()
    msg = None
    # We do not want the bot to reply to itself or any other bots
    if message.author.bot:
        return
    bot.loop.create_task(antiSpam(message))
    # Trigger phrases are mutually exclusive, so an elif chain replaces the
    # original repeated `msg == None and ...` guards (also: `is None` idiom).
    if messageStrLower == "ayyy":
        msg = "Ayyy, lmao"
    elif messageStrLower == "winner winner":
        msg = "Chicken dinner"
    elif messageStrLower == "new number":
        msg = "Who dis?"
    elif messageStrLower == "you" or messageStrLower == "u":
        msg = "No u"
    elif messageStrLower == "good bot":
        # One response is picked uniformly at random each time
        goodBotResponses = [
            "Good human",
            "No u",
            "Why thank you!",
            "(◠﹏◠✿)",
            "v(◠o◠)v",
            "( ͡° ͜ʖ ͡°)",
            "Beep Boop",
            "Yes {0.author.mention}, good bot indeed".format(message),
            "More like Gu'd bot",
            "Bot is the Cakeob!",
        ]
        msg = random.choice(goodBotResponses)
    elif messageStrLower.rfind("pixis") != -1:
        msg = "PIXISUUUUUUUU"
    # Send Message if we have one
    if msg is not None:
        await message.channel.send(msg)
    # Try to process commands if no message sent by the bot yet
    else:
        await bot.process_commands(message)
# Load every .py file in Modules/ as a bot extension (cog)
for file in os.listdir("Modules"):
    if file.endswith(".py"):
        name = file[:-3]
        bot.load_extension(f"Modules.{name}")
# Set the background task to run
#bot.loop.create_task(assign_game_roles())
# Start the bot (blocks until shutdown)
bot.run(token, bot=True, reconnect=True)
"""
Keyword for safety
It is a good idea to start using keyword arguments once a
function or method has three more parameters. Especially,
if there are optional parameters.
Let us assume that we have to set the last maintenance date
but we don't have an ad reel
"""
from screen_control import Screen3DControl
# Positional call: '2018-04-01' lands in the SECOND positional slot, which
# (per the outputs below) is the ad reel, not last_3d_maintenance -- this is
# the mistake this demo illustrates.
screen6_3d = Screen3DControl(
    'Ready Player one',
    '2018-04-01',
)
screen6_3d.view_last_3d_maintenance()
# Output: No record of maintenance
screen6_3d.play_ad()
# Output: Ad 2018-04-01 is being played
# WHAT???
# Could you rectify this issue?
# Fix: name the argument so it reaches the intended parameter.
screen6_3d = Screen3DControl(
    'Ready Player one',
    last_3d_maintenance='2018-04-01',
)
screen6_3d.view_last_3d_maintenance()
# Output: Last 3D maintenance was performed on 2018-04-01
screen6_3d.play_ad()
# Output: This screen currently has no ad reel
# It is better to pass everything as keyword argument
# The code will be more readable and you don't have to
# worry about parameter order
screen6_3d = Screen3DControl(
    movie='Ready Player one',
    last_3d_maintenance='2018-04-01',
    ad_reel="Documentaries"
)
# We have changed parameter order but we are fine
# since we are passing keyword arguments
screen6_3d.play_ad()
# Output: Ad Documentaries is being played
import pyppeteer
import asyncio
from pyppeteer import launch
# Browser viewport dimensions used by the (disabled) examples below
width, height = 1366, 768
#
# async def main():
#     browser = await launch(headless=False)
#     page = await browser.newPage()
#     await page.setViewport({'width': width, 'height': height})
#     await page.goto('https://www.taobao.com')
#     await asyncio.sleep(3)
#
#
# asyncio.get_event_loop().run_until_complete(main())
import asyncio
from pyppeteer import launch
# async def main():
# browser = await launch(headless=False)
# page = await browser.newPage()
# await page.goto('https://www.taobao.com')
# await asyncio.sleep(10)
#
#
# asyncio.get_event_loop().run_until_complete(main())
#
# import asyncio
# from pyppeteer import launch
#
#
# async def main():
# # headless参数设为False,则变成有头模式
# browser = await launch(headless=False)
#
# page = await browser.newPage()
# # 设置页面视图大小
# await page.setViewport(viewport={'width': 1280, 'height': 800})
#
# await page.goto('https://www.baidu.com/')
# # 节点交互
# await page.type('#kw', '周杰伦', {'delay': 1000})
# await asyncio.sleep(3)
# await page.click('#su')
# await asyncio.sleep(30)
# # 使用选择器选中标签进行点击
# alist = await page.querySelectorAll('.s_tab_inner > a')
# a = alist[3]
# await a.click()
# await asyncio.sleep(30)
# await browser.quit()
#
#
# asyncio.get_event_loop().run_until_complete(main())
import asyncio
from pyppeteer import launch
async def main():
    """Open Taobao in a visible browser, search for winter clothing, then idle."""
    # headless=False gives a visible ("headed") browser window
    browser = await launch(headless=False,autoClose=False,args=['--disable-infobars'])
    page = await browser.newPage()
    # Set the page viewport size
    await page.setViewport(viewport={'width': 1280, 'height': 800})
    await page.goto('https://www.taobao.com/')
    # Node interaction: type the query into the search box, 1s per keystroke
    await page.type('#q', '冬装', {'delay': 1000})
    await asyncio.sleep(3)
    await page.click('#J_TSearchForm > div.search-button > button')
    await asyncio.sleep(60)
    # Example of selecting elements with a CSS selector and clicking:
    # alist = await page.querySelectorAll('.s_tab_inner > a')
    # a = alist[3]
    # await a.click()
    # await asyncio.sleep(30)
    # await browser.quit()
asyncio.get_event_loop().run_until_complete(main())
|
import MeCab
import os
def TestFileOpen(dir, filename):
    """Read dir/filename and return its lines (trailing newlines preserved)."""
    path = os.path.join(dir, filename)
    with open(path, 'r') as handle:
        lines = handle.readlines()
    return lines
def subtract_subjects(sentence_list):
    """Morphologically analyse each sentence with MeCab (neologd dictionary)
    and print the feature field of every parsed word.

    NOTE(review): `response` and `response_words` are never used and the
    function returns None; word.split('\\t')[1] will raise IndexError on any
    output line without a tab -- confirm intended behaviour.
    """
    tagger = MeCab.Tagger('-d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')
    # Parse an empty string once up front (commonly done to work around a
    # decoding issue in older MeCab bindings -- TODO confirm still needed)
    tagger.parse('')
    response = []
    for sentence in sentence_list:
        # Strip ellipses, Japanese quote brackets and newlines
        sentence = sentence.replace('…', '').replace('「', '').replace('」', '').replace('\n','').strip()
        if sentence:
            # MeCab output: one "surface\tfeatures" line per token, ending with EOS
            parsed_sentence = tagger.parse(sentence)
            parsed_sentence = parsed_sentence.split('\n')
            response_words = ''
            for word in parsed_sentence:
                if word not in ['EOS', '']:
                    # Print the feature string that follows the surface form
                    print(word.split('\t')[1])
if __name__=='__main__':
    # Hard-coded input location; adjust per machine
    dir = '/home/maya/PycharmProjects/NarouScraping/Narou_v10/NarouTopModernRenai'
    file = 'n0690cw.txt'
    sentences = TestFileOpen(dir, file)
    subtract_subjects(sentences)
|
def find_changes(sequence):
    """Return the list of differences between consecutive values."""
    return [later - earlier for earlier, later in zip(sequence, sequence[1:])]
def identify_sequence(changes):
    """Repeatedly difference the sequence, printing each level, until the
    differences are constant.

    Returns "Insufficient values" if a level becomes too short to compare;
    otherwise returns None once a constant difference level is reached.
    """
    level = 0
    while True:
        changes = find_changes(changes)
        level += 1
        print("Level ",level,": ",changes)
        # Need at least two values to decide whether the level is constant
        if len(changes) < 2:
            return "Insufficient values"
        if all(changes[k] == changes[k + 1] for k in range(len(changes) - 1)):
            break
while True: # Prompt until a positive integer sequence length is given
    try:
        sequence_length = int(input("How many values?: "))
        if sequence_length > 0:
            break
        else:
            print("Sequence length has to be more than 0.")
    except ValueError:
        print("Invalid value entered, has to be int.")
sequence = []
while len(sequence) < sequence_length: # Collect that many integer values
    try:
        sequence.append(int(input("Enter a value: ")))
    except ValueError:
        print("Invalid value entered, has to be int.")
identify_sequence(sequence)
|
from django.shortcuts import render, redirect
from django.contrib import messages
from .models import Quote
from ..login_and_registration_app.models import User
# import datetime
# Create your views here.
def index(request):
    """Render the landing (login/registration) page."""
    # User.objects.all().delete()
    # Quotes.objects.all().delete()
    return render(request, 'quotes_app/index.html')
def quotes(request):
    """Main quotes page: quotes split into not-yet-faved vs the user's favorites.

    Requires a logged-in session; otherwise redirects home with an error.
    """
    if not request.session.get('id'):
        messages.error(request, 'Access Denied. Log in first.')
        return redirect('/')
    user = User.objects.get(id=request.session.get('id'))
    print user
    quotables = Quote.objects.all().exclude(faves=user)
    favorites = Quote.objects.filter(faves=user)
    context = {
        'user': user,
        'quotables': quotables,
        'favorites': favorites,
    }
    return render(request, 'quotes_app/quotes.html', context)
def add_to_faves(request):
    """POST handler: add the posted quote to the logged-in user's favorites."""
    if not request.session.get('id'):
        messages.error(request, 'Access Denied. Log in first.')
        return redirect('/')
    if request.method == 'POST':
        quote = Quote.objects.get(id=request.POST['quote'])
        user = User.objects.get(id=request.session.get('id'))
        quote.faves.add(user)
        quote.save()
    return redirect('/quotes')
def add_a_quote(request):
    """POST handler: validate and create a quote via the model manager."""
    if not request.session.get('id'):
        messages.error(request, 'Access Denied. Log in first.')
        return redirect('/')
    results = Quote.objects.addQuoteVal(request.POST)
    # Validation errors come back as a list of messages
    if not results['error_message'] == []:
        for error in results['error_message']:
            messages.error(request, error)
    else:
        messages.success(request, 'Quote Successfuly Added.')
    # return redirect('books/'+str(results['book'].id))
    return redirect('/quotes')
def remove_fave(request):
    """POST handler: remove the posted quote from the user's favorites."""
    if not request.session.get('id'):
        messages.error(request, 'Access Denied. Log in first.')
        return redirect('/')
    if request.method == 'POST':
        quote = Quote.objects.get(id=request.POST['fave'])
        user = User.objects.get(id=request.session.get('id'))
        quote.faves.remove(user)
        quote.save()
    return redirect('/quotes')
def user_page(request, id):
    """Profile page: quotes a given user has posted, with a count."""
    user = User.objects.get(id=id)
    quotes_by_user = Quote.objects.filter(posted_by=user)
    quote_count = quotes_by_user.count()
    context = {
        'user' : user,
        'quotes_by_user' : quotes_by_user,
        'quote_count' : quote_count
    }
    return render(request, 'quotes_app/user_page.html', context)
def logout(request):
    """Clear the session and return to the landing page."""
    request.session.clear()
    messages.success(request, 'Logged Out')
    return redirect('/')
def home(request):
    """Convenience redirect to the quotes page."""
    return redirect('/quotes')
|
## Anthony Dike - Due: Nov. 28, 2017
## CSCI-UA.0002-012
## Assignment 8: Part 1
#This program converts the zeros with index values that are prime in the range of 1-1001 to ones.
"""
# KEY
# 1 == NON PRIME
# 0 = PRIME
CHANGE TOTAL VALUE TO 1000 at end
"""
# Create a list of 1,000 values ... all of which are set to zero.
# KEY: 0 = prime, 1 = non-prime, 2 = prime that has already been printed.
myList = [0] * 1000
# 0 and 1 are not prime
myList[0] = 1
myList[1] = 1
"""
Ruling Out Non-Primes
"""
# Full Sieve of Eratosthenes: for every number p up to sqrt(1000) that is
# still marked prime, mark all multiples of p starting at p*p as non-prime.
# (The original only crossed off multiples of 2, 3 and 5, which wrongly
# reported 49, 77, 91, 119, 121, ... as prime.)
for p in range(2, int(len(myList) ** 0.5) + 1):
    if myList[p] == 0:
        for multiple in range(p * p, len(myList), p):
            myList[multiple] = 1
"""
Printing the Primes Indices
"""
print()
print()
print()
# Print the prime indices, ten per row, right-aligned in 3 columns
rowCounter = 0
for n in range(0, len(myList)):
    if myList[n] == 0:
        # n is the prime itself; printing n directly avoids the fragile
        # myList.index(...) first-occurrence lookup of the original
        print(format(n, ">3"), end=" ")
        rowCounter += 1
        if rowCounter % 10 == 0:
            print("\n")
        # Mark as printed (2) so it is never re-matched
        myList[n] = 2
#print(myList) #test
|
from django.contrib import admin
from gestionPeliculas.models import Pais, Director, Genero, Pelicula
# Register your models here.
# Expose the film-management models in the Django admin site
admin.site.register(Pais)
admin.site.register(Director)
admin.site.register(Genero)
admin.site.register(Pelicula)
from django.db import models
from django.utils import timezone
class Ideas(models.Model):
    """An idea record with its cost, keyword and publication timestamps."""
    # NOTE(review): date is free-form text rather than a DateField -- confirm intent
    date = models.CharField(max_length=200)
    # Monetary cost, up to 999,999.99
    costs = models.DecimalField(max_digits=8, decimal_places=2, default=0)
    created_date = models.DateTimeField(
        default=timezone.now)
    # Unset until publish() is called
    published_date = models.DateTimeField(
        blank=True, null=True)
    keyword = models.CharField(max_length=200)
    def publish(self):
        """Stamp the idea as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        # Admin/display label is the date text
        return self.date
|
from komodo_outings import Outing
outings = []  # all Outing records entered during this session
while True:
    # Menu loop: repeats until the user picks option 5 (Exit)
    print(
"""
Please choose an option:
1. To enter an outing
2. List all events
3. List the dollar total of all events
4. Subtotal of the dollar amount of event
5. Exit
""")
    choice = input("Which option > ")
    if choice == "1":
        # Collect one outing's details and append it
        print("Adding new Outing")
        ed = input("Date of event:")
        ev = input("Event type: ").lower()
        c = int(input("Cost:"))
        p = int(input("Number of people:"))
        cp = c / p
        new_outing = Outing(ed, ev, c, p, cp)
        outings.append(new_outing)
        print(outings)
    elif choice == "2":
        # Tabular listing of all outings
        print ("Event Date Type of Event Total Cost # of people Cost per person")
        for e in outings:
            print(e)
    elif choice == "3":
        # Grand total across all outings
        total = 0
        for e in outings:
            total += e.costofevent
        print(f"The total cost of all events {total}")
    elif choice == "4":
        # Subtotal for one event type (matched case-insensitively)
        t_total = 0
        print (outings)
        event = input("What event total are you interested in: ").lower()
        for e in outings:
            if e.eventtype == event:
                t_total += e.costofevent
        print(f"The cost of {event} events is {t_total}")
    elif choice == "5":
        exit()
    else:
        print("invalid option")
import pika
import sys
import json
import redis
# Product catalogue: item-number -> {product_name, quantity, cost, category}
bigHash = redis.Redis(db=14)
bigHash.flushall()
bigHash.hmset("1010", {'product_name':'Rubik\'s cube', 'quantity':'4', 'cost':'20', 'category':'Puzzle'})
bigHash.hmset("1001", {'product_name':'OnePlus3t', 'quantity':'2', 'cost':'637', 'category':'Phone|Black'})
bigHash.hmset("0101", {'product_name':'Intel i5 m460', 'quantity':'1', 'cost':'202', 'category':'Processor'})
bigHash.hmset("0010", {'product_name':'Pilot\t', 'quantity':'13', 'cost':'4', 'category':'Pen|Blue'})
bigHash.hmset("1111", {'product_name':'Asus F53s', 'quantity':'1', 'cost':'388', 'category':'Notebook|Black'})
# Reverse index: category segment -> concatenation of matching item numbers.
# Category strings are '|'-separated paths (e.g. "Phone|Black").
# NOTE(review): `lable` is an index into the SLICE cur_category[it:], not into
# the full string, so the slicing after the first segment looks wrong; in the
# else-branch hget reads cur_category[it:] while hset writes cur_category[it:lable];
# and keys are concatenated with no separator. Confirm intended behaviour.
categoryHash=redis.Redis(db=15)
for key in bigHash.keys():
    cur_category = bigHash.hget(key, "category").decode("utf-8")
    it = 0
    while it != -1:
        lable = str(cur_category[it:]).find('|')
        if lable == -1:
            # Last (or only) segment: append this item number to its entry
            ans = categoryHash.hget(cur_category[it:], "items")
            if ans == None:
                ans = key
            else:
                ans = ans + key
            categoryHash.hset(cur_category[it:], "items", ans)
            it = lable
        else:
            # Intermediate segment before the next '|'
            ans = categoryHash.hget(cur_category[it:], "items")
            if ans == None:
                ans = key
            else:
                ans = ans + key
            categoryHash.hset(cur_category[it:lable], "items", ans)
            it = lable + 1
# Per-client state: client id -> basket db / reply channel, plus counters
dict_with_consumers_basket = {}
dict_with_chanels = {}
total_customers = 0
made_purchase = 0
# string -> bytes: .encode('utf-8')
# bytes -> string: .decode('utf-8')
def callback(ch, method, properties, body):
    """Handle one client message from the fanout queue.

    `body` is a JSON list: [client_id, command, *args].  The reply text is
    JSON-published to the per-client queue '<client_id>_queue'.  Each client
    gets its own redis DB (db=int(client_id)) as a shopping basket —
    presumably client ids are small integers; verify against the client code.
    """
    global total_customers # problem with parameter passing to callback function
    global made_purchase
    rec_mes = json.loads(body)
    print(rec_mes)
    chnl_in_dict = dict_with_chanels.get(rec_mes[0])
    if chnl_in_dict == None:
        # First message from this client: register it and open a reply channel.
        total_customers += 1
        connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', heartbeat = 0))
        channel_server_to_client = connection.channel()
        channel_server_to_client.queue_declare(queue=rec_mes[0] + '_queue')
        dict_with_chanels[rec_mes[0]] = channel_server_to_client
        dict_with_consumers_basket[rec_mes[0]] = redis.Redis(db=int(rec_mes[0]))
    ans = ''
    if rec_mes[1] == 'add_to_cart':
        # args: rec_mes[2] = item number, rec_mes[3] = quantity
        if rec_mes[2].encode('utf-8') not in bigHash.keys():
            ans = 'There is no product with this item number\n'
        elif int(rec_mes[3]) > int(bigHash.hget(rec_mes[2], "quantity")):
            ans = 'Not enough product. Available: ' + bigHash.hget(rec_mes[2], "quantity").decode("utf-8") + '\n'
        else:
            dict_with_consumers_basket[rec_mes[0]].hset(rec_mes[2], "quantity", rec_mes[3])
            ans = 'Done\n'
    elif rec_mes[1] == 'rem_from_cart':
        if dict_with_consumers_basket[rec_mes[0]].hexists(rec_mes[2], "quantity") == False:
            ans = "This product is not in backet\n"
        else:
            dict_with_consumers_basket[rec_mes[0]].hdel(rec_mes[2], "quantity")
            ans = 'Done\n'
    elif rec_mes[1] == 'buy':
        # Check out the whole basket: decrement stock, delete sold-out items
        # (including their category-index entries), and bill the total.
        made_purchase += 1
        ans = 'Products\n'
        products = ''
        flag = False  # set when some basket item has run out of stock
        total = 0
        keys = dict_with_consumers_basket[rec_mes[0]].keys()
        if len(keys) == 0:
            ans = 'It is a pity that You did not buy anything\n'
        else:
            for key in keys:
                wanted_quantity = int(dict_with_consumers_basket[rec_mes[0]].hget(key, "quantity"))
                in_stock = bigHash.hexists(key,'cost')
                if in_stock == False:
                    # Product vanished from the catalogue since it was added.
                    flag = True
                    continue
                in_stock = bigHash.hgetall(key)
                if wanted_quantity <= int(in_stock[b'quantity']):
                    products += str(wanted_quantity) + 'x' + in_stock[b'product_name'].decode('utf-8') + '\n'
                    total += int(in_stock[b'cost']) * wanted_quantity
                    if wanted_quantity == int(in_stock[b'quantity']):
                        # Last units sold: remove the item from every category
                        # it appears in, then from the catalogue itself.
                        del_category = bigHash.hget(key, 'category').decode('utf-8')
                        cur_key = key.decode('utf-8')
                        it = 0
                        list_w_category = []
                        while it != -1:
                            # split the '|'-separated category string
                            lable = del_category[it:].find('|')
                            if lable == -1:
                                list_w_category.append(del_category[it:])
                                it = lable
                            else:
                                list_w_category.append(del_category[it:it + lable])
                                it += lable + 1
                        for del_c in list_w_category:
                            str_w_items_num = categoryHash.hget(del_c, 'items').decode('utf-8')
                            if len(str_w_items_num) == 4:
                                # this item was the only one in the category
                                categoryHash.hdel(del_c, 'items')
                            else:
                                # item numbers are fixed-width (4 chars): drop ours
                                items = [str_w_items_num[i:i + 4] for i in range(0, len(str_w_items_num), 4)]
                                new_items = ''
                                for item in items:
                                    if item != cur_key:
                                        new_items += item
                                categoryHash.hset(del_c, 'items', new_items)
                        bigHash.hdel(key, 'product_name', 'quantity', 'cost', 'category')
                    else:
                        bigHash.hincrby(key, 'quantity', -wanted_quantity)
                else:
                    flag = True
                dict_with_consumers_basket[rec_mes[0]].hdel(key, "quantity")
            if flag == True:
                products += 'Note: One or more products have run out\n'
            ans += products + 'Total\n' + str(total) + '\n'
        # 'buy' replies itself (answer + STOP marker) and returns early.
        dict_with_chanels[rec_mes[0]].basic_publish(exchange='', routing_key=rec_mes[0] + '_queue', body = json.dumps(ans))
        dict_with_chanels[rec_mes[0]].basic_publish(exchange='', routing_key=rec_mes[0] + '_queue', body = json.dumps('STOP'))
        return
    elif rec_mes[1] == 'discard':
        dict_with_consumers_basket[rec_mes[0]].flushdb()
        ans = 'Done\n'
    elif rec_mes[1] == 'show_all':
        for key in bigHash.keys():
            cur_product = bigHash.hvals(key)
            ans += (key + b'\t' + cur_product[0] + b'\t' + b'x' + cur_product[1] + b'\t' + cur_product[2] + b'\t' + cur_product[3] + b'\n').decode('utf-8')
    elif rec_mes[1] == 'list_category':
        for key in categoryHash.keys():
            ans += key.decode('utf-8') + '\n'
    elif rec_mes[1] == 'show_cart':
        keys = dict_with_consumers_basket[rec_mes[0]].keys()
        flag = False
        for key in keys:
            if bigHash.hexists(key,'product_name') == False:
                # product no longer in the catalogue
                flag = True
                continue
            name = bigHash.hget(key, 'product_name')
            num = dict_with_consumers_basket[rec_mes[0]].hget(key, 'quantity')
            ans += (num + b'x' + name).decode('utf-8') + '\n'
        if flag == True:
            ans += 'Note: One or more products have run out\n'
    elif rec_mes[1] == 'show_category':
        if categoryHash.hexists(rec_mes[2], "items") == False:
            ans = 'Wrong category\n'
        else:
            # 'items' is a fixed-width concatenation of 4-char item numbers
            field = categoryHash.hget(rec_mes[2], "items").decode('utf-8')
            keys = [field[i:i + 4] for i in range(0, len(field), 4)]
            for key in keys:
                cur_product = bigHash.hvals(key)
                ans += key + (b'\t' + cur_product[0] + b'\t' + b'x' + cur_product[1] + b'\t' + cur_product[2] + b'\t' + cur_product[3] + b'\n').decode('utf-8')
    elif rec_mes[1] == 'get_stat':
        if rec_mes[2] == 'cost':
            # average cost per unit across the basket
            if len(dict_with_consumers_basket[rec_mes[0]].keys()) == 0:
                ans = 'Empty basket\n'
            else:
                num = 0
                total_cost = 0
                for key in dict_with_consumers_basket[rec_mes[0]].keys():
                    cur_quantity = int(dict_with_consumers_basket[rec_mes[0]].hget(key, 'quantity'))
                    cur_cost = int(bigHash.hget(key, 'cost'))
                    num += cur_quantity
                    total_cost += cur_cost * cur_quantity
                ans = str(total_cost / num) + '\n'
        elif rec_mes[2] == 'made_purchase':
            ans = str(made_purchase) + '\n'
        elif rec_mes[2] == 'avg_num_of_diff_prod':
            # average quantity per distinct product in the basket
            if len(dict_with_consumers_basket[rec_mes[0]].keys()) == 0:
                ans = 'Empty basket\n'
            else:
                num = 0
                list_w_keys = dict_with_consumers_basket[rec_mes[0]].keys()
                for key in list_w_keys:
                    cur_quantity = int(dict_with_consumers_basket[rec_mes[0]].hget(key, 'quantity'))
                    num += cur_quantity
                ans = str(num / len(list_w_keys)) + '\n'
        else:
            # any other stat name: report the number of customers seen
            ans = str(total_customers) + '\n'
    else:
        print('AVOST')
        exit(1)
    dict_with_chanels[rec_mes[0]].basic_publish(exchange='', routing_key=rec_mes[0] + '_queue', body = json.dumps(ans))
# Bootstrap: every client publishes to one fanout exchange; the server
# consumes from a single exclusive queue bound to it.
my_id = 'server'
connection = pika.BlockingConnection(pika.ConnectionParameters(host = 'localhost', heartbeat = 0))
channel = connection.channel()
ex_name = 'server_exchange'
ex_que_name = 'server_exchange_queue'
channel.exchange_declare(exchange = ex_name, exchange_type = 'fanout')
result = channel.queue_declare(queue = ex_que_name, exclusive = True)
channel.queue_bind(exchange = ex_name, queue = ex_que_name)
channel.basic_consume(queue = ex_que_name, on_message_callback = callback, auto_ack = True)
channel.start_consuming()  # blocks forever
|
import numpy as np
from probability_model import ProbabilisticModel
class MixtureModel(object):
    """Mixture of probabilistic source models whose weights (alpha) are
    tuned by EM stacking and stochastic mutation.

    probTable[i, j] holds model j's likelihood of candidate solution i.
    """
    def __init__(self, allModels):
        # Copy the list so later appends (the CV/target model) do not
        # mutate the caller's list.
        self.model_list = allModels.copy()
        self.nModels = len(allModels)
        self.alpha = (1 / self.nModels) * np.ones(self.nModels)  # uniform start
        self.probTable = None  # (nSol, nModels) likelihood table
        self.nSol = None

    def createTable(self, solutions, CV, num_input, modelType, dims, probs_RL=None):
        """Fill probTable for `solutions`.

        With CV=True, a fresh target model is built from `solutions` and
        appended; its column is filled by leave-one-out cross validation.
        When probs_RL is given it supplies the second-to-last column
        directly instead of evaluating that model.
        """
        if CV:
            self.nModels = self.nModels + 1
            self.model_list.append(ProbabilisticModel(modelType=modelType))
            self.model_list[-1].buildModel(solutions, num_input)
            self.alpha = (1 / self.nModels) * np.ones(self.nModels)  # re-uniform
            nSol = solutions.shape[0]
            self.nSol = nSol
            self.probTable = np.ones([nSol, self.nModels])
            if probs_RL is None:
                for j in range(self.nModels - 1):
                    self.model_list[j].modify1(dims)
                    self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
            else:
                for j in range(0, self.nModels - 2):
                    self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
                self.probTable[:, -2] = probs_RL
            for i in range(nSol):  # Leave-one-out cross validation for the target column
                x = np.concatenate((solutions[:i, :], solutions[i + 1:, :]))
                tModel = ProbabilisticModel(modelType=modelType)
                tModel.buildModel(x, num_input)
                self.probTable[i, -1] = tModel.pdfEval(solutions[[i], :])
        else:
            nSol = solutions.shape[0]
            self.probTable = np.ones([nSol, self.nModels])
            for j in range(self.nModels):
                self.probTable[:, j] = self.model_list[j].pdfEval(solutions)
            self.nSol = nSol

    def EMstacking(self):
        """Run 100 EM updates on the mixture weights (in place)."""
        iterations = 100
        for _ in range(iterations):
            # Bug fix: the original `talpha = self.alpha` aliased the live
            # weights, so self.alpha was half-updated mid-iteration (and any
            # failure left it in an inconsistent state). Work on a copy.
            talpha = self.alpha.copy()
            probVector = np.matmul(self.probTable, talpha.T)
            for i in range(self.nModels):
                talpha[i] = np.sum((1 / self.nSol) * talpha[i] * self.probTable[:, i] / probVector)
            self.alpha = talpha

    def mutate(self):
        """Perturb alpha with N(0, 0.01) noise, clip at zero and renormalize;
        if everything clips to zero, put all weight on the last model."""
        modif_alpha = np.maximum(self.alpha + np.random.normal(0, 0.01, self.nModels), 0)
        total_alpha = np.sum(modif_alpha)
        if total_alpha == 0:
            self.alpha = np.zeros(self.nModels)
            self.alpha[-1] = 1
        else:
            self.alpha = modif_alpha / total_alpha

    def sample(self, nSol, samplesRL=None):
        """Draw ~nSol solutions: model i contributes ceil(nSol * alpha[i])
        rows; the pool is shuffled and truncated to exactly nSol rows.
        samplesRL, when given, stands in for model nModels-2's samples."""
        indSamples = np.ceil(nSol * self.alpha).astype(int)
        solutions = np.array([])
        for i in range(self.nModels):
            if indSamples[i] == 0:
                pass
            elif i == self.nModels - 2 and samplesRL is not None:
                solutions = np.vstack([solutions, samplesRL]) if solutions.size else samplesRL
            else:
                sols = self.model_list[i].sample(indSamples[i])
                solutions = np.vstack([solutions, sols]) if solutions.size else sols
        solutions = solutions[np.random.permutation(solutions.shape[0]), :]
        solutions = solutions[:nSol, :]
        return solutions

    def n_samples(self, ind, nSol):
        """Number of samples model `ind` would contribute for a draw of nSol."""
        return np.ceil(nSol * self.alpha[ind]).astype(int)
|
#
import os
import random
from config_file import Config
# Project config: the list of dataset names and the directory holding them.
config = Config()
filename_list = config.filename_list
data_path = config.dataset_path
def filereader(filename, dev_num):
    """Split one task's train/test files into dev/trn/tst files.

    Reads `<data_path><filename>.task.train` and `.task.test` (format:
    '<label> <text>'), rewrites each line as '<text> ||| <label>', oversamples
    the training set up to 1600 lines if needed, shuffles it, and writes:
      - the first `dev_num` shuffled lines  -> <filename>_dev
      - lines dev_num..1599               -> <filename>_trn
      - the first 400 test lines          -> <filename>_tst
    under `<data_path>/new/`.  Prints the average line length over the
    2000 written lines (1600 train + 400 test, by construction).
    """
    list_train = []
    list_test = []
    save_dir = data_path + '/new/'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # `with` replaces the original manual open/close pairs so the handles
    # are released even if a read/write fails mid-way.
    with open(data_path + filename + '.task.train', 'r', encoding='gb18030', errors='ignore') as file_train:
        for line in file_train:
            label = line[0]
            text = line[2:].replace('\n', '')
            list_train.append(text + ' ||| ' + label)
    with open(data_path + filename + '.task.test', 'r', encoding='gb18030', errors='ignore') as file_test:
        for line in file_test:
            label = line[0]
            text = line[2:].replace('\n', '')
            list_test.append(text + ' ||| ' + label)
    if len(list_train) < 1600:
        # Oversample by repeating lines from the start until we reach 1600.
        theta = 1600 - len(list_train)
        for i in range(theta):
            list_train.append(list_train[i])
        print(len(list_train))
    random.shuffle(list_train)
    all_leng = 0
    with open(os.path.join(save_dir, filename + '_dev'), 'w') as save_file_dev, \
         open(os.path.join(save_dir, filename + '_trn'), 'w') as save_file_trn, \
         open(os.path.join(save_dir, filename + '_tst'), 'w') as save_file_tst:
        for i in range(len(list_train)):
            if i < dev_num:
                all_leng += len(list_train[i])
                save_file_dev.write(list_train[i] + '\n')
            elif i < 1600:
                all_leng += len(list_train[i])
                save_file_trn.write(list_train[i] + '\n')
            else:
                continue  # anything past 1600 is dropped
        for j in range(len(list_test)):
            if j < 400:
                all_leng += len(list_test[j])
                save_file_tst.write(list_test[j] + '\n')
    print(filename + ' average length is: ' + str(all_leng / (2000)))
    print('****************This work have finished ' + filename + ' !****************')
# Split every configured dataset, reserving 200 lines for the dev set.
for filename in filename_list:
    filereader(filename, 200)
|
# Django settings for westiseast2 project.
import os
# Absolute path of the directory containing this settings file.
PROJECT_PATH = os.path.abspath(os.path.split(__file__)[0])
DEBUG = False
GA_IS_ON = True  # NOTE(review): presumably toggles Google Analytics in templates — verify
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Chris West', 'chris@fry-it.com'),
)
MANAGERS = ADMINS
# Left blank here; expected to be filled in by local_settings (imported below).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
MEDIA_ROOT = os.path.join(PROJECT_PATH, "static")
STATIC_URL = '/static/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''  # NOTE(review): empty here — must be provided by local_settings
TEMPLATE_LOADERS = (
    'django_mobile.loader.Loader',  # must come first so flavoured templates win
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.media',
    'django.core.context_processors.debug',
    'django.core.context_processors.request',
    'westiseast.blog.context_processors.common',
    'django_mobile.context_processors.flavour',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django_mobile.middleware.MobileDetectionMiddleware',
    'django_mobile.middleware.SetFlavourMiddleware',
)
ROOT_URLCONF = 'westiseast.urls'
TEMPLATE_DIRS = (
    # Bug fix: the trailing comma is required — without it the parentheses
    # are just grouping and TEMPLATE_DIRS is a *string*, which Django then
    # iterates character by character instead of as one directory path.
    os.path.join(PROJECT_PATH, "templates/"),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.sitemaps',
    'django_static',
    'blog',
    'sorl.thumbnail',
    'taggit',
)
# django_mobile flavour templates
FLAVOURS_TEMPLATE_PREFIX = ''
BASE_TEMPLATE = 'base.html'
BASE_TEMPLATE_MOBILE = 'base_mobile.html'
# django static information
DJANGO_STATIC_SAVE_PREFIX = '/tmp/cache-forever/westiseast'
DJANGO_STATIC_NAME_PREFIX = '/cache-forever/westiseast'
DJANGO_STATIC = True
DJANGO_STATIC_MEDIA_URL = 'http://static.westiseast.co.uk'
MEDIA_URL = '/'
# Machine-specific overrides (SECRET_KEY, DATABASES, ...); absence is fine.
try:
    from local_settings import *
except ImportError:
    pass
|
import numpy as np
from lib.DQNAgent import DQNAgent
from lib.PolicyAnalyzer import PolicyAnalyzer
# Load a previously trained DQN (4 state dimensions, 2 actions — CartPole-sized).
model = input("What is the name of the model file: ")
agent = DQNAgent(4,2)
agent.load('./saves/'+model+'.h5')
def dql_policy(pos, vel, angle, angular_vel):
    """Greedy DQN policy: return the action with the largest predicted Q-value."""
    observation = np.array([pos, vel, angle, angular_vel]).reshape(1, 4)
    q_values = agent.model.predict(observation)
    return np.argmax(q_values[0])
# Evaluate the loaded policy over 10 rendered episodes of up to 1000 steps.
analyzer = PolicyAnalyzer(episodes=10, steps=1000, render=True)
# Simply add your policy and give it a name
analyzer.register_policy("DQN Policy", dql_policy)
# Run the policy analyzer and get stats on how your policy did.
analyzer.run()
|
class Powers:
    """Expose virtual attributes 'square' and 'cube' computed from stored
    base values (_square, _cube)."""
    def __init__(self, square, cube):
        self._square = square
        self._cube = cube

    def __getattr__(self, item):
        # Only called when normal lookup fails, i.e. for the virtual
        # 'square'/'cube' names (the bases live in _square/_cube).
        if item == 'square':
            return self._square ** 2
        elif item == 'cube':
            return self._cube ** 3
        else:
            # NOTE: AttributeError would be more conventional, but callers
            # may already catch TypeError, so the original type is kept.
            raise TypeError('Unknown attr: ' + item)

    def __setattr__(self, key, value):
        # Route the virtual names to their backing fields. Bug fix: the
        # original only handled 'square', so `x.cube = v` stored a real
        # 'cube' attribute that shadowed the computed one forever after.
        if key == 'square':
            self.__dict__['_square'] = value
        elif key == 'cube':
            self.__dict__['_cube'] = value
        else:
            self.__dict__[key] = value
# Demo: prints 3**2 and 4**3 via the virtual attributes.
x = Powers(3, 4)
print(x.square, x.cube)
|
from rest_framework import viewsets, status
from rest_framework.decorators import list_route, detail_route
from rest_framework.response import Response
from .models import Question, Answer, Tournament, TournamentParticipation
from .serializers import QuestionSerializer, QASerializer, TournamentSerializer, TournamentParticipationSerializer
def calculate_score(questions, answers):
    """Return how many submitted answer ids match the correct answer of
    their question. `questions` and `answers` are parallel id sequences."""
    score = 0
    for idx, raw_q in enumerate(questions):
        q_id = int(raw_q)
        a_id = int(answers[idx])
        question = Question.objects.get(id=q_id)
        # First answer flagged correct wins; None if no answer is correct.
        correct_id = None
        for candidate in question.answer_set.all():
            if candidate.correct:
                correct_id = candidate.id
                break
        if a_id == correct_id:
            score += 1
    return score
class QuestionViewSet(viewsets.ModelViewSet):
    """CRUD for questions plus a bulk grading endpoint."""
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer

    @list_route(methods=['POST', ], url_path='answer')
    def answer(self, request):
        """Grade parallel lists of question/answer ids.

        Credits the score to the authenticated user's learner profile and
        returns, per question, the submitted and correct answer ids.
        """
        serializer = QASerializer(data=request.data)
        if not serializer.is_valid():
            # Bug fix: the original fell off the end (returned None) on
            # invalid input, which crashes the view; report the errors.
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        questions = serializer.data['questions']
        answers = serializer.data['answers']
        score = calculate_score(questions, answers)
        if request.user.id is not None:
            # Credit points to the user's (first) learner profile.
            learner = request.user.learner_set.all()[0]
            learner.points += score
            learner.save()
        return_questions = []
        for i in range(len(questions)):
            temp_dict = {}
            q_id = int(questions[i])
            a_id = int(answers[i])
            q = Question.objects.get(id=q_id)
            correct_id = None
            for a in q.answer_set.all():
                if a.correct:
                    correct_id = a.id
                    break
            temp_dict['q_id'] = q_id
            temp_dict['your_a_id'] = a_id
            temp_dict['correct_a_id'] = correct_id
            return_questions.append(temp_dict)
        return Response({'score': score, 'questions': return_questions}, status=status.HTTP_200_OK)
class TournamentViewSet(viewsets.ModelViewSet):
    """CRUD for tournaments plus grading and leaderboard endpoints."""
    queryset = Tournament.objects.all()
    serializer_class = TournamentSerializer

    @detail_route(methods=['POST', ], url_path='answer')
    def answer(self, request, pk):
        """Grade a tournament submission; tournament points count 5x and a
        TournamentParticipation row records the raw score."""
        serializer = QASerializer(data=request.data)
        if not serializer.is_valid():
            # Bug fix: the original returned None (view crash) on invalid
            # input; report the validation errors instead.
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        questions = serializer.data['questions']
        answers = serializer.data['answers']
        score = calculate_score(questions, answers)
        if request.user.id is not None:
            learner = request.user.learner_set.all()[0]
            learner.points += score * 5
            learner.save()
            t = Tournament.objects.get(id=pk)
            TournamentParticipation.objects.create(learner=learner, tournament=t, score=score)
        return Response(status=status.HTTP_200_OK)

    @detail_route(methods=['GET', ], url_path='result')
    def result(self, request, pk):
        """Return all participations of this tournament, best score first."""
        t = Tournament.objects.get(id=pk)
        participations = TournamentParticipation.objects.filter(tournament=t).order_by('-score')
        serializer = TournamentParticipationSerializer(participations, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
|
import sys,math,time
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
from pgmpy.inference import VariableElimination
from GibbsSamplingWithEvidence import GibbsSampling
from prettytable import PrettyTable
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
print("\n\n\n\n\nAsia Bayesian Network\n------------------------\n")
######################
### CREATE MODEL
######################
# Edges of the classic "Asia" network; each variable is binary
# (card=2 — presumably index 0 = no, 1 = yes; confirm against the CPD values).
asiaNetwork = BayesianModel([('asia','tub'),
                             ('tub','either'),
                             ('smoke','lung'),
                             ('lung','either'),
                             ('either','xray'),
                             ('either','dysp'),
                             ('smoke','bron'),
                             ('bron','dysp'),])
# non dependant variables first
cpdAsia = TabularCPD(variable='asia', variable_card=2,values=[[0.99], [0.01]])
cpdSmoking = TabularCPD(variable='smoke', variable_card=2,values=[[0.5], [0.5]])
# dependant on only one thing
cpdTub = TabularCPD(variable='tub', variable_card=2,values=[[0.99, 0.95], [0.01, 0.05]],evidence=['asia'], evidence_card=[2])
cpdLung = TabularCPD(variable='lung', variable_card=2,values=[[0.99, 0.9],[0.01, 0.1]],evidence=['smoke'], evidence_card=[2])
cpdBron = TabularCPD(variable='bron', variable_card=2,values=[ [0.7, 0.4],[0.3, 0.6]],evidence=['smoke'], evidence_card=[2])
# 'either' behaves as a (nearly deterministic) OR of tub and lung
cpdEither = TabularCPD(variable='either', variable_card=2,
                       values=[[0.999, 0.001, 0.001, 0.001],
                               [0.001, 0.999, 0.999, 0.999]],
                       evidence=['tub', 'lung'],
                       evidence_card=[2, 2])
cpdDysp = TabularCPD(variable='dysp', variable_card=2,
                     values=[[0.9, 0.2, 0.3, 0.1],
                             [0.1, 0.8, 0.7, 0.9]],
                     evidence=['bron', 'either'],
                     evidence_card=[2, 2])
cpdXRay = TabularCPD(variable='xray', variable_card=2,values=[[0.95, 0.02],[0.05, 0.98]],evidence=['either'], evidence_card=[2])
# add the cpds to the network
asiaNetwork.add_cpds(cpdAsia,cpdSmoking,cpdTub,cpdLung,cpdBron,cpdEither,cpdDysp,cpdXRay)
# check network is valid
asiaNetwork.check_model()
######################
### GET ARGUMENTS
######################
# arguments
params = {}
possibleArgs = ["--evidence","--query","--exact","--gibbs","-N","--ent"]
arguments = sys.argv
# create a dictionary of all our arguments based off the command line input
# (each flag maps to the list of values that follow it, up to the next flag)
for currentArg in possibleArgs:
    if(currentArg in arguments):
        thisArgsValues = []
        i = arguments.index(currentArg)+1
        # NOTE(review): a value starting with '-' ends the list — fine for
        # the expected var=0/1 and positive -N values, but worth confirming.
        while(i < len(arguments) and arguments[i][0] != "-"):
            thisArgsValues.append(arguments[i])
            i += 1
        params[currentArg] = thisArgsValues
#print(params)
### Finding evidence from args
# --evidence items look like 'smoke=1'; build {variable: state} for inference
evidence = {}
if("--evidence" in params):
    for item in params["--evidence"]:
        evidence[item.split("=")[0]] = int(item.split("=")[1])
######################
### INFERENCE
######################
# Exact posteriors via variable elimination; approximate via Gibbs sampling.
exactInference = VariableElimination(asiaNetwork)
approxInference = GibbsSampling(asiaNetwork)
# Default sample count used whenever -N is not supplied on the command line.
DEFAULT_SAMPLES = 500
if("--query" in params):
    print("\nExact Inference")
    for query in params["--query"]:
        q = exactInference.query(variables=[query], evidence=evidence)
        print(q[query])
if("--gibbs" in params):
    print("\nApprox Inference")
    if("-N" in params):
        samples = approxInference.sample(size=int(params["-N"][0]),evidence=evidence)
    else:
        # use default value of 500
        samples = approxInference.sample(size=DEFAULT_SAMPLES,evidence=evidence)
    # Guard: --gibbs with no --query previously raised KeyError.
    for query in params.get("--query", []):
        p1 = sum(samples[query])/len(samples[query])
        p0 = 1 - p1
        results = PrettyTable([str(query),str("phi(") + str(query) + str(")")])
        results.add_row([str(query) + "_0",str(round(p0,4))])
        results.add_row([str(query) + "_1",str(round(p1,4))])
        print(results)
if("--ent" in params):
    print("\nCross Entropy")
    if("--query" in params):
        crossEntropy = 0
        # only doing this once speeds things up
        # Bug fix: this branch crashed with KeyError when -N was omitted;
        # fall back to the same default as the --gibbs branch.
        if("-N" in params):
            samples = approxInference.sample(size=int(params["-N"][0]),evidence=evidence)
        else:
            samples = approxInference.sample(size=DEFAULT_SAMPLES,evidence=evidence)
        for query in params["--query"]:
            exactQueryValues = exactInference.query(variables=[query], evidence=evidence)[query].values
            approxQueryValues = sum(samples[query])/len(samples[query])
            # cross entropy between the exact and the sampled marginals
            crossEntropy -= (1-approxQueryValues)*math.log(exactQueryValues[0]) + (approxQueryValues)*math.log(exactQueryValues[1])
        print("\nThe cross entropy is : ",crossEntropy)
    else:
        print("\nCannot perform cross entropy with no --query params\n")
        sys.exit()
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# © ОАО «Северное ПКБ», 2014
from setuptools import setup, find_packages
# Packaging metadata for the cluster-tools library (Python 2.7 per classifiers).
setup(name="cluster-tools",
      version="0.1.6",
      description="библиотека утилит для кластера высокой готовности",
      author="Лаборатория 50",
      author_email="team@lab50.net",
      url="http://lab50.net",
      classifiers=[
          'Environment :: High Availability Cluster',
          'Intended Audience :: Information Technology',
          'Intended Audience :: System Administrators',
          'License :: Other/Proprietary License',
          'Topic :: System :: Clustering',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
      ],
      #install_requires=[
      #    'netsnmp',
      #    ],
      include_package_data=True,
      packages=find_packages(exclude=["tests"]),
      )
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from KalturaClient import *
from KalturaMetadataClientPlugin import *
import logging
import urllib
import time
import re
# Log the Kaltura client's traffic to stdout for easier debugging.
logging.basicConfig(level = logging.DEBUG,
                    format = '%(asctime)s %(levelname)s %(message)s',
                    stream = sys.stdout)
# UPDATE THIS
PARTNER_ID = 12345678  # placeholder credentials — replace before running
SECRET = "abcd"
ADMIN_SECRET = "efgh"
SERVICE_URL = "http://www.kaltura.com"
USER_NAME = "user"
class KalturaLogger(IKalturaLogger):
    """Adapter routing Kaltura client log messages into the logging module."""
    def log(self, msg):
        logging.info(msg)
def GetConfig():
    """Build a KalturaConfiguration pointing at SERVICE_URL with logging on."""
    config = KalturaConfiguration(PARTNER_ID)
    config.serviceUrl = SERVICE_URL
    config.setLogger(KalturaLogger())
    return config
# copied from C# tester
def SampleMetadataOperations():
    """Walk through the custom-metadata API: create a metadata profile,
    attach a custom field value to the first video entry, then update it.
    (Python 2 code — print statements and the file() builtin.)"""
    # The metadata field we'll add/update
    metaDataFieldName = "SubtitleFormat"
    fieldValue = "VobSub"
    # The Schema file for the field
    # Currently, you must build the xsd yourself. There is no utility provided.
    xsdFile = "MetadataSchema.xsd"
    client = KalturaClient(GetConfig())
    # start new session (client session is enough when we do operations in a users scope)
    ks = client.session.start(ADMIN_SECRET, USER_NAME, KalturaSessionType.ADMIN, PARTNER_ID, 86400, "")
    client.setKs(ks)
    # Setup a pager and search to use
    pager = KalturaFilterPager()
    search = KalturaMediaEntryFilter()
    search.setOrderBy(KalturaMediaEntryOrderBy.CREATED_AT_ASC)
    search.setMediaTypeEqual(KalturaMediaType.VIDEO) # Video only
    pager.setPageSize(10)
    pager.setPageIndex(1)
    print "List videos, get the first one..."
    # Get 10 video entries, but we'll just use the first one returned
    entries = client.media.list(search, pager).objects
    # make sure we have a metadata profile
    profile = KalturaMetadataProfile()
    profile.setMetadataObjectType(KalturaMetadataObjectType.ENTRY)
    viewsData = ""
    client.metadata.metadataProfile.add(profile, file(xsdFile, 'rb').read(), viewsData)
    # Check if there are any custom fields defined in the KMC (Settings -> Custom Data)
    # for the first item returned by the previous listaction
    filter = KalturaMetadataProfileFilter()
    metadata = client.metadata.metadataProfile.list(filter, pager).objects
    profileId = metadata[0].getId()
    name = entries[0].getName()
    id = entries[0].getId()
    if metadata[0].getXsd() != None:
        print "1. There are custom fields for video: " + name + ", entryid: " + id
    else:
        print "1. There are no custom fields for video: " + name + ", entryid: " + id
    # Add a custom data entry in the KMC (Settings -> Custom Data)
    profile = KalturaMetadataProfile()
    profile.setMetadataObjectType(KalturaMetadataObjectType.ENTRY)
    viewsData = ""
    metadataResult = client.metadata.metadataProfile.update(profileId, profile, file(xsdFile, 'rb').read(), viewsData)
    assert(metadataResult.xsd != None)
    # Add the custom metadata value to the first video
    filter2 = KalturaMetadataFilter()
    filter2.setObjectIdEqual(entries[0].id)
    xmlData = "<metadata><SubtitleFormat>" + fieldValue + "</SubtitleFormat></metadata>"
    metadata2 = client.metadata.metadata.add(profileId, profile.metadataObjectType, entries[0].id, xmlData)
    assert(metadata2.xml != None)
    print "3. Successfully added the custom data field for video: " + name + ", entryid: " + id
    xmlStr = metadata2.xml
    print "XML used: " + xmlStr
    # Now lets change the value (update) of the custom field
    # Get the metadata for the video
    filter3 = KalturaMetadataFilter()
    filter3.setObjectIdEqual(entries[0].id)
    metadataList = client.metadata.metadata.list(filter3).objects
    assert(metadataList[0].xml != None)
    print "4. Current metadata for video: " + name + ", entryid: " + id
    xmlquoted = metadataList[0].xml
    print "XML: " + xmlquoted
    xml = metadataList[0].xml
    # Make sure we find the old value in the current metadata
    pos = xml.find("<" + metaDataFieldName + ">" + fieldValue + "</" + metaDataFieldName + ">")
    assert(pos >= 0)
    # Swap the field's value in the XML and push the update back
    pattern = re.compile("<" + metaDataFieldName + ">([^<]+)</" + metaDataFieldName + ">")
    xml = pattern.sub("<" + metaDataFieldName + ">Ogg Writ</" + metaDataFieldName + ">", xml)
    rc = client.metadata.metadata.update(metadataList[0].id, xml)
    print "5. Updated metadata for video: " + name + ", entryid: " + id
    xmlquoted = rc.xml
    print "XML: " + xmlquoted
# copied from C# tester
def AdvancedMultiRequestExample():
    """Batch five dependent API calls into one multi-request: start a session,
    create a mix entry, upload a video, register it, and append it to the mix.
    (Python 2 code — print statements and the file() builtin.)"""
    client = KalturaClient(GetConfig())
    client.startMultiRequest()
    # Request 1
    ks = client.session.start(ADMIN_SECRET, USER_NAME, KalturaSessionType.ADMIN, PARTNER_ID, 86400, "")
    client.setKs(ks) # for the current multi request, the result of the first call will be used as the ks for next calls
    mixEntry = KalturaMixEntry()
    mixEntry.setName(".Net Mix")
    mixEntry.setEditorType(KalturaEditorType.SIMPLE)
    # Request 2
    mixEntry = client.mixing.add(mixEntry)
    # Request 3
    uploadTokenId = client.media.upload(file('demovideo.flv', 'rb'))
    mediaEntry = KalturaMediaEntry()
    mediaEntry.setName("Media Entry For Mix")
    mediaEntry.setMediaType(KalturaMediaType.VIDEO)
    # Request 4
    mediaEntry = client.media.addFromUploadedFile(mediaEntry, uploadTokenId)
    # Request 5
    client.mixing.appendMediaEntry(mixEntry.id, mediaEntry.id)
    response = client.doMultiRequest()
    for subResponse in response:
        if isinstance(subResponse, KalturaException):
            print "Error occurred: " + subResponse.message
    # when accessing the response object we will use an index and not the response number (response number - 1)
    assert(isinstance(response[1], KalturaMixEntry))
    mixEntry = response[1]
    print "The new mix entry id is: " + mixEntry.id
# Smoke-test sequence run at import time: session, upload, data serve,
# multi-request, and error handling — then the two sample functions above.
# create session
client = KalturaClient(GetConfig())
ks = client.session.start(ADMIN_SECRET, USER_NAME, KalturaSessionType.ADMIN, PARTNER_ID, 86400, "")
client.setKs(ks)
# add media
uploadTokenId = client.media.upload(file('demovideo.flv', 'rb'))
mediaEntry = KalturaMediaEntry()
mediaEntry.setName("Media Entry Using Python Client")
mediaEntry.setMediaType(KalturaMediaType(KalturaMediaType.VIDEO))
mediaEntry = client.media.addFromUploadedFile(mediaEntry, uploadTokenId)
# serve: round-trip a data entry through the serve URL and compare
DATA_ENTRY_CONTENT = 'bla bla bla'
dataEntry = KalturaDataEntry()
dataEntry.setName('test data entry')
dataEntry.setDataContent(DATA_ENTRY_CONTENT)
addedDataEntry = client.data.add(dataEntry)
serveUrl = client.data.serve(addedDataEntry.id)
f = urllib.urlopen(serveUrl)
assert(DATA_ENTRY_CONTENT == f.read())
# multi request
client = KalturaClient(GetConfig())
client.startMultiRequest()
ks = client.session.start(ADMIN_SECRET, USER_NAME, KalturaSessionType.ADMIN, PARTNER_ID, 86400, "")
client.setKs(ks)
listResult = client.baseEntry.list()
multiResult = client.doMultiRequest()
print multiResult[1].totalCount
client.setKs(multiResult[0])
# error: a bogus upload token must raise a typed KalturaException
mediaEntry = KalturaMediaEntry()
mediaEntry.setName("Error media entry")
mediaEntry.setMediaType(KalturaMediaType(KalturaMediaType.VIDEO))
try:
    mediaEntry = client.media.addFromUploadedFile(mediaEntry, 'blkjfalkj')
    assert(False)
except KalturaException, e:
    assert(e.message == 'The uploaded file was not found by the given token id, or was already used')
    assert(e.code == 'UPLOADED_FILE_NOT_FOUND_BY_TOKEN')
# multi request error: inside a multi-request the error comes back as a result object
client = KalturaClient(GetConfig())
client.startMultiRequest()
ks = client.session.start(ADMIN_SECRET, USER_NAME, KalturaSessionType.ADMIN, PARTNER_ID, 86400, "")
client.setKs(ks)
mediaEntry = KalturaMediaEntry()
mediaEntry.setName("Error media entry")
mediaEntry.setMediaType(KalturaMediaType(KalturaMediaType.VIDEO))
client.media.addFromUploadedFile(mediaEntry, 'blkjfalkj')
multiResult = client.doMultiRequest()
client.setKs(multiResult[0])
assert(isinstance(multiResult[1], KalturaException))
assert(multiResult[1].message == 'The uploaded file was not found by the given token id, or was already used')
assert(multiResult[1].code == 'UPLOADED_FILE_NOT_FOUND_BY_TOKEN')
SampleMetadataOperations()
AdvancedMultiRequestExample()
|
'''7.1 Write a program that prompts for a file name, then opens that file and reads
through the file, and print the contents of the file in upper case. Use the
file words.txt to produce the output below.
You can download the sample data at http://www.pythonlearn.com/code/words.txt'''
fname = raw_input("Enter file name: ")
if len(fname) == 0:
print "Incorrect file name, please enter new file name"
fh = open(fname)
for line in fh:
line = line.rstrip().upper()
print line
|
# -*- coding: utf-8 -*-
import itchat
import numpy as np
import pandas as pd
from collections import defaultdict
import re
import jieba
import os
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
import PIL.Image as Image
itchat.login()  # scan the QR code that pops up to log in to WeChat
friends = itchat.get_friends(update=True)
NickName = friends[0].NickName  # entry 0 is the logged-in account itself
os.mkdir(NickName)  # create an output folder named after the account
file = '\%s' % NickName  # folder's relative path ('\' is a literal backslash — Windows)
cp = os.getcwd()  # current working directory
path = os.path.join(cp + file)  # the folder's absolute path
os.chdir(path)  # work inside it from now on
number_of_friends = len(friends)  # friend count
# print(number_of_friends)
df_friends = pd.DataFrame(friends)  # load the friend records into a DataFrame
# Extract and clean the friends' signatures to build the corpus.
Signatures = df_friends.Signature
regex1 = re.compile('<span.*?</span>', re.S)  # emoji/markup spans; re.S lets '.' cross newlines
regex2 = re.compile('\s{2,}')  # runs of two or more whitespace characters
# Strip the markup, then collapse whitespace runs to single spaces.
# Bug fix: re.S was previously passed as sub()'s *count* argument (re.S == 16),
# silently capping the number of replacements per signature; flags belong in
# re.compile, not in sub().
Signatures = [regex2.sub(' ', regex1.sub('', signature)) for signature in Signatures]
Signatures = [signature for signature in Signatures if len(signature) > 0]  # drop empties
text = ' '.join(Signatures)
file_name = NickName+'_wechat_signatures.txt'
with open(file_name, 'w', encoding='utf-8') as f:
    f.write(text)
# Tokenize the corpus with jieba (full mode).
wordlist = jieba.cut(text, cut_all=True)
word_space_split = ' '.join(wordlist)
# Plot: the word cloud takes its shape and colours from a local image file.
coloring = np.array(Image.open("D:/pythonDemo/2018/May/pic.png"))
# Generate the word cloud.
my_wordcloud = WordCloud(background_color="white",
                         max_words=2000,
                         mask=coloring,
                         max_font_size=60,
                         random_state=42,
                         scale=2,
                         font_path="D:/pythonDemo/2018/May/simhei.ttf").generate(word_space_split)
# A CJK-capable font must be supplied; otherwise Chinese characters render garbled.
# font_path="D:\pythonDemo\2018\May\simhei.ttf"
file_name_p = NickName+'.jpg'
my_wordcloud.to_file(file_name_p)  # save the image
|
# Hack Machine Language Assembler written in Python3
import sys
import re
# read assembly file(.asm) and parse file content into a list and returns the list, also update symbol table
# read assembly file(.asm) and parse file content into a list and returns the list, also update symbol table
def readAssemblyFile(file, updateSymbolTable):
    """Parse a .asm file into a list of cleaned instruction strings.

    Empty lines and comment-only lines are skipped and inline comments are
    stripped.  For every kept line the updateSymbolTable callback is invoked
    with the cleaned line and the current instruction address; the callback
    returns True for label pseudo-instructions, which do not consume an
    address.
    """
    assemblyProgramList = []
    with open(file, mode='r') as f:
        lines = f.read().splitlines()
    lineNumber = 0
    for line in lines:
        if not line:  # ignore empty lines
            continue
        elif line[0:2] == '//':  # ignore lines with comment comes first
            continue
        else:
            if '//' in line:
                line = line.split('//')[0]  # drop the inline comment
            line = line.strip()
            if not line:  # line held only whitespace (or whitespace + comment)
                continue
            assemblyProgramList.append(line)
            # BUG FIX: the raw, uncleaned line used to be passed to the
            # callback, so inline comments leaked into symbol names.
            if not updateSymbolTable(line, lineNumber):
                lineNumber += 1
    return assemblyProgramList
# initialize c-type instruction table
def initializeCInstructionTable():
cInstructionTable = {
"comp": {
'0': '0101010',
'1': '0111111',
'-1': '0111010',
'D': '0001100',
'A': '0110000',
'!D': '0001101',
'!A': '0110001',
'-D': '0001111',
'-A': '0110011',
'D+1': '0011111',
'A+1': '0110111',
'D-1': '0001110',
'A-1': '0110010',
'D+A': '0000010',
'D-A': '0010011',
'A-D': '0000111',
'D&A': '0000000',
'D|A': '0010101',
'M': '1110000',
'!M': '1110001',
'-M': '1110011',
'M+1': '1110111',
'M-1': '1110010',
'D+M': '1000010',
'D-M': '1010011',
'M-D': '1000111',
'D&M': '1000000',
'D|M': '1010101'
},
"dest": {
"null": '000',
"M": "001",
"D": "010",
"MD": "011",
"A": "100",
"AM": "101",
"AD": "110",
"AMD": "111"
},
"jump": {
"null": '000',
"JGT": "001",
"JEQ": "010",
"JGE": "011",
"JLT": "100",
"JNE": "101",
"JLE": "110",
"JMP": "111"
}
}
return cInstructionTable
# initialize symbol table
def initializeSymbolTable():
symbolTable = {
"R0": "0",
"R1": "1",
"R2": "2",
"R3": "3",
"R4": "4",
"R5": "5",
"R6": "6",
"R7": "7",
"R8": "8",
"R9": "9",
"R10": "10",
"R11": "11",
"R12": "12",
"R13": "13",
"R14": "14",
"R15": "15",
"SCREEN": "16384",
"KBD": "24576",
"SP": "0",
"LCL": "1",
"ARG": "2",
"THIS": "3",
"THAT": "4"
}
return symbolTable
# update symbol table from user defined symbols
def updateSymbolTable(oneLineAssembly, lineNumber):
foundParenthesis = False
if '@' in oneLineAssembly: # update variable symbols
symbol = oneLineAssembly.strip().split('@')[1]
if symbol not in symbolTable and not symbol.isdigit():
symbolTable[symbol] = USER_DEFIEND_SYMBOL_INIT
elif '(' and ')' in oneLineAssembly: # update label symbols
symbol = oneLineAssembly[oneLineAssembly.find("(") + 1 : oneLineAssembly.find(")")]
symbolTable[symbol] = str(lineNumber)
foundParenthesis = True
return foundParenthesis
# main algorithm to transfer Hack machine language into binary format and put them into a list
def transferHackAssemblyToBinaryCode(assemblyProgramList):
binaryProgramList = []
variableSymbolAddress = 16
for assembly in assemblyProgramList:
if '@' in assembly: # decode A-type instruction
symbol = assembly.strip().split('@')[1]
if not symbol.isdigit():
if symbolTable[symbol] == USER_DEFIEND_SYMBOL_INIT:
symbolTable[symbol] = str(variableSymbolAddress)
variableSymbolAddress += 1
digitToBinary = '{0:016b}'.format(int(symbolTable[symbol]))
else:
digitToBinary = '{0:016b}'.format(int(symbol))
binaryProgramList.append(digitToBinary)
elif '=' in assembly or ';' in assembly: # decode C-type instruction
firstThreeBits = '111'
compBits = '-------'
destBits = '000'
jumpBits = '000'
if '=' in assembly and ';' not in assembly:
dest, comp = assembly.split('=')
destBits = cInstructionTable["dest"][dest]
compBits = cInstructionTable["comp"][comp]
elif '=' in assembly and ';' in assembly:
dest, comp, jump = re.split('=|;', assembly)
destBits = cInstructionTable["dest"][dest]
compBits = cInstructionTable["comp"][comp]
jumpBits = cInstructionTable["jump"][jump]
elif ';' in assembly:
comp, jump = assembly.split(';')
jumpBits = cInstructionTable["jump"][jump]
compBits = cInstructionTable["comp"][comp]
binaryProgramList.append(firstThreeBits + compBits + destBits + jumpBits)
return binaryProgramList
# write binary format program into a file
def writeBinaryProgramToFile(fileName, binaryProgramList):
outputFileName = fileName.split('.')[0] + '.hack'
with open(outputFileName, mode='w') as f:
for line in binaryProgramList:
f.write(line + '\n')
# Sentinel stored for a variable symbol until it is assigned a RAM address.
USER_DEFIEND_SYMBOL_INIT = "INIT"
# initialize c-type instruction table
cInstructionTable = initializeCInstructionTable()
# initialize symbol table
symbolTable = initializeSymbolTable()
def main():
    """Assemble the .asm file named on the command line into a .hack file."""
    if len(sys.argv) == 2:
        # read assembly file(.asm) and parse file content into a list, also update symbol table
        fileName = sys.argv[1]
        assemblyProgramList = readAssemblyFile(fileName, updateSymbolTable)
        # main algorithm to transfer Hack machine language into binary format and put them into a list
        binaryProgramList = transferHackAssemblyToBinaryCode(assemblyProgramList)
        # write binary format program into a file
        writeBinaryProgramToFile(fileName, binaryProgramList)
    else:
        print("Usage: python hack_assembler.py [assemblyFile.asm]")
if __name__ == '__main__':
    main()
import numpy as np
from numpy import cos, sin, tan, arctan, radians, degrees, arcsin, arctan2, sqrt, arccos
from uncertainties import unumpy
from math import log
from scipy.linalg import lstsq
from datetime import datetime
import matplotlib.pyplot as plt
from PyGEL3D import gel
def dynecm2nm(x):
    """Convert a moment from dyne-cm to N-m."""
    return 1e-7 * x
def nm2dynecm(x):
    """Convert a moment from N-m to dyne-cm."""
    return 1e7 * x
def spherical2cart(pos):
    """Convert (r, theta_deg, phi_deg) spherical coordinates to Cartesian (x, y, z)."""
    r, theta_deg, phi_deg = pos
    t = radians(theta_deg)
    p = radians(phi_deg)
    return (
        r * sin(t) * cos(p),
        r * sin(t) * sin(p),
        r * cos(t)
    )
def cart2spherical(pos):
    """Convert Cartesian (x, y, z) to spherical (r, theta_deg, phi_deg)."""
    x, y, z = pos
    r = sqrt(np.sum(np.array(pos) ** 2))
    # polar angle measured from +z, azimuth from +x, both in degrees
    theta = degrees(arctan2(sqrt(x**2 + y**2), z))
    phi = degrees(arctan2(y, x))
    return r, theta, phi
def distance(pos1, pos2):
    """Straight-line (chord) distance between two points given in spherical coordinates."""
    x1, y1, z1 = spherical2cart(pos1)
    x2, y2, z2 = spherical2cart(pos2)
    return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)
def haversine(pos1, pos2):
    """Great-circle distance in km between two (lon, lat) points in degrees,
    on a sphere of mean Earth radius 6371 km."""
    lon1, lat1 = radians(pos1[0]), radians(pos1[1])
    lon2, lat2 = radians(pos2[0]), radians(pos2[1])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    half_chord = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    central_angle = 2 * arcsin(sqrt(half_chord))
    return 6371 * central_angle
def vincenty(lat1, lon1, lat2, lon2):
    """
    Written by https://www.johndcook.com/

    Geodesic distance in metres between two points on the WGS84 ellipsoid,
    computed with Vincenty's iterative inverse formula.  Coordinates are
    passed straight into the trig functions, so they must be in radians.
    """
    a = 6378137.0  # equatorial radius in meters
    f = 1 / 298.257223563  # ellipsoid flattening
    b = (1 - f) * a
    tolerance = 1e-11  # to stop iteration
    phi1, phi2 = lat1, lat2
    # reduced latitudes on the auxiliary sphere
    U1 = arctan((1 - f) * tan(phi1))
    U2 = arctan((1 - f) * tan(phi2))
    L1, L2 = lon1, lon2
    L = L2 - L1
    lambda_old = L + 0
    # iterate the longitude difference on the auxiliary sphere to convergence
    while True:
        t = (cos(U2) * sin(lambda_old)) ** 2
        t += (cos(U1) * sin(U2) - sin(U1) * cos(U2) * cos(lambda_old)) ** 2
        sin_sigma = t ** 0.5
        cos_sigma = sin(U1) * sin(U2) + cos(U1) * cos(U2) * cos(lambda_old)
        sigma = arctan2(sin_sigma, cos_sigma)
        sin_alpha = cos(U1) * cos(U2) * sin(lambda_old) / sin_sigma
        cos_sq_alpha = 1 - sin_alpha ** 2
        cos_2sigma_m = cos_sigma - 2 * sin(U1) * sin(U2) / cos_sq_alpha
        C = f * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha)) / 16
        t = sigma + C * sin_sigma * (cos_2sigma_m + C * cos_sigma * (-1 + 2 * cos_2sigma_m ** 2))
        lambda_new = L + (1 - C) * f * sin_alpha * t
        if abs(lambda_new - lambda_old) <= tolerance:
            break
        else:
            lambda_old = lambda_new
    # Helmert's series expansion for the ellipsoidal correction
    u2 = cos_sq_alpha * ((a ** 2 - b ** 2) / b ** 2)
    A = 1 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
    B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2)))
    t = cos_2sigma_m + 0.25 * B * (cos_sigma * (-1 + 2 * cos_2sigma_m ** 2))
    t -= (B / 6) * cos_2sigma_m * (-3 + 4 * sin_sigma ** 2) * (-3 + 4 * cos_2sigma_m ** 2)
    delta_sigma = B * sin_sigma * t
    s = b * A * (sigma - delta_sigma)
    return s
def distance_from_hull(hull, mts):
    """Signed distance (km) of each tensor's position from a convex-hull surface.

    The hull simplices are assembled into a PyGEL3D manifold; the sign of
    each distance is forced negative inside the hull (per the ray-inside
    test) and positive outside.
    """
    mesh = gel.Manifold()
    for simplex in hull.simplices:
        mesh.add_face(hull.points[simplex])
    mesh_dist = gel.MeshDistance(mesh)
    signed = []
    for point in (spherical2cart(mt.pos) for mt in mts):
        d = mesh_dist.signed_distance(point)
        inside = mesh_dist.ray_inside_test(point)
        if inside:
            if d > 0:
                d *= -1
        else:
            if d < 0:
                d *= -1
        signed.append(d)
    return np.round(np.array(signed) / 1000, 2)  # metres -> kilometres
def rotate(data, strike):
    """Rotate the x/y columns of *data* according to the given strike angle.

    :param data: (n, >=2) numpy array; a rotated copy is returned, the
                 input is left untouched.
    :param strike: strike angle in degrees.
    """
    data = data.copy()
    theta = 270 - strike if strike < 270 else 90 - strike
    theta = radians(theta)
    c, s = np.cos(theta), np.sin(theta)
    # BUG FIX: the full 3x3 Rz matrix was multiplied against the 2-vector
    # data[i, :2], which raises a shape-mismatch ValueError; only the 2x2
    # x/y block is needed (the z row/column leave z unchanged anyway).
    Rz = np.array(((c, -s), (s, c)))
    for i in range(len(data)):
        data[i, :2] = Rz.dot(data[i, :2])
    return data
def planefit(tensors):
    """Least-squares quadratic-surface fit through the tensor positions.

    Returns (X, Y, Z, area): the meshgrid coordinates, the fitted surface
    heights, and the surface area as returned by scipy's dblquad (a
    (value, abserr) tuple).

    NOTE(review): `mn[0] - mx[1]` mixes the x-minimum with the y-maximum
    when sizing the grid -- this looks like a typo for `mn[0] - mx[0]`;
    confirm against the intended resolution (the trailing "# 20" suggests
    ~20 grid points).
    """
    data = np.row_stack([spherical2cart(tensor.pos) for tensor in tensors])
    mn = np.min(data, axis=0)
    mx = np.max(data, axis=0)
    dx = 1e4
    res = abs(int((mn[0] - mx[1]) / dx)) # 20
    X, Y = np.meshgrid(np.linspace(mn[0], mx[0], res), np.linspace(mn[1], mx[1], res))
    XX = X.flatten()
    YY = Y.flatten()
    # design matrix for z = C0 + C1*x + C2*y + C3*x*y + C4*x^2 + C5*y^2
    A = np.c_[np.ones(data.shape[0]), data[:, :2], np.prod(data[:, :2], axis=1), data[:, :2] ** 2]
    C, res, _, _ = lstsq(A, data[:, 2])
    Z = np.dot(np.c_[np.ones(XX.shape), XX, YY, XX * YY, XX ** 2, YY ** 2], C).reshape(X.shape)
    # print('COEFFICIENTS: ', C)
    from scipy import integrate
    b = C[1]; c = C[2]; d = C[3]; e = C[4]; f = C[5]
    # surface-area integrand sqrt(1 + (dz/dx)^2 + (dz/dy)^2)
    fun = lambda x, y: sqrt((b + d * y + 2 * e * x) ** 2 + (c + d * x + 2 * f * y) ** 2 + 1)
    # print('AREA: ')
    area = integrate.dblquad(fun, mn[0], mx[0], lambda x: mn[1], lambda x: mx[1])
    return X, Y, Z, area
# unit normal vector of plane defined by points a, b, and c
def unit_normal(a, b, c):
    """Unit normal of the plane through points a, b, c (each a 3-sequence)."""
    nx = np.linalg.det([[1, a[1], a[2]],
                        [1, b[1], b[2]],
                        [1, c[1], c[2]]])
    ny = np.linalg.det([[a[0], 1, a[2]],
                        [b[0], 1, b[2]],
                        [c[0], 1, c[2]]])
    nz = np.linalg.det([[a[0], a[1], 1],
                        [b[0], b[1], 1],
                        [c[0], c[1], 1]])
    norm = (nx ** 2 + ny ** 2 + nz ** 2) ** .5
    return nx / norm, ny / norm, nz / norm
# area of polygon poly
def poly_area(poly):
    """Area of a planar polygon in 3-D given as a list of vertices."""
    if len(poly) < 3:  # fewer than three vertices spans no area
        return 0
    accum = [0, 0, 0]
    n = len(poly)
    for idx in range(n):
        cross = np.cross(poly[idx], poly[(idx + 1) % n])
        accum[0] += cross[0]
        accum[1] += cross[1]
        accum[2] += cross[2]
    # project the summed cross products onto the plane normal
    projected = np.dot(accum, unit_normal(poly[0], poly[1], poly[2]))
    return abs(projected / 2)
def surface_area(tensors=None, x=None, y=None, z=None):
    """Approximate surface area of a fitted grid, summed patch by patch.

    Either pass *tensors* (a surface is fitted first via planefit) or the
    x/y/z meshgrids directly.  Each grid cell is treated as a planar
    quadrilateral and its area accumulated with poly_area.
    """
    if tensors is not None:
        # BUG FIX: planefit returns (X, Y, Z, area); the 4-tuple was being
        # unpacked into three names, which raised ValueError.
        x, y, z, _ = planefit(tensors)
    area = 0
    for j in range(len(y) - 1):
        for i in range(len(x) - 1):
            dA = poly_area([[x[i, j], y[i, j], z[i, j]],
                            [x[i + 1, j], y[i + 1, j], z[i + 1, j]],
                            [x[i, j + 1], y[i, j + 1], z[i, j + 1]],
                            [x[i + 1, j + 1], y[i + 1, j + 1], z[i + 1, j + 1]]])
            area += dA
    return area
def avg_strain_tensor(tensors, area=None):
    """Average (Kostrov-style) strain tensor over the region spanned by *tensors*.

    :param area: precomputed surface area; computed from the tensors when None.
    """
    # BUG FIX: the guard previously tested `tensors is not None`, so a
    # caller-supplied area was always recomputed and silently ignored.
    if area is None:
        area = surface_area(tensors)
    volume = area * 1e5  # thickness scaling inherited from the original -- TODO confirm
    mu = 3.3e10  # shear modulus [Pa]
    tensor = tensor_sum(tensors)
    strain_tensor = tensor.mt_e / (2 * mu * volume)
    return strain_tensor
def second_invariant(eps):
    """Second invariant J2 of a symmetric 3x3 (strain) tensor from its eigenvalues."""
    w = np.linalg.eigh(eps)[0]
    trace = np.sum(w)
    pair_products = w[2] * w[1] + w[2] * w[0] + w[1] * w[0]
    return trace ** 2 - 2 * pair_products
def plate_velocity(tensors):
    """Plate-velocity estimate from the cumulative seismic moment.

    Uses the time span of the tensor dates (years), the fitted surface's
    lateral extent l and depth extent w, and a shear modulus of 3.3e10 Pa.
    """
    dates = [mt.date for mt in tensors]
    mu = 3.3e10  # shear modulus [Pa]
    t = (max(dates) - min(dates)).total_seconds() / (3600 * 24 * 365)  # span in years
    # BUG FIX: planefit returns (X, Y, Z, area); unpacking the 4-tuple into
    # three names raised ValueError.
    x, y, z, _ = planefit(tensors)
    l = max(abs(max(x.flatten()) - min(x.flatten())), abs(max(y.flatten()) - min(y.flatten())))
    w = abs(max(z.flatten()) - min(z.flatten()))
    print(l)
    print(w)
    v = sum_m0(tensors) / (mu * l * w * t)
    return v
def plate_velocity_usgs(usgs_df, hull):
    """Plate-velocity estimate from a USGS catalogue DataFrame.

    NOTE(review): this function looks unfinished -- it computes the time
    span `t` in years and then falls through, returning None; `mu` and the
    `hull` parameter are never used.
    """
    import pandas as pd
    mu = 3.3e10
    times = usgs_df[['time']].to_numpy().flatten()
    t = pd.Timedelta(max(times) - min(times)).value / 3.154e+16  # convert ns to year
def point_in_hull(point, hull, tolerance=1e-12):
    """True when *point* satisfies every halfspace inequality of the hull."""
    for eq in hull.equations:
        # eq = (normal..., offset); inside means normal.point + offset <= tol
        if np.dot(eq[:-1], point) + eq[-1] > tolerance:
            return False
    return True
def princax(tensor):
    """Principal axes of a moment tensor as (value, plunge_deg, azimuth_deg) tuples.

    Returns three tuples labelled (t, b, p), derived from the eigen
    decomposition of tensor.mt_e.

    NOTE(review): np.linalg.eigh returns eigenvalues in ascending order, so
    `t` below carries the smallest eigenvalue -- confirm the intended T/B/P
    labelling convention.  The plunge/azimuth formulas index rows of the
    eigenvector matrix (vct[0] is the first *component* of every
    eigenvector, not the first eigenvector) -- verify against a reference
    implementation.
    """
    mt = np.array(tensor.mt_e, dtype=np.float_)
    val, vct = np.linalg.eigh(mt)
    pl = arcsin(-vct[0])
    az = arctan2(vct[2], -vct[1])
    # normalise plunges to be non-negative and azimuths into [0, 2*pi)
    for i in range(3):
        if pl[i] <= 0:
            pl[i] = -pl[i]
            az[i] += np.pi
        if az[i] < 0:
            az[i] += 2 * np.pi
        if az[i] > 2 * np.pi:
            az[i] -= 2 * np.pi
    pl = degrees(pl)
    az = degrees(az)
    t = (val[0], pl[0], az[0])
    b = (val[1], pl[1], az[1])
    p = (val[2], pl[2], az[2])
    return t, b, p
def angle(v1, v2, acute):
    """Angle between vectors v1 and v2; the reflex complement when acute is False."""
    cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    theta = np.arccos(cos_theta)
    # NOTE: the original compares with == True, preserved for exact parity
    return theta if acute == True else 2 * np.pi - theta
def kaverina(dipt, dipb, dipp):
    """Kaverina diagram (x, y) projection from T/B/P axis plunges in degrees."""
    st = sin(radians(dipt))
    sb = sin(radians(dipb))
    sp = sin(radians(dipp))
    # chord length and normalisation of the spherical triangle projection
    l = 2 * sin(0.5 * arccos((st + sb + sp) / sqrt(3)))
    n = sqrt(2 * ((sb - sp) ** 2 + (sb - st) ** 2 + (st - sp) ** 2))
    x = sqrt(3) * (l / n) * (st - sp)
    y = (l / n) * (2 * sb - sp - st)
    return x, y
def kaverina_base():
    """Draw the empty Kaverina ternary diagram (axes, guide lines, tick labels).

    Returns the matplotlib figure; callers overlay event points on it.
    """
    fig = plt.figure()
    plt.axes().set_aspect('equal')
    # dashed guide line at 40 degrees plunge
    deg = 90 - 40
    degg = sin(radians(90 - deg)) * cos(radians(deg))
    B = degrees(arcsin(sqrt(np.linspace(0, 1, 51) * degg)))
    P = degrees(arcsin(sqrt((1 - np.linspace(0, 1, 51)) * degg)))
    X, Y = kaverina(deg, B, P)
    plt.plot(X, Y, '--', color='grey', linewidth=1)
    # dashed guide lines at 30 degrees plunge (two orientations)
    deg = 90 - 30
    degg = sin(radians(90 - deg)) * cos(radians(deg))
    B = degrees(arcsin(sqrt(np.linspace(0, 1, 51) * degg)))
    P = degrees(arcsin(sqrt((1 - np.linspace(0, 1, 51)) * degg)))
    X, Y = kaverina(P, B, deg)
    plt.plot(X, Y, '--', color='grey', linewidth=1)
    X, Y = kaverina(P, deg, B)
    plt.plot(X, Y, '--', color='grey', linewidth=1)
    # T-axis plunge tick marks and labels along the bottom edge
    tickx, ticky = kaverina(range(0, 91, 10), np.zeros((1, 10)), range(90, -1, -10))
    # plt.plot(X[0], Y[0], color='black', linewidth=2)
    plt.scatter(tickx[0][1:], ticky[0][1:], marker=3, c='black', linewidth=1)
    for i in range(1, 10):
        plt.text(tickx[0][i] - 0.04, ticky[0][i] - 0.04, i * 10, fontsize=9, verticalalignment='top')
    plt.text(0, -0.75, 'T axis plunge', fontsize=9, horizontalalignment='center')
    # solid outline of the diagram (three boundary curves)
    T = degrees(arcsin(sqrt(np.linspace(0, 1, 101))))
    P = degrees(arcsin(sqrt(1 - (np.linspace(0, 1, 101)))))
    X, Y = kaverina(T, 0., P)
    plt.plot(X, Y, color='black', linewidth=1)
    X, Y = kaverina(P, T, 0.)
    plt.plot(X, Y, color='black', linewidth=1)
    X, Y = kaverina(0., P, T)
    plt.plot(X, Y, color='black', linewidth=1)
    # B-axis plunge tick marks and labels along the left edge
    tickx, ticky = kaverina(np.zeros((1, 10)), range(0, 91, 10), range(90, -1, -10))
    # plt.plot(X[0], Y[0], color='black', linewidth=2)
    plt.scatter(tickx[0][1:], ticky[0][1:], marker=0, c='black', linewidth=1)
    for i in range(1, 10):
        plt.text(tickx[0][i] - 0.04, ticky[0][i] - 0.02, i * 10, fontsize=9, horizontalalignment='right')
    plt.text(-0.63, 0.25, 'B axis plunge', fontsize=9, horizontalalignment='center', rotation=60)
    plt.axis('off')
    return fig
def triangle(tdip, bdip, pdip):
    """Project T/B/P axis dips (degrees) onto a focal-mechanism triangle, returning (h, v)."""
    t = radians(tdip)
    bd = radians(bdip)
    p = radians(pdip)
    mid = radians(35.26)  # dip of the diagram centre
    # print(tdip[0], bdip[0], pdip[0])
    psi = arctan2(sin(t), sin(p)) - radians(45)
    print(psi)  # kept from the original (debug output)
    proj = sin(mid) * cos(bd) * cos(psi)
    denom = sin(mid) * sin(bd) + cos(mid) * sin(bd) * cos(psi)
    h = cos(bd) * sin(psi) / denom
    v = (cos(mid) * sin(bd) - proj) / denom
    return h, v
def radius(theta):
    """Earth radius in metres at geodetic latitude *theta* (degrees), WGS84 ellipsoid."""
    t = radians(theta)
    r0 = 6356.752 * 1e3  # WGS84 polar radius
    r1 = 6378.137 * 1e3  # WGS84 equatorial radius
    numerator = (r1 ** 2 * cos(t)) ** 2 + (r0 ** 2 * sin(t)) ** 2
    denominator = (r1 * cos(t)) ** 2 + (r0 * sin(t)) ** 2
    return sqrt(numerator / denominator)
def _perturb(mt):
    """One random realisation of *mt*, each component perturbed by its 1-sigma error."""
    # draw gaussian noise element by element, row-major like the original
    noise = np.array([[np.random.normal(scale=sigma) for sigma in row]
                      for row in mt.mt_err])
    return MomentTensor(mt.mt + noise, mt.exp)
def simulate_similarity_to_group(mt, mt_group, n=50000):
    """Monte-Carlo similarity of *mt* to *mt_group* under the tensor's reported errors.

    Perturbations with fclvd > 0.2 are rejected.  Returns a list of
    seismic-consistency values and a triangle-diagram distance factor for
    every accepted draw.
    """
    consistencies = []
    triangle_factors = []
    group_dips = radians(mt_group.axes[:, 1])
    for _ in range(n):
        draw = _perturb(mt)
        if draw.fclvd > 0.2:  # reject strongly non-double-couple draws
            continue
        dips = radians(draw.axes[:, 1])
        triangle_factors.append(np.sum(np.abs(sin(group_dips) ** 2 - sin(dips) ** 2)))
        consistencies.append(seismic_consistency([draw, mt_group]))
    return consistencies, triangle_factors
def simulate_uncertainty(mt, n=50000):
    """Monte-Carlo spread of m0, fclvd and principal axes for one tensor."""
    m0_samples = []
    fclvd_samples = []
    axes_samples = []
    for _ in range(n):
        draw = _perturb(mt)
        axes_samples.append(princax(draw))
        m0_samples.append(draw.m0)
        fclvd_samples.append(draw.fclvd)
    return m0_samples, fclvd_samples, np.array(axes_samples)
def simulate_uncertainty_group(tensors, n=50000):
    """Monte-Carlo spread of seismic consistency for a group of tensors."""
    consistencies = []
    for _ in range(n):
        perturbed = [_perturb(tensor) for tensor in tensors]
        consistencies.append(seismic_consistency(perturbed))
    return consistencies
def seismic_consistency(tensors: list):
    """Ratio of the normalised tensor sum's moment to the summed unit moments.

    Each normalised tensor has unit moment, so the denominator is simply
    the tensor count; values near 1 indicate mutually consistent mechanisms.
    """
    # testing_tensors_m0_sum = sum_m0(tensors)
    denominator = len(tensors)  # for any normalised tensor m0 is 1
    numerator = tensor_sum_normalized(tensors).m0
    return numerator / denominator
def b_value(tensors, mw_min):
    """Magnitude-distribution estimate from the mean mw above a cutoff.

    NOTE(review): returns (mean(mw) - mw_min) * ln(10); verify the
    orientation of this formula against the intended b-value definition.
    """
    # BUG FIX: `mws` was a plain list, so the boolean mask `mws >= mw_min`
    # raised TypeError; masking requires a numpy array.
    mws = np.array([tensor.mw for tensor in tensors])
    m = mws[mws >= mw_min]
    return (np.mean(m) - mw_min) * np.log(10)
def sum_m0(tensors):
    """Total scalar moment of all tensors."""
    return np.sum([t.m0 for t in tensors])
def tensor_sum(tensors):
    """Sum the tensors and renormalise into a MomentTensor with a power-of-ten exponent."""
    total = _sum_mt(tensors)
    try:
        exp = log(np.max(total), 10) // 1
    except TypeError:
        # uncertainties' ufloat entries are not accepted by math.log directly
        exp = log(np.max(unumpy.nominal_values(total)), 10) // 1
    total /= 10 ** exp
    return MomentTensor(total, exp)
def tensor_sum_normalized(tensors):
    """Sum of moment-normalised tensors, renormalised into a MomentTensor.

    _sum_mt_normalized returns (mt_sum, mt_err_sum) when every tensor
    carries errors, but only the 3x3 tensor sum otherwise; in the latter
    case the 2-name unpack below raises ValueError (a 3x3 array yields
    three rows), which is caught to rerun without error handling.
    """
    try:
        mt_sum, mt_err_sum = _sum_mt_normalized(tensors)
        exp = log(np.max(mt_sum), 10) // 1
        mt_sum /= 10 ** exp
        mt_err_sum /= 10 ** exp
    except ValueError:
        # no per-tensor errors available: sum only, and pass mt_err=None
        mt_sum = _sum_mt_normalized(tensors)
        exp = log(np.max(mt_sum), 10) // 1
        mt_sum /= 10 ** exp
        mt_err_sum = None
    # try:
    #     exp = log(np.max(mt_sum), 10) // 1
    # except TypeError:
    #     exp = log(np.max(unumpy.nominal_values(mt_sum)), 10) // 1
    # mt_sum /= 10 ** exp
    # mt_err_sum /= 10 ** exp
    tensor = MomentTensor(mt_sum, exp, mt_err_sum)
    return tensor
def row2mt(data):
    """Build a MomentTensor from one row of a (GCMT-style) catalogue table."""
    event_date = datetime.strptime(data[0], '%Y/%m/%d').date()
    position = (radius(data[1]), data[1], data[2])  # (r, lat, lon)
    # symmetric tensor assembled from the six independent catalogue columns
    mt = np.array([[data[9], data[15], data[17]],
                   [data[15], data[11], data[19]],
                   [data[17], data[19], data[13]]])
    mt_err = np.array([[data[10], data[16], data[18]],
                       [data[16], data[12], data[20]],
                       [data[18], data[20], data[14]]])
    return MomentTensor(mt, data[8], mt_err, position, data[3], event_date, data[7])
def row2mt_new(data):
    """Build a MomentTensor from one row of the newer catalogue layout."""
    position = (radius(data[2]), data[2], data[3])  # (r, lat, lon)
    return MomentTensor(np.array(data[6]), data[5], np.array(data[7]),
                        position, data[4], data[1], data[0])
def strain_tensor(tensors, volume, mu=3.3e10):
    """Kostrov strain tensor: summed moment tensors over 2 * mu * volume."""
    summed = _sum_mt(tensors)
    return summed / (2 * mu * volume)  # TODO (kept from the original)
def plate_vel(tensors, l, w, t, mu=3.3e10):
    """Plate velocity from cumulative moment over fault area l*w, duration t, rigidity mu."""
    total_moment = sum_m0(tensors)
    return total_moment / (mu * l * w * t)
def _sum_mt(tensors):
return np.sum([tensor.mt_e for tensor in tensors], axis=0)
def _sum_mt_normalized(tensors):
# / tensor.m0 to normalise
mt = np.sum([tensor.mt_e / tensor.m0 for tensor in tensors], axis=0)
try:
mt_err = np.sum([tensor.mt_err_e / tensor.m0 for tensor in tensors], axis=0)
return mt, mt_err
except AttributeError:
return mt
def mw2m0(mw):
    """Moment magnitude -> scalar moment (Hanks-Kanamori relation)."""
    return 10 ** (3/2 * (mw + 10.7))
def _m0(mt):
eigvals, _ = np.linalg.eigh(mt.mt)
return sqrt(np.sum(eigvals ** 2) / 2) * 10 ** mt.exp
def _mw(mt):
return 2 / 3 * log(mt.m0, 10) - 10.7
class MomentTensor(object):
    """Seismic moment tensor stored as a mantissa matrix plus a power-of-ten exponent.

    :param mt: 3x3 symmetric matrix, or a 6-sequence (m11, m22, m33, m12, m13, m23)
    :param exp: power-of-ten exponent applied by mt_e / mt_err_e
    :param mt_err: 1-sigma errors, same layout options as mt (optional)
    :param pos: (r, lat, lon) position (optional)
    :param depth: source depth (optional)
    :param date: event date (optional)
    :param name: event identifier (optional)
    """
    def __init__(self, mt, exp, mt_err=None, pos=None, depth=None, date=None, name=None):
        self.exp = exp
        self.pos = pos
        self.depth = depth
        self.date = date
        self.name = name
        # FIX: np.float_ was removed in NumPy 2.0; the builtin float is the
        # same 64-bit dtype.
        if np.shape(mt) == (3, 3):
            self.mt = np.array(mt, dtype=float)
        else:
            # 6-component input: fill the symmetric matrix
            self.mt = np.array([[mt[0], mt[3], mt[4]],
                                [mt[3], mt[1], mt[5]],
                                [mt[4], mt[5], mt[2]]], dtype=float)
        if mt_err is not None:
            if np.shape(mt_err) == (3, 3):
                self.mt_err = np.array(mt_err, dtype=float)
            else:
                self.mt_err = np.array([[mt_err[0], mt_err[3], mt_err[4]],
                                        [mt_err[3], mt_err[1], mt_err[5]],
                                        [mt_err[4], mt_err[5], mt_err[2]]], dtype=float)
    @property
    def mt_e(self):
        # tensor scaled to physical units
        return self.mt * 10 ** self.exp
    @property
    def mt_err_e(self):
        # errors scaled to physical units; raises AttributeError when no
        # mt_err was supplied (relied upon by _sum_mt_normalized)
        return self.mt_err * 10 ** self.exp
    @property
    def mt6(self):
        # Six independent components; inverse of the 6-sequence constructor.
        # BUG FIX: the last element was self.mt[1, 0] (a duplicate of m12);
        # it must be m23 = self.mt[1, 2] to round-trip with __init__.
        return [self.mt[0, 0], self.mt[1, 1], self.mt[2, 2],
                self.mt[0, 1], self.mt[0, 2], self.mt[1, 2]]
    @property
    def m0(self):
        # scalar moment
        return _m0(self)
    @property
    def mw(self):
        # moment magnitude
        return _mw(self)
    @property
    def r(self):
        return self.pos[0]
    @property
    def lat(self):
        return self.pos[1]
    @property
    def lon(self):
        return self.pos[2]
    @property
    def axes(self):
        # principal axes as (value, plunge, azimuth) rows, via princax
        return np.array(princax(self))
    @property
    def fclvd(self):
        # CLVD fraction: |b| / max(|t|, |p|)
        t, b, p = self.axes[:, 0]
        return abs(b) / max(abs(t), abs(p))
    @property
    def e_rel(self):
        # relative error: Frobenius norm of the errors over norm of the tensor
        u = np.linalg.norm(self.mt_err)
        m = np.linalg.norm(self.mt)
        return u / m
|
import datetime
import json
from enum import Enum
class TimeHelper:
    '''Utilities for converting time values into readable text.'''
    @staticmethod
    def sec_to_str(sec):
        '''
        Convert Seconds to readable text format
        :param sec: Seconds
        :return: string like '1d 2h 3m 4s'
        '''
        counts = [0, 0, 0]  # days, hours, minutes
        s = sec
        # peel off whole units largest-first, same subtraction sequence
        # as the original while-loops
        for slot, unit in enumerate((86400, 60 * 60, 60)):
            while s >= unit:
                counts[slot] += 1
                s -= unit
        return f'{counts[0]}d {counts[1]}h {counts[2]}m {s}s'
class DbS(Enum):
    """Paths of the JSON files acting as simple databases, one per record type."""
    MUTES = 'dbs/mutes.json'
    KICKS = 'dbs/kicks.json'
    BANS = 'dbs/bans.json'
    LOGS = 'dbs/logs.json'
class DB:
    '''Database commands backed by the JSON files in DbS.'''
    @staticmethod
    def addlog(uid: int, timestamp: int, item: str, description: str, modrid: int, end: int=None):
        '''
        Add a log object to the json database
        :param uid: User ID.
        :param timestamp: The timestamp that the item is recorded.
        :param item: The item that user infracted(mute, kick, ban, warn)
        :param description: Descriptions of the action, also includes different parameters, for specific types.
        :param modrid: Moderator responsible
        :param end: Duration in seconds; required for 'mute' and 'tempban'.
        :return: None
        '''
        types = ['warn','mute','kick','tempban','ban']
        if item not in types:
            raise TypeError(f'Type {item} is not in the acceptable '
                            f'list.')
        with open(DbS.LOGS.value, 'r') as file:
            logs = json.load(file)
        if uid not in logs:
            logs[uid] = []
        logs[uid].append({
            "time": timestamp,
            "cate": item,
            "desc": {},
            "modr": modrid
        })
        entry = logs[uid][-1]
        entry['desc']['reason'] = description
        if item == 'mute':
            if end is None:
                raise TypeError('"end" parameter does not have a value when using "mute" type')
            entry['desc']['start'] = timestamp
            entry['desc']['end'] = timestamp + end
        elif item == 'tempban':
            # BUG FIX: this branch previously indexed logs[uid][len(logs[uid] - 1)]
            # (a list minus an int), which always raised TypeError.
            entry['desc']['start'] = timestamp
            entry['desc']['end'] = timestamp + end
        # BUG FIX: the updated log structure was never written back to disk.
        with open(DbS.LOGS.value, 'w') as file:
            json.dump(logs, file)
    @staticmethod
    def addmute(uid: int, seconds: int, reason: str):
        '''
        Add a mute record for a user
        :param uid: User ID
        :param seconds: Seconds for the mute(either calculated by hand/code or by TimeHelper.tosec()
        :param reason: Reason to be muted
        :return:None
        '''
        with open(DbS.MUTES.value, 'r') as file:
            mutes = json.load(file)
        # BUG FIX: the key was the literal string 'uid' rather than the user
        # id, and the table was never persisted.  NOTE(review): `reason` is
        # still unused here -- consider recording it via addlog.
        mutes[uid] = datetime.datetime.now().timestamp() + seconds
        with open(DbS.MUTES.value, 'w') as file:
            json.dump(mutes, file)
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Determines the correct downloader implementation to return based on the
feed type.
"""
import logging
import urlparse
from pulp_puppet.plugins.importers.downloaders.exceptions import UnsupportedFeedType, InvalidFeed
from pulp_puppet.plugins.importers.downloaders.web import HttpDownloader
from pulp_puppet.plugins.importers.downloaders.local import LocalDownloader
# -- constants ----------------------------------------------------------------
# Mapping from feed prefix to downloader class
# (keys are URL schemes as parsed by _determine_feed_type below)
MAPPINGS = {
    'file' : LocalDownloader,
    'http' : HttpDownloader,
}
_LOG = logging.getLogger(__name__)
# -- public -------------------------------------------------------------------
def get_downloader(feed, repo, conduit, config):
    """
    Returns an instance of the correct downloader to use for the given feed.
    :param feed: location from which to sync modules
    :type feed: str
    :param repo: describes the repository being synchronized
    :type repo: pulp.plugins.model.Repository
    :param conduit: sync conduit used during the sync process
    :type conduit: pulp.plugins.conduits.repo_sync.RepoSyncConduit
    :param config: configuration of the importer and call
    :type config: pulp.plugins.config.PluginCallConfiguration
    :return: downloader instance to use for the given feed
    :raise UnsupportedFeedType: if there is no applicable downloader for the
           given feed
    :raise InvalidFeed: if the feed cannot be parsed to determine the type
    """
    feed_type = _determine_feed_type(feed)
    try:
        downloader_class = MAPPINGS[feed_type]
    except KeyError:
        raise UnsupportedFeedType(feed_type)
    return downloader_class(repo, conduit, config)
def is_valid_feed(feed):
    """
    Returns whether or not the feed is valid.
    :param feed: repository source
    :type feed: str
    :return: true if the feed is valid; false otherwise
    :rtype: bool
    """
    try:
        return _determine_feed_type(feed) in MAPPINGS
    except InvalidFeed:
        return False
# -- private ------------------------------------------------------------------
def _determine_feed_type(feed):
    """
    Returns the type of feed represented by the given feed.
    :param feed: feed being synchronized
    :type feed: str
    :return: type to use to retrieve the downloader instance
    :rtype: str
    :raise InvalidFeed: if the feed is invalid and a feed cannot be
           determined
    """
    try:
        # the scheme (first component) identifies the feed type
        return urlparse.urlparse(feed)[0]
    except Exception:
        _LOG.exception('Exception parsing feed type for feed <%s>' % feed)
        raise InvalidFeed(feed)
|
import os.path
import subprocess
class SourcesList:
    """Read/write helpers for apt's sources.list and the lliurex custom repo file."""
    def mostrar(self):
        # Return (ok, contents) of the system sources.list.
        # FIX: file handles were previously opened and never closed.
        with open('/etc/apt/sources.list','r') as f:
            return (True,f.read())
    def guardar(self,linies):
        # Overwrite the system sources.list with the given text.
        with open('/etc/apt/sources.list','w') as f:
            f.write(linies)
    def mostrarPropis(self):
        # Return (ok, contents) of the lliurex custom repositories file.
        with open('/usr/share/lliurex-apt2/reps','r') as f:
            return (True,f.read())
    def execUpdate(self):
        # Run `apt-get update` and return its output lines.
        p1 = subprocess.Popen(["apt-get","update"], stdout=subprocess.PIPE)
        p1.wait()
        # NOTE(review): p1.stderr is None unless stderr=subprocess.PIPE is
        # passed to Popen, so this branch never triggers as written --
        # confirm whether stderr capture was intended.
        if p1.stderr:
            result = p1.stderr.readlines()
        else:
            result= p1.stdout.readlines()
        return result
    def guardarPropis(self,linies):
        # Overwrite the lliurex custom repositories file.
        with open('/usr/share/lliurex-apt2/reps','w') as f:
            f.write(linies)
    def execAptUpdate(self,linies):
        # Save the sources list, then refresh the package index.
        self.guardar(linies)
        return self.execUpdate()
    def test(self,user,password):
        # Authentication stub; always accepts.
        #ret=validate_user(user,pwd)
        #print ret
        #return ret
        return True
#class SourcesList
if __name__=="__main__":
    SL = SourcesList()
    # Python 2 print statement: dumps (True, <contents>) of sources.list.
    print SL.mostrar()
|
from django.urls import path
from student import views
# URL routes for the student app.
# NOTE(review): three routes below share name="Home"; Django's
# reverse('Home') will only resolve the last one declared -- confirm the
# duplicate names are intentional.
urlpatterns = [
    path('home/', views.homeView, name="Home"),
    path('register/', views.registerView, name="register"),
    path('student/show/',views.showView,name="show_student"),
    path('register_student/', views.register.as_view(), name="Home"),
    path('register_student/<pk>', views.register.as_view(), name="Home"),
]
#!/usr/bin/env python
# This script is for getting more information out of the DSS ES instance about links.json files.
# Source your environment correctly `source environment && source environment.{stage}`
# Make sure to set your DSS_ES_ENDPOINT environment variable, this can be retrieved from running
# `dssops lambda environment`
# the ES instance has Access Control based on IP, so you'll have to change the access policy
# Make sure to change it back, or you're gonna break CD pipelines.
# Run the script with `python links_inspection.py`
# This should have been kept inside the Azul/Attic, but there are DSS specific classes
# See Azul/1727
from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
import deepdiff
import pprint
import copy
import os
import sys
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
from dss.index.es import ElasticsearchClient
from dss import Config, Replica, ESIndexType, dss_handler, DSSException
from dss import ESDocType
# see last paragraph of https://docs.python.org/3/library/sys.html#sys.getsizeof
def total_size(o, handlers={}, verbose=False):
    """ Returns the approximate memory footprint an object and all of its contents.
    Automatically finds the contents of the following builtin containers and
    their subclasses: tuple, list, deque, dict, set and frozenset.
    To search other containers, add handlers to iterate over their contents:
        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}
    """
    all_handlers = {tuple: iter,
                    list: iter,
                    deque: iter,
                    dict: lambda d: chain.from_iterable(d.items()),
                    set: iter,
                    frozenset: iter,
                    }
    all_handlers.update(handlers)   # user handlers take precedence
    seen = set()                    # object ids already visited (no double count)
    default_size = getsizeof(0)     # fallback when __sizeof__ is missing

    def sizeof(obj):
        if id(obj) in seen:
            return 0
        seen.add(id(obj))
        size = getsizeof(obj, default_size)
        if verbose:
            print(size, type(obj), repr(obj), file=stderr)
        for container_type, handler in all_handlers.items():
            if isinstance(obj, container_type):
                size += sum(sizeof(child) for child in handler(obj))
                break
        return size

    return sizeof(o)
def _format_request_body(page: dict, es_query: dict, replica: Replica, output_format: str) -> dict:
    """Shape one Elasticsearch results page into the API response body."""
    results = []  # type: typing.List[dict]
    for hit in page['hits']['hits']:
        entry = {
            'bundle_fqid': hit['_id'],
            'search_score': hit['_score']
        }
        if output_format == 'raw':
            # include the raw indexed document only when explicitly requested
            entry['metadata'] = hit['_source']
        results.append(entry)
    return {
        'es_query': es_query,
        'results': results,
        'total_hits': page['hits']['total']
    }
# Module-level query configuration shared by search() below.
es_client = ElasticsearchClient.get()
replica = Replica.aws
# Match only documents that carry a files.links_json field.
es_query = { "query": { "bool": { "must": [ { "exists": { "field": "files.links_json"}} ] } } }
output_format = 'raw'
per_page = 1000
search_after = None
# Do not return the raw indexed data unless it is requested
if output_format != 'raw':
    es_query['_source'] = False
# https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-request-search-after.html
# A deterministic sort is required for search_after pagination.
es_query['sort'] = [
    {"uuid": {"order": "desc"}},
    {"manifest.version": {"missing": "last", "order": "desc"}}
]
def search(search_after: str = None):
    """Fetch one page of ES results, optionally continuing after a sort key.

    *search_after* is the comma-joined sort key of the previous page's last
    hit; when given it is split back into the list form ES expects.
    """
    if search_after is not None:
        es_query['search_after'] = search_after.split(',')
    page = es_client.search(index=Config.get_es_alias_name(ESIndexType.docs, replica),
                            doc_type=ESDocType.doc.name,
                            size=per_page,
                            body=es_query,
                            )
    return page
# Accumulators updated by the paging loop below.
total_hits = 0
current_page = 0
# changing this max_pages to 1-2 allows for testing, 300 is overkill it for this search we expect ~150 requests
max_pages = 300
processing_lookup = dict()  # process id -> first link seen for that process
largest_link_json = None
largest_link_json_size = 0  # in bytes, as measured by total_size()
largest_bundle = None
histogram = dict()  # Key is the len of links in a links.json file, value is count.
def print_stats():
    """Print the accumulated link statistics.

    NOTE: destructively replaces each histogram entry's 'unique_process'
    set with its size, so this is only safe to call once per run.
    """
    print(f'total process hits: {total_hits}')
    print(f"total number of unique processes: {len(processing_lookup)}")
    # pprint.pprint(largest_link_json)
    print(f"size in bytes of largest links_json: {largest_link_json_size}")
    # print(largest_bundle)
    for k,v in histogram.items():
        histogram[k]['unique_process'] = len(v['unique_process']) # if you can remove this loop to inspect the sets.
    pprint.pprint(histogram)
    #pprint.pprint(processing_lookup)
# Page through all links.json-bearing bundles, accumulating statistics.
while True:
    page = search(search_after)
    try:
        next_page = page['hits']['hits'][-1]['sort']
    except IndexError:
        # an empty page means the result set is exhausted
        print('i think we got everything')
        print_stats()
        break
    search_after = ','.join(page['hits']['hits'][-1]['sort'])
    current_page += 1
    fmt_page = _format_request_body(page, es_query, replica, output_format)
    for bundles in fmt_page['results']:
        # track the largest links.json seen so far
        size = total_size(bundles['metadata']['files']['links_json'])
        if largest_link_json_size < size:
            largest_link_json_size = size
            largest_link_json = bundles['metadata']['files']['links_json']
            # BUG FIX: this previously assigned largest_bundle_fqid, a name
            # nothing else reads, so largest_bundle always stayed None.
            largest_bundle = bundles
        # histogram of links-per-file
        number_of_links = len(bundles['metadata']['files']['links_json'][0]['links'])
        if histogram.get(number_of_links) is None:
            histogram[number_of_links] = {'number_of_links_json': 1, 'unique_process': set()}
        else:
            histogram[number_of_links]['number_of_links_json'] += 1
        # compare links_json process entries across bundles for collisions
        for link in bundles['metadata']['files']['links_json'][0]['links']:
            total_hits += 1
            processes = link['process']
            # record the process id in the per-bin unique set
            histogram[number_of_links]['unique_process'].add(processes)
            if processes not in processing_lookup:
                processing_lookup[processes] = copy.deepcopy(link)
            else:
                difference = deepdiff.DeepDiff(link, processing_lookup[processes])
                if len(difference.keys()) == 0:
                    continue
                else:
                    print(f'WARNING:: process metadata DOES NOT match for collision: {processing_lookup} {link}')
    if max_pages <= current_page:
        print_stats()
        exit()
|
from pymongo import MongoClient
# Connect to the local MongoDB instance (default host/port).
#client = MongoClient('mongodb://192.168.0.110:27019')
client = MongoClient()

# Seed chapter_3.example_data_2 with a batch of sample user documents.
collection = client.chapter_3.example_data_2
sample_people = [
    {"name": "朱小三", "age": 20, "address": "北京"},
    {"name": "刘小四", "age": 21, "address": "上海"},
    {"name": "马小五", "age": 22, "address": "山东"},
    {"name": "夏侯小七", "age": 23, "address": "河北"},
    {"name": "公孙小八", "age": 24, "address": "广州"},
    {"name": "慕容小九", "age": 25, "address": "杭州"},
    {"name": "欧阳小十", "age": 26, "address": "深圳"},
]
collection.insert_many(sample_people)
from ftplib import FTP
def writeline(data):
    """retrlines callback: write one text line plus newline to the global fd."""
    fd.write(data + "\n")
# Anonymous login to the kernel.org FTP mirror.
f = FTP('ftp.kernel.org')
f.login()
f.cwd('/pub/linux/kernel')
# Text-mode retrieval of README through the line callback above.
fd = open('README','wt')
f.retrlines('RETR README',writeline)
fd.close()
fd = open("patch.gz",'wb')
# NOTE(review): this retrieves README a second time but saves it under
# patch.gz — the command was probably meant to be 'RETR <some .gz file>';
# confirm the intended remote filename.
f.retrbinary("RETR README",fd.write)
fd.close()
f.quit()
|
# Generated by Django 3.1.6 on 2021-03-05 21:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change for the flight app: drops the boolean
    Requirement.have_visa field and adds a Requirement.name CharField."""

    dependencies = [
        ('flight', '0002_flight_price'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='requirement',
            name='have_visa',
        ),
        migrations.AddField(
            model_name='requirement',
            name='name',
            # Existing rows receive the empty string as their name.
            field=models.CharField(default='', max_length=150),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 10:02:21 2022
@author: Pierre Jouannais, Department of Planning, DCEA, Aalborg University
pijo@plan.aau.dk
"""
'''
This script was used to combine different chunks of background MC results to reach certain sizes
that better match the computing performances of the instances that will use these chunks to simulate the LCA results.
It is not necessarily needed if the chunks already have adequate sizes.
'''
import pandas as pd
import decimal
from random import *
import pstats
from itertools import *
from math import*
import csv
import copy
import numpy as np
import random
import datetime
from time import *
import pickle
def importpickle(path):
    """Load and return the single object stored in the pickle file at `path`."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def export_pickle_2(var, name_var, namefolder_in_root):
    """Pickle `var` into ../<namefolder_in_root>/<name_var>.pkl so the object
    survives across python sessions (path is relative to the working dir)."""
    destination = "../" + namefolder_in_root + "/" + name_var + ".pkl"
    with open(destination, 'wb') as sink:
        pickle.dump(var, sink, protocol=pickle.HIGHEST_PROTOCOL)
# Load three previously computed background Monte Carlo chunks.
# NOTE(review): file names are session/machine specific and will not exist
# elsewhere — this script was clearly run interactively, once.
back_mc_1=importpickle("../Background_mc/500000/montecarlo_background_12_13_311955size=235000.pkl")
back_mc_2=importpickle("../Background_mc/500000/montecarlo_background_12_13_574232size=97800.pkl")
back_mc_3=importpickle("../Background_mc/500000/montecarlo_background_12_14_074170size=168000.pkl")
combined= back_mc_1 + back_mc_2 + back_mc_3
size=len(combined)
# Timestamp components used to build unique output file names.
x = datetime.datetime.now()
month=str(x.month)
day=str(x.day)
microsec=str(x.strftime("%f"))
name_file='Combined_Background_mc'+"_"+month+"_"+day+"_"+microsec+"_size="+str(size)
export_pickle_2(combined, name_file, "Background_mc")
# Re-split the combined sample into three chunks sized for the compute
# instances that will consume them.
size1=266800
size2=104832
size3=128320
chunk_1= combined[:size1]
chunk_2= combined[size1:size1+size2]
chunk_3= combined[size1+size2:size1+size2+size3]
name_file_1='chunk_Background_mc'+"_"+month+"_"+day+"_"+microsec+"_size="+str(size1)
name_file_2='chunk_Background_mc'+"_"+month+"_"+day+"_"+microsec+"_size="+str(size2)
name_file_3="chunk_Background_mc"+"_"+month+"_"+day+"_"+microsec+"_size="+str(size3)
export_pickle_2(chunk_1, name_file_1, "Background_mc")
export_pickle_2(chunk_2, name_file_2, "Background_mc")
export_pickle_2(chunk_3, name_file_3, "Background_mc")
#Third chunk was divided in 3 on the 16/12
back_mc_3=importpickle("../Background_mc/chunk_Background_mc_12_15_919097_size=128320.pkl")
len(back_mc_3)/3  # NOTE(review): result unused — interactive inspection leftover
sub_size_1=42773
sub_size_2=42773
sub_size_3=42773
sub_chunk_1= back_mc_3[:sub_size_1]
sub_chunk_2= back_mc_3[sub_size_1:sub_size_1+sub_size_2]
sub_chunk_3= back_mc_3[sub_size_1+sub_size_2:sub_size_1+sub_size_2+sub_size_3]
# Fresh timestamp for the sub-chunk file names.
x = datetime.datetime.now()
month=str(x.month)
day=str(x.day)
microsec=str(x.strftime("%f"))
name_sub_file_1='chunk_Background_mc'+"_"+month+"_"+day+"_"+microsec+"_size="+str(sub_size_1) + "_number1"
name_sub_file_2='chunk_Background_mc'+"_"+month+"_"+day+"_"+microsec+"_size="+str(sub_size_2) + "_number2"
name_sub_file_3="chunk_Background_mc"+"_"+month+"_"+day+"_"+microsec+"_size="+str(sub_size_3)+ "_number3"
export_pickle_2(sub_chunk_1, name_sub_file_1, "Background_mc")
export_pickle_2(sub_chunk_2, name_sub_file_2, "Background_mc")
export_pickle_2(sub_chunk_3, name_sub_file_3, "Background_mc")
sub_size_1+sub_size_2+sub_size_3  # NOTE(review): unused sanity-check expression
size2=104832
size3=128320
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy
# Training configuration. NOTE(review): CLASS_MODE / COLOR_MODE / TARGET_SIZE
# are never referenced below — the flow_from_directory calls hard-code the
# same values; consider wiring the constants through.
DIRECTORY = "augmented-data/train"
CLASS_MODE = "categorical"
COLOR_MODE = "grayscale"
TARGET_SIZE = (256,256)
BATCH_SIZE = 32
# Augmenting generator for training images (rescale plus light geometric jitter).
training_data_generator = ImageDataGenerator(
        rescale=1.0/255,
        zoom_range=0.1,
        rotation_range=25,
        width_shift_range=0.05,
        height_shift_range=0.05)
# NOTE(review): the validation generator applies no rescale while training
# does — confirm this asymmetry is intended. Both iterators also read the
# same DIRECTORY (the subset= arguments are commented out), so validation
# currently sees the training data.
validation_data_generator = ImageDataGenerator()
training_iterator = training_data_generator.flow_from_directory(DIRECTORY,class_mode='categorical',color_mode='grayscale',batch_size=BATCH_SIZE)#, subset='training')
training_iterator.next()
print("\nLoading validation data...")
validation_iterator = validation_data_generator.flow_from_directory(DIRECTORY,class_mode='categorical', color_mode='grayscale',batch_size=BATCH_SIZE)#, subset='validation')
print("\nBuilding model...")
def design_model(training_data):
    """Build and compile the small grayscale CNN classifier.

    `training_data` is accepted for API compatibility but is not consulted
    during construction. Returns a compiled Sequential model with three
    softmax outputs, tracking categorical accuracy and AUC.
    """
    cnn = Sequential()
    cnn.add(tf.keras.Input(shape=(256, 256, 1)))
    # Two conv/pool stages with light dropout, then a dense softmax head.
    cnn.add(layers.Conv2D(5, 5, strides=3, activation="relu"))
    cnn.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    cnn.add(layers.Dropout(0.1))
    cnn.add(layers.Conv2D(3, 3, strides=1, activation="relu"))
    cnn.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    cnn.add(layers.Dropout(0.2))
    cnn.add(layers.Flatten())
    cnn.add(layers.Dense(3, activation="softmax"))
    print("\nCompiling model...")
    cnn.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=[tf.keras.metrics.CategoricalAccuracy(),
                 tf.keras.metrics.AUC()])
    cnn.summary()
    return cnn
model = design_model(training_iterator)
# NOTE(review): monitoring 'val_auc' with mode='min' stops training when AUC
# is LOWEST; for an AUC metric mode='max' is almost certainly intended —
# confirm before relying on early stopping.
es = EarlyStopping(monitor='val_auc', mode='min', verbose=1, patience=20)
print("\nTraining model...")
history =model.fit(
        training_iterator,
        steps_per_epoch=training_iterator.samples/BATCH_SIZE, epochs=5,
        validation_data=validation_iterator,
        validation_steps=validation_iterator.samples/BATCH_SIZE,
        callbacks=[es])
# Accuracy curves (train vs validation) on the top subplot.
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(history.history['categorical_accuracy'])
ax1.plot(history.history['val_categorical_accuracy'])
ax1.set_title('model accuracy')
ax1.set_xlabel('epoch')
ax1.set_ylabel('accuracy')
ax1.legend(['train', 'validation'], loc='upper left')
# plotting auc and validation auc over epochs
# NOTE(review): Keras suffixes AUC metric names ('auc_1', ...) on repeated
# runs in one session — these keys may need adjusting then.
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot(history.history['auc'])
ax2.plot(history.history['val_auc'])
ax2.set_title('model auc')
ax2.set_xlabel('epoch')
ax2.set_ylabel('auc')
ax2.legend(['train', 'validation'], loc='upper left')
plt.show()
# Predict over the validation iterator and print per-class metrics.
test_steps_per_epoch = numpy.math.ceil(validation_iterator.samples / validation_iterator.batch_size)
predictions = model.predict(validation_iterator, steps=test_steps_per_epoch)
test_steps_per_epoch = numpy.math.ceil(validation_iterator.samples / validation_iterator.batch_size)
predicted_classes = numpy.argmax(predictions, axis=1)
true_classes = validation_iterator.classes
class_labels = list(validation_iterator.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)
cm=confusion_matrix(true_classes,predicted_classes)
print(cm)
from collections import OrderedDict
# Read n host lines (name ip) and m alias lines (alias addr), then print
# every alias whose address is a known host IP followed by ';', in the
# form: "<alias> <addr> #<host name>".
n, m = map(int, input().split())
alias_to_addr, ip_to_name = OrderedDict(), OrderedDict()
for _ in range(n):
    host_name, host_ip = input().split()
    ip_to_name[host_ip] = host_name
for _ in range(m):
    alias_name, alias_addr = input().split()
    alias_to_addr[alias_name] = alias_addr
for alias_name, alias_addr in alias_to_addr.items():
    if ';' not in alias_addr:
        continue
    for host_ip, host_name in ip_to_name.items():
        if host_ip + ';' == alias_addr:
            print(alias_name, alias_addr, '#' + host_name)
import sys, os, time, csv
import pandas as pd
from datetime import date
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
today = date.today().strftime("%B_%d_%Y")
# You have to create a folder with today's date in the dropbox first before you download the file
save_to = os.getcwd() + "/scrape_job_" + today
# os.makedirs(save_to, exist_ok=True)
os.chdir(save_to)

# Headless Chrome configured to download into today's folder.
chromeOptions = webdriver.ChromeOptions()
prefs = {"download.default_directory": save_to}
chromeOptions.add_experimental_option("prefs", prefs)
chromeOptions.add_argument("start-maximized")
chromeOptions.add_argument("enable-automation")
chromeOptions.add_argument("--headless")
chromeOptions.add_argument("--no-sandbox")
chromeOptions.add_argument("--disable-infobars")
chromeOptions.add_argument("--disable-dev-shm-usage")
chromeOptions.add_argument("--disable-browser-side-navigation")
chromeOptions.add_argument("--disable-gpu")
driver = webdriver.Chrome(ChromeDriverManager().install(), options = chromeOptions)
driver.set_page_load_timeout(3000)

REPORT_URL = "https://nregarep2.nic.in/netnrega/dynamic2/DynamicReport_new4.aspx"

def _scrape_report(option_value, csv_name, say_message):
    """Scrape one 'Total households completed' dynamic report and save it.

    option_value -- value attribute of the DdlstTxtBox1 <option> to select
    csv_name     -- output CSV file name (written to the current directory)
    say_message  -- spoken progress message (macOS `say`)
    """
    driver.get(REPORT_URL)
    # Select the indicator and the requested period bucket.
    driver.find_element_by_xpath(".//*[contains(text(), 'Total households completed')]").click()
    driver.find_element_by_xpath("//select[@id='DdlstTxtBox1']/option[@value='" + option_value + "']").click()
    # Select all-India data at Block level.
    driver.find_element_by_xpath("//*[@id='regionselect']").send_keys("Block")
    driver.find_element_by_xpath(".//*[contains(text(), ' India')]").click()
    # Select year.
    driver.find_element_by_xpath("//*[@id='DdlstFinYear']").send_keys("2020-2021")
    # Render the report table.
    driver.find_element_by_xpath('//*[@id="viewDummy"]').click()
    # Second <tbody> on the page holds the report: row 0 is the header.
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    rows = soup.find_all("tbody")[1].find_all("tr")
    header, body = rows[0], rows[1:]
    list_header = []
    for items in header:
        try:
            list_header.append(items.get_text())
        except:
            continue
    table_data = []
    for elem in body:
        sub_data = []
        for sub_elem in elem:
            try:
                sub_data.append(sub_elem.get_text())
            except:
                continue
        table_data.append(sub_data)
    pd.DataFrame(data = table_data, columns = list_header).to_csv(csv_name, index = False)
    os.system('say "' + say_message + '"')

# (option value, output file, spoken progress message) — one entry per the
# twelve near-identical scrape passes the original script spelled out inline.
_REPORTS = [
    ("Period_wise_msr.per10", "report_8.csv", "Done with dropdown 1-10"),
    ("Period_wise_msr.per20", "report_9.csv", "Done with dropdown 11-20"),
    ("Period_wise_msr.per30", "report_10.csv", "Done with dropdown 21-30"),
    ("Period_wise_msr.per40", "report_11.csv", "Done with dropdown 31-40"),
    ("Period_wise_msr.per50", "report_12.csv", "Done with dropdown 41-50"),
    ("Period_wise_msr.per60", "report_13.csv", "Done with dropdown 51-60"),
    ("Period_wise_msr.per70", "report_14.csv", "Done with dropdown 61-70"),
    ("Period_wise_msr.per80", "report_15.csv", "Done with dropdown 71-80"),
    ("Period_wise_msr.per90", "report_16.csv", "Done with dropdown 81-90"),
    ("Period_wise_msr.per100", "report_17.csv", "Done with dropdown 100"),
    ("Period_wise_msr.per110", "report_18.csv", "Done with dropdown greater than 100"),
    ("Period_wise_msr.per14", "report_19.csv", "Done with dropdown greater than 14"),
]
for _value, _csv, _msg in _REPORTS:
    _scrape_report(_value, _csv, _msg)
|
#!/usr/bin/env python3
"""
Takes input JSON output to produce new FAIR JSON
"""
from sys import argv
import json
def MakeDict(oldDict):
    """Project an annotation record onto the FAIR shape: {'text', 'uri'}."""
    return {"text": oldDict["entity"], "uri": oldDict["uri"]}
# Crude positional CLI parsing: expects exactly
#   <script> --input IN --output OUT
# NOTE(review): the flag names themselves are never checked, only positions.
if len(argv) != 5:
    print("Usage: --input IN --output OUT")
    exit()
else:
    fname_in = argv[2]
    fname_out = argv[4]
inJson = json.load(open(fname_in))
outJsonList = []
outJson = {}
# NOTE(review): outJson is created ONCE outside the loop, so every iteration
# mutates and appends the SAME dict — outJsonList ends up holding N references
# to one object. If one dict per input element was intended, outJson must be
# re-created inside the loop; confirm intent before changing.
for elem in inJson:
    tempDict = MakeDict(elem)
    newElemList = [tempDict]
    # Rename the two known annotation types to FAIR field names.
    if elem["type"] == "anatomy":
        outJson["organ"] = newElemList
    elif elem["type"] == "organism":
        outJson["genus_species"] = newElemList
    else:
        outJson[elem["type"]] = newElemList
    outJsonList.append(outJson)
with open(fname_out, "w") as f_out:
    json.dump(outJsonList, f_out)
    f_out.close()
|
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect,HttpRequest
from nblik.models import Category,Blog,UserProfile,Comment,Follow,Discussion,Discuss,Tag,NblikInfo
from nblik.forms import BlogForm
from django.template.defaultfilters import slugify
from django.contrib.auth import authenticate,login, logout
from django.contrib.auth.decorators import login_required
from datetime import datetime
from django.conf import settings
from registration.backends.simple.views import RegistrationView
from django.contrib.auth.models import User
from datetime import datetime,date,tzinfo,timedelta
from collections import Counter
import json
import facebook
from unidecode import unidecode
from django.core.mail import send_mail
import random
# import cloudinary
# import cloudinary.uploader
# import cloudinary.api
ZERO = timedelta(0)
class UTC(tzinfo):
    """Concrete tzinfo for UTC: zero offset, zero DST, fixed name."""

    def utcoffset(self, dt):
        return timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return timedelta(0)
utc = UTC()
class MyRegistrationView(RegistrationView):
    # Minimal override of django-registration's view; the Python 2 print is
    # debugging output only. NOTE(review): get() is missing the standard
    # (request, *args, **kwargs) signature and returns nothing — confirm
    # whether this view is actually routed.
    def get(self):
        print self
# def modify_content(blog_id):
# b=Blog.objects.get(id=int(blog_id))
# str1=b.blog_content
# repeat=0
# if str1.find('src="/media/blog_uploads')!= -1:
# repeat=1
# i1=str1.find('src="/media/')
# #print i1
# i2=str1.find('/media/blog_uploads/')
# i4=str1.find('style=',i1)
# i5=str1.find('height:',i1)
# i52=str1.find('px',i5)
# i6=str1.find('width:',i5)
# i62=str1.find('px',i6)
# i3=str1.rfind('.',i2,i4)
# #print i2
# #print i3
# num1=int(str1[i5+7:i52])
# num2=int(str1[i6+6:i62])
# #print num1
# #print num2
# str2=str1[i2+20:i3+5].replace('"','')
# str3=str1[i1+5:i3+5].replace('"','')
# #print str2
# #print str3
# context_dict1=cloudinary.uploader.upload('http://protected-tor-4410.herokuapp.com/media/blog_uploads/'+str2,public_id='blog_uploads/'+str2,width = num2, height = num1, crop = 'limit')
# url=context_dict1['url']
# #url='http://res.cloudinary.com/nblik/image/upload/blog_uploads/'+str2 ##
# str4=str1.replace(str3,url)
# #print str4
# b.blog_content=str4
# b.save()
# if repeat==1:
# modify_content(blog_id)
def welcome(request):
    """Entry page: send active users into the app, show the welcome
    template to everyone else."""
    if not request.user.is_active:
        return render(request,'nblik/welcome.html')
    return HttpResponseRedirect('/nblik/')
def index(request):
    """Front page: list recent blogs (25 for logged-in users, 10 otherwise)
    with their top comments, human-readable ages, and per-blog like state.

    POST short-circuits to the add-blog form with the submitted text.
    Python 2 module: integer division is relied on for minutes/hours.
    """
    if request.method=="POST":
        blog_text=request.POST.get('blog_text')
        return render(request,'nblik/add_blog.html',{'blog_text':blog_text})
    if request.user.is_active:
        blog_list = Blog.objects.order_by('-id')[:25]
        print request.user.email
    else:
        blog_list = Blog.objects.order_by('-id')[:10]
    # show[i] is False when the current user already liked blog_list[i]
    # (hides the like button in the template).
    show=[True]*len(blog_list)
    try:
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        liked_blogs_list=user2.liked_blogs.all()
        ##print len(blog_list)
        for i in range(0,len(blog_list)):
            for lb in liked_blogs_list:
                if lb == blog_list[i]:
                    show[i]=False
                    break
            else:
                show[i]=True
    except:
        # Anonymous user / missing profile: every blog stays likeable.
        pass
    context_dict = {}
    ##print datetime.now()
    ##print show
    context_dict['blogs']=blog_list
    # Top-5 most-liked comments per blog, plus total comment counts by title.
    comment_matrix=[]
    comments_number={}
    for blog in blog_list:
        comment_matrix.append(Comment.objects.filter(comment_to=blog).order_by('-likes')[:5])
        comments_number[blog.title]=len(Comment.objects.filter(comment_to=blog))
    context_dict['comments']=comment_matrix
    context_dict['comments_number']=comments_number
    # Human-readable age string per blog ("N days ago" / "Just now" ...).
    b_time=[]
    for b in blog_list:
        days=(datetime.now(utc)-b.datetime_added).days
        seconds=(datetime.now(utc) - b.datetime_added).seconds
        minutes=seconds/60
        hours=minutes/60
        if days>= 1:
            b_time.append(str(days)+" days ago")
        elif minutes>60:
            b_time.append(str(hours)+" hours ago")
        elif seconds>60:
            b_time.append(str(minutes)+" minutes ago")
        else:
            b_time.append("Just now")
    zipped_data=zip(blog_list,b_time,show)
    context_dict['zipped_data']=zipped_data
    #context_dict['u']=user1
    response = render(request,'nblik/index.html',context_dict)
    return response
def profile(request):
    """Profile page placeholder — not implemented yet."""
    pass
def category(request,category_name_slug):
    """Category page: list that category's blogs (all for logged-in users,
    10 otherwise) with ages, comment counts, per-blog like state, and
    whether the category-follow button should show.

    Unknown slugs fall through with only 'category_name' set.
    """
    context_dict = {}
    context_dict['result_list']=None
    try:
        category = Category.objects.get(slug=category_name_slug)
        context_dict['category_name']=category.name
        if request.user.is_active:
            blog_list = Blog.objects.filter(category=category).order_by('-id')
        else:
            blog_list = Blog.objects.filter(category=category).order_by('-id')[:10]
        context_dict['category']=category
        context_dict['category_name_slug']=category.slug
        # show[i] is False when the user already liked blog_list[i].
        show=[True]*len(blog_list)
        try:
            user1=User.objects.get(username=request.user)
            user2=UserProfile.objects.get(user=user1)
            liked_blogs_list=user2.liked_blogs.all()
            ##print len(blog_list)
            for i in range(0,len(blog_list)):
                for lb in liked_blogs_list:
                    if lb == blog_list[i]:
                        show[i]=False
                        break
                else:
                    show[i]=True
            # show_cat is None when the user already likes this category.
            show_cat="yes"
            for cat in user2.liked_categories.all():
                if cat == category:
                    show_cat=None
                    break
        except:
            show_cat=None
        # Human-readable age string and comment count per blog.
        b_time=[]
        comments_number={}
        for b in blog_list:
            comments_number[b.title]=len(Comment.objects.filter(comment_to=b))
            days=(datetime.now(utc)-b.datetime_added).days
            seconds=(datetime.now(utc) - b.datetime_added).seconds
            minutes=seconds/60
            hours=minutes/60
            if days>= 1:
                b_time.append(str(days)+" days ago")
            elif minutes>60:
                b_time.append(str(hours)+" hours ago")
            elif seconds>60:
                b_time.append(str(minutes)+" minutes ago")
            else:
                b_time.append("Just now")
        context_dict['comments_number']=comments_number
        context_dict['show_cat']=show_cat
        zipped_data=zip(blog_list,b_time,show)
        context_dict['zipped_data']=zipped_data
    except Category.DoesNotExist:
        context_dict['category_name']=category_name_slug
    return render(request,'nblik/category.html',context_dict)
def get_category_list(max_results=0,startswith=''):
    """Fetch categories, optionally filtered by a case-insensitive name
    prefix; an empty prefix returns every category. A positive max_results
    caps the result length. Falsy non-empty prefixes yield []."""
    cat_list=[]
    if startswith == '':
        cat_list = Category.objects.all()
    elif startswith:
        cat_list = Category.objects.filter(name__istartswith=startswith)
    if max_results > 0 and len(cat_list) > max_results:
        cat_list = cat_list[:max_results]
    return cat_list
def blog(request,blog_title_slug):
    """Single-blog page: bump the view counter (once per viewer), gather
    comments ordered by likes with their authors' display names, compute
    per-comment/per-blog like state for the current user, and render.

    NOTE(review): raises Blog.DoesNotExist for unknown slugs (first
    unguarded .get below) — confirm that is handled upstream.
    """
    #if not request.user.is_active:
    #    HttpResponseRedirect('/nblik/login_signup/')
    b=None
    c=None
    b_time=None
    b=Blog.objects.get(slug=blog_title_slug)
    viewer_list=b.viewers.all()
    b.views+=1
    try:
        # De-duplicate: if this user already viewed the blog, undo the
        # increment and re-register them as the latest viewer.
        for viewer in viewer_list:
            if viewer==request.user.userprofile:
                b.views-=1
                b.viewers.remove(viewer)
        b.viewers.add(request.user.userprofile)
        b.save()
    except:
        # Anonymous user (no userprofile): don't count the view.
        b.views-=1
        b.save()
    try:
        b=Blog.objects.get(slug=blog_title_slug)
        ##print b.blog_content
        c=Comment.objects.filter(comment_to=b).order_by('-likes')
        comments_number=len(c)
        # Pair each comment with its author's display name.
        comment_by_name=[]
        for co in c:
            u=co.comment_by
            up=UserProfile.objects.get(user=u)
            comment_by_name.append(up.name)
        comments=zip(c,comment_by_name)
    except Blog.DoesNotExist:
        pass
    ##print type(b.text)
    try:
        b=Blog.objects.get(slug=blog_title_slug)
        #print b.blog_content
        us=request.user
        up=UserProfile.objects.get(user=request.user)
        liked_comments=up.liked_comments.all()
        # show / show_comment drive the visibility of like buttons.
        show=True
        show_comment={}
        for bl in up.liked_blogs.all():
            if bl==b:
                show=False
                break;
        for co in c:
            show_comment[co.id]="yes"
            for comment in liked_comments:
                if comment==co:
                    show_comment[co.id]=None
                    break
    except:
        up=None
        us=None
        show=None
        show_comment=None
    ##print show_comment
    # Human-readable age string (Python 2 integer division).
    days=(datetime.now(utc)-b.datetime_added).days
    seconds=(datetime.now(utc) - b.datetime_added).seconds
    minutes=seconds/60
    hours=minutes/60
    if days>= 1:
        b_time=str(days)+" days ago"
    elif minutes>60:
        b_time=str(hours)+" hours ago"
    elif seconds>60:
        b_time=str(minutes)+" minutes ago"
    else:
        b_time="Just now"
    return render(request,'nblik/blog.html',{'blog':b,'comments':comments,'b_time':b_time,'show':show,'u':us,'up':up,'comments_number':comments_number,'show_comment':show_comment})
@login_required
def like_category(request):
    """AJAX: add the current user's like to a category (once per user).

    Returns the (possibly unchanged) like count as plain text. Non-GET
    requests fall through and return None.
    """
    if request.method=="GET":
        category_id=request.GET["category_id"]
        category1=Category.objects.get(id=int(category_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        lke_list=user2.liked_categories.all()
        for lke in lke_list:
            if lke == category1:
                # Already liked -- do not double count.
                return HttpResponse(category1.likes)
        category1.likes+=1
        category1.save()
        user2.liked_categories.add(category1)
        user2.save()
        return HttpResponse(category1.likes)
def suggest_category(request):
    """AJAX autocomplete: render up to 100 categories whose names start
    with the query string.

    Fix: the original bound the query string to the name ``str``,
    shadowing the builtin.
    """
    query = request.GET["query_string"]
    result = get_category_list(100, query)
    cat_list = []
    for name in result:
        cat = Category.objects.get(name=name)
        cat_list.append(cat)
    return render(request,'nblik/category_list.html',{'cats':cat_list})
@login_required
def like_blog(request):
    """AJAX: register the current user's like on a blog, at most once
    per user, and return the resulting like count as plain text."""
    if request.method != "GET":
        return None
    target = Blog.objects.get(id=int(request.GET["blog_id"]))
    account = User.objects.get(username=request.user)
    profile = UserProfile.objects.get(user=account)
    already_liked = any(liked == target for liked in profile.liked_blogs.all())
    if already_liked:
        return HttpResponse(target.likes)
    profile.liked_blogs.add(target)
    profile.save()
    target.likes += 1
    target.save()
    return HttpResponse(target.likes)
@login_required
def like_discussion(request):
    """AJAX: add the current user's like to a discussion (once per user).

    Returns the (possibly unchanged) like count as plain text.
    """
    ##print "Hello"
    if request.method=="GET":
        discussion_id=request.GET["discussion_id"]
        discussion=Discussion.objects.get(id=int(discussion_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        liked_discussions=user2.liked_discussions.all()
        for dis in liked_discussions:
            if dis == discussion:
                # Already liked -- do not double count.
                return HttpResponse(discussion.likes)
        discussion.likes+=1
        discussion.save()
        user2.liked_discussions.add(discussion)
        user2.save()
        return HttpResponse(discussion.likes)
@login_required
def like_discuss(request):
    """AJAX: add the current user's like to a discussion reply (Discuss),
    once per user; returns the like count as plain text."""
    ##print "Hello"
    if request.method=="GET":
        discuss_id=request.GET["discuss_id"]
        discuss=Discuss.objects.get(id=int(discuss_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        l_list=user2.liked_discusses.all()
        for lke in l_list:
            if lke == discuss:
                # Already liked -- do not double count.
                return HttpResponse(discuss.likes)
        discuss.likes+=1
        discuss.save()
        user2.liked_discusses.add(discuss)
        user2.save()
        return HttpResponse(discuss.likes)
@login_required
def like_comment(request):
    """AJAX: add the current user's like to a comment (once per user);
    returns the like count as plain text."""
    ##print "Hello"
    if request.method=="GET":
        comment_id=request.GET["comment_id"]
        comment=Comment.objects.get(id=int(comment_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        l_list=user2.liked_comments.all()
        for lke in l_list:
            if lke == comment:
                # Already liked -- do not double count.
                return HttpResponse(comment.likes)
        comment.likes+=1
        comment.save()
        user2.liked_comments.add(comment)
        user2.save()
        return HttpResponse(comment.likes)
def add_blog(request,category_name_slug):
    """Create a blog post in the category identified by slug.

    GET renders the add-blog form; POST (with a known category) creates
    the Blog and renders its page. A POST with an unknown slug falls
    through and returns None, as before.

    Fixes: removed the unused ``user = request.user._wrapped`` local,
    and unpack get_or_create's (object, created) tuple instead of
    re-fetching the blog by title.
    """
    try:
        cat=Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat=None
    if request.method=="POST":
        if cat:
            blog_content=request.POST['blog_content']
            blog_title=request.POST['blog_title']
            blog1, _created = Blog.objects.get_or_create(title=blog_title,
                                   category=cat,
                                   written_by=request.user,
                                   likes=0,
                                   views=0,
                                   datetime_added=datetime.now(),
                                   text=blog_content
                                   )
            return blog(request,blog1.slug)
    else:
        context_dict={'category_list':Category.objects.all(),'category':cat}
        return render(request,'nblik/add_blog.html',context_dict)
def add_blog2(request,category_name_slug):
    """Form-based variant of add_blog using BlogForm.

    NOTE(review): an invalid POST re-renders 'add_blog.html' while GET
    renders 'add_blog2.html' -- confirm that template split is intended.
    """
    try:
        cat=Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat=None
    if request.method=="POST":
        blog1=BlogForm(request.POST)
        if blog1.is_valid():
            title = blog1.cleaned_data['title']
            blog_content=blog1.cleaned_data['blog_content']
            #blog1.save(commit=False)
            # Rebind blog1 from the bound form to a fresh model instance.
            blog1=Blog(title=title,blog_content=blog_content)
            blog1.datetime_added=datetime.now()
            blog1.category=cat
            blog1.written_by=request.user
            blog1.save()
            #modify_content(blog1.id)
            return blog(request,blog1.slug)
        else:
            form=BlogForm()
            cat=None
            blog_list=Blog.objects.all()
            context_dict={'category_list':Category.objects.all(),'category':cat,'myform':form,'blog_list':blog_list}
            return render(request,'nblik/add_blog.html',context_dict)
    else:
        form=BlogForm()
        blog_list=Blog.objects.all()
        context_dict={'category_list':Category.objects.all(),'category':cat,'myform':form,'blog_list':blog_list}
        return render(request,'nblik/add_blog2.html',context_dict)
def create_signup_username(signup_name):
    """Generate a unique username: the slugified display name plus a
    numeric suffix one higher than the largest suffix already used by
    profiles with the same (case-insensitive) name; '-1' if unused.

    Fix: the original computed the maximum existing suffix in ``max1``
    but then built the new name from the *last seen* suffix (``num+1``),
    which could collide with an existing username. It also left dead
    ``Counter`` bookkeeping behind.
    """
    u_list = UserProfile.objects.all()
    max1 = 1
    found = 0
    prev_username = ''
    lindex = -1
    for u in u_list:
        if u.name.lower() == signup_name.lower():
            found = 1
            prev_username = u.user.username
            lindex = prev_username.rfind("-", 0, len(prev_username))
            num = int(prev_username[lindex+1:])
            if num > max1:
                max1 = num
    if found == 1:
        # Prefix from the last matching username, suffix from the max.
        str1 = prev_username[:lindex+1] + str(max1 + 1)
    else:
        str1 = slugify(signup_name) + '-1'
    return str1
def login_and_signup(request):
    """Combined login / signup endpoint.

    POST with 'login_username_or_email' attempts login (the field also
    accepts an email, resolved to a username first); otherwise the POST
    is treated as a signup. GET renders the blank login page.
    """
    if request.method == 'POST':
        login_statement=None
        signup_statement=None
        login_username_or_email = request.POST.get('login_username_or_email')
        if login_username_or_email:
            login_password = request.POST.get('login_password')
            if not login_username_or_email:
                login_statement="Please enter the username"
            if not login_password and login_username_or_email:
                login_statement ="Please enter the password"
            # Allow login by email: map it to the account's username.
            try:
                u=User.objects.get(email=login_username_or_email)
                login_username=u.username
            except:
                login_username=login_username_or_email
            try:
                u=User.objects.get(username=login_username)
            except:
                u=None
            user = authenticate(username = login_username,password=login_password)
            if user and login_username and login_password:
                if user.is_active:
                    login(request,user)
                    return HttpResponseRedirect('/nblik/')
                else:
                    return HttpResponse("Your Nblik account is disabled.")
            else:
                #print "Invalid login details: {0}, {1}".format(login_username,login_password)
                login_statement="Invalid username password combination"
        else:
            # Signup branch.
            registered = False
            signup_name = request.POST.get('signup_name')
            signup_email = request.POST.get('signup_email')
            signup_password1 = request.POST.get('signup_password1')
            signup_password2 = request.POST.get('signup_password2')
            register_as = request.POST.get('register_as')
            try:
                u=User.objects.get(email=signup_email)
                signup_statement="Email already registered"
            except:
                if signup_password1!=signup_password2:
                    signup_statement="password not same"
                else:
                    signup_username=create_signup_username(signup_name)
                    user=User.objects.create_user(username=signup_username,email=signup_email)
                    user.set_password(signup_password1)
                    user.save()
                    user1=User.objects.get(username=signup_username)
                    if register_as=="user":
                        profile=UserProfile(user=user1,level=1)
                        profile.name=signup_name
                        profile.save()
                        # NOTE(review): Follow is built with a User here but
                        # queried with a UserProfile elsewhere -- verify.
                        up_follow=Follow(userprofile=user1)
                        up_follow.save()
                    else:
                        company=Company(user=user1)
                        company.name=signup_name
                        company.save()
                    user1 = authenticate(username = signup_username,password=signup_password1)
                    #user1 = authenticate(username = signup_username,password=signup_password1)
                    #print user1
                    login(request,user1)
                    cat_list=Category.objects.all()
                    return render(request,'nblik/next_step.html',{'email':signup_email,'name':signup_name,'cat_list':cat_list})
            #return render(request,'nblik/signup2.html',{'username':signup_username})
        return render(request,'nblik/login.html',{'login_statement':login_statement,'signup_statement':signup_statement,'cat_list':Category.objects.all()})
    else:
        return render(request,'nblik/login.html',{})
def google_login(request):
    """Google OAuth callback: log in an existing user by email, or create
    a new User/UserProfile for first-time Google signups. Responds with
    a small JSON status payload.
    """
    if request.method=="POST":
        email=request.POST['email']
        image_url=request.POST['image_url']
        name=request.POST['name']
        google_id=request.POST['id']
        response_dict={}
        try:
            u=User.objects.get(email=email)
            up=UserProfile.objects.get(user=u)
            if up.google_registered:
                up.login=1
                up.save()
            else:
                # First Google login for an existing account: link it.
                up.google_id=google_id
                up.google_registered=True
                up.login=1
                up.save()
            # NOTE(review): this passes the stored password *hash* as the
            # plaintext password, so authenticate() presumably always
            # returns None -- confirm the intended login path here.
            user = authenticate(username = up.user.username,password=up.user.password)
            if user:
                if user.is_active:
                    login(request,user)
                    response_dict.update({'response':"logged in"})
                    response=HttpResponse(json.dumps(response_dict), content_type='application/javascript')
                else:
                    response_dict.update({'response':"Your Nblik account is disabled."})
                    response=HttpResponse(json.dumps(response_dict), content_type='application/javascript')
        except:
            # No existing account for this email: create one with a
            # fixed placeholder password.
            users=User.objects.all()
            # for us in users:
            # if email == us.email:
            # response=HttpResponse(json.dumps(response_dict), content_type='application/javascript')
            # return response
            signup_username=create_signup_username(name)
            user=User.objects.create_user(username=signup_username,email=email)
            user.set_password("password")
            user.save()
            user1=User.objects.get(username=signup_username)
            #print "user1=", user1
            profile=UserProfile(user=user1,level=1)
            profile.name=name
            profile.google_id=google_id
            profile.google_registered=True
            profile.login=1
            profile.save()
            up_follow=Follow(userprofile=user1)
            up_follow.save()
            user1 = authenticate(username = signup_username,password="password")
            #user1 = authenticate(username = signup_username,password=signup_password1)
            login(request,user1)
            response_dict.update({'response':'logged_in'})
            response=HttpResponse(json.dumps(response_dict), content_type='application/javascript')
        return response
def search_top(request):
    """Site-wide search: categories matching the query prefix, blogs with
    a title word starting with it, and users whose name starts with it.
    """
    q_str=request.GET["query_string"]
    ##print str
    result=get_category_list(100,q_str)
    cat_list=[]
    # NOTE(review): items in `result` are already Category objects; the
    # get(name=name) round-trip looks redundant -- verify before removing.
    for name in result:
        cat=Category.objects.get(name=name)
        cat_list.append(cat)
    blog_list=Blog.objects.all()
    b_list=[]
    # A blog matches when any word of its title starts with the query.
    for blog in blog_list:
        title=blog.title
        for word in title.split():
            if word.lower().startswith(q_str.lower()):
                b_list.append(blog)
                break
    user_list = UserProfile.objects.all()
    u_list=[]
    for u in user_list:
        if u.name.lower().startswith(q_str.lower()):
            u_list.append(u)
    ##print cat_list,b_list,u_list
    ##print cat_list
    context_dict={}
    context_dict['cats']=cat_list
    context_dict['blogs']=b_list
    context_dict['users']=u_list
    ##print context_dict
    #return HttpResponse("Results")
    return render(request,"nblik/search_results.html", context_dict)
    #return HttpResponse(cat_list)
def user_logout(request):
    """Log the current user out and send them back to the home page."""
    logout(request)
    return HttpResponseRedirect('/nblik/')
def follow_user(request):
    """AJAX: make the current user follow another user.

    Returns "NotFollowed" if already following, otherwise "followed".
    """
    if request.method=="GET":
        user_id=request.GET["user_id"]
        userprofile=UserProfile.objects.get(id=int(user_id))
        u=User.objects.get(username=userprofile.user.username)
        # NOTE(review): Follow is queried with a User here but created
        # with other types elsewhere -- confirm Follow.userprofile's type.
        up_follow=Follow.objects.get(userprofile=u)
        current_up_follow=Follow.objects.get(userprofile=request.user)
        for follo in current_up_follow.followed.all():
            if userprofile==follo:
                # Already following -- no counter changes.
                return HttpResponse("NotFollowed")
        up_follow.followers=up_follow.followers+1
        current_up_follow.followed.add(userprofile)
        current_up_follow.no_followed=current_up_follow.no_followed+1
        current_up_follow.save()
        up_follow.save()
        return HttpResponse("followed")
def dashboard(request,username):
    """Profile dashboard for *username*: their recent blogs and
    discussions, liked categories, follow data and language preference.

    Fixes: `lang`, `blog_list`, `discussion_list` and `cat_list` are
    pre-initialised so the view cannot die with UnboundLocalError /
    AttributeError when the profile is missing or `languages` holds a
    value outside 1..3.
    """
    context_dict={}
    show='yes'
    blog_list=[]
    discussion_list=[]
    cat_list=[]
    lang=None
    try:
        user_m=User.objects.get(username=username)
        userprofile=UserProfile.objects.get(user=user_m)
        blog_list=Blog.objects.filter(written_by=user_m).order_by('-id')[:20]
        discussion_list=Discussion.objects.filter(started_by=userprofile).order_by('-id')[:20]
        cat_list=userprofile.liked_categories.all()
        # show is None (hide the follow button) when the viewer already
        # follows this profile, or is anonymous.
        try:
            follow=Follow.objects.get(userprofile=request.user)
            for foll in follow.followed.all():
                if foll==userprofile:
                    show=None
                    break
        except:
            pass
        if not request.user.is_active:
            show=None
    except:
        user_m=None
        userprofile=None
    try:
        followed_tags=userprofile.followed_tags.all()
    except:
        followed_tags=None
    try:
        userprofile_follow=Follow.objects.get(userprofile=user_m)
        followed_list=userprofile_follow.followed.all()
        followers=userprofile.follow_set.all()
    except:
        userprofile_follow=None
        followed_list=None
        followers=None
    if userprofile is not None:
        if userprofile.languages == 1:
            lang='English'
        elif userprofile.languages == 2:
            lang='Hindi'
        elif userprofile.languages == 3:
            lang='English and Hindi'
    context_dict['show']=show
    context_dict['cat_list']=cat_list
    context_dict['user']=request.user
    context_dict['user_m']=user_m
    context_dict['userprofile']=userprofile
    context_dict['userprofile_follow']=userprofile_follow
    context_dict['followed_tags']=followed_tags
    context_dict['followed_list']=followed_list
    context_dict['followers']=followers
    context_dict['blogs']=blog_list
    context_dict['discussions']=discussion_list
    context_dict['lang']=lang
    context_dict['l_blogs']=len(blog_list)
    context_dict['l_dis']=len(discussion_list)
    return render(request,'nblik/dashboard.html',context_dict)
def comment(request):
    """AJAX endpoint: attach a new comment (by user_id) to a blog."""
    if request.method=="GET":
        blog_id = request.GET["blog_id"]
        author_id = request.GET["user_id"]
        text = request.GET["comment_text"]
        target_blog = Blog.objects.get(id=int(blog_id))
        author = User.objects.get(id=int(author_id))
        Comment.objects.get_or_create(comment_text=text,comment_by=author,comment_to=target_blog,likes=0)
        return HttpResponse(0)
def discussions(request):
    """List discussions newest-first; anonymous visitors see only ten."""
    listing = Discussion.objects.order_by('-id')
    if not request.user.is_active:
        listing = listing[:10]
    return render(request,'nblik/discussions.html',{'discussions':listing})
def discussion_2(request,category_slug):
    """Discussions filtered to one category. Logged-in users see 25 and
    a follow-category button; anonymous users see 10 and no button.

    Fixes: `show_cat` was unbound on the anonymous branch, so the final
    render raised NameError; also removed a stray Python 2 debug print.
    """
    cat=Category.objects.get(slug=category_slug)
    show_cat=None
    if request.user.is_active:
        user2=UserProfile.objects.get(user=request.user)
        discussions=Discussion.objects.filter(category=cat).order_by('-id')[:25]
        show_cat="yes"
        for cate in user2.liked_categories.all():
            if cat == cate:
                # Already following this category: hide the button.
                show_cat=None
                break
    else:
        discussions=Discussion.objects.filter(category=cat).order_by('-id')[:10]
    return render(request,'nblik/discussions.html',{'discussions':discussions,'category':cat,'show_cat':show_cat})
def new_discussion(request):
    """Create a new discussion (POST) or render the creation form (GET).

    The intro's newlines are normalised to <br /> for HTML rendering.
    """
    if request.method=='POST':
        user=request.user
        user_pro=UserProfile.objects.get(user=user)
        topic=request.POST.get('discuss_topic')
        intro=request.POST.get('discuss_intro').replace("\r\n","<br />").replace("\r","<br />").replace("\n","<br />")
        cat=request.POST.get('category')
        category=Category.objects.get(id=int(cat))
        discussion=Discussion(topic=topic,started_by=user_pro,category=category,intro=intro)
        discussion.save()
        discuss_list=discussion.discuss_set.all()
        return render(request,'nblik/discussion.html',{'discuss_list':discuss_list,'up':user_pro,'discussion':discussion})
    else:
        context_dict={}
        context_dict['categories']=Category.objects.all()
        return render(request,'nblik/new_discussion.html',context_dict)
def discussion(request,discussion_slug):
    """Single discussion page with its replies and like-button state.

    NOTE(review): assumes an authenticated user with a UserProfile --
    anonymous requests raise at the UserProfile lookup.
    """
    d=Discussion.objects.get(slug=discussion_slug)
    discuss_list=d.discuss_set.all()
    up=UserProfile.objects.get(user=request.user)
    liked_discussions=up.liked_discussions.all()
    liked_discusses=up.liked_discusses.all()
    # show is None once the user liked the discussion; show_discuss maps
    # reply id -> "yes"/None for each reply's like button.
    show="yes"
    show_discuss={}
    for dcsns in liked_discussions:
        if dcsns==d:
            show=None
            break
    for discuss in discuss_list:
        show_discuss[discuss.id]="yes"
        for dscs in liked_discusses:
            if dscs==discuss:
                show_discuss[discuss.id]=None
                break
    ##print discuss_list
    ##print "\n"
    ##print show_discuss
    return render(request,'nblik/discussion.html',{'discuss_list':discuss_list,'up':up,'discussion':d,'show':show,'show_discuss':show_discuss})
def discuss(request):
    """AJAX endpoint: attach a new reply (Discuss) to a discussion."""
    if request.method=="GET":
        #print "Hello"
        discussion_id=request.GET["discussion_id"]
        #print discussion_id
        up_id=request.GET["user_id"]
        #print up_id
        discuss_text=request.GET["discuss_text"]
        dn=Discussion.objects.get(id=int(discussion_id))
        #print dn
        up=UserProfile.objects.get(id=int(up_id))
        #print up
        d=Discuss.objects.get_or_create(discuss_text=discuss_text,discuss_by=up,discuss_on=dn,likes=0)
        return HttpResponse(0)
def next_step(request):
    """Post-signup profile completion: saves name, DOB, location,
    picture, language and liked categories onto the UserProfile
    (creating one for Google-registered users if missing)."""
    u=request.user
    if request.method=="POST":
        try:
            up=UserProfile.objects.get(user=u)
        except:
            # No profile yet (Google signup path): create one now.
            up=UserProfile(user=u,level=1)
            up.google_registered=True
            up.save()
            up_follow=Follow(userprofile=u)
            up_follow.save()
        name = request.POST.get('name')
        email = request.POST.get('email')
        dob_date = request.POST.get('dob_date')
        dob_month = request.POST.get('dob_month')
        dob_year = request.POST.get('dob_year')
        who=request.POST.get('who')
        lives_in=request.POST.get('lives_in')
        from_place=request.POST.get('from_place')
        if len(request.FILES) != 0:
            up.picture = request.FILES['picture']
        ##print profile_pic_url
        languages=request.POST.get('language')
        profile_tagline=request.POST.get('profile_tag')
        liked_category_ids=request.POST.getlist('category')
        for i in liked_category_ids:
            cat=Category.objects.get(id=int(i))
            up.liked_categories.add(cat)
        up.name=name
        up.date_registered=datetime.now()
        up.who=who
        up.lives_in=lives_in
        up.from_place=from_place
        # Numeric fields are optional; ignore unparsable input.
        try:
            up.dob_date=int(dob_date)
            up.dob_month=int(dob_month)
            up.dob_year=int(dob_year)
            up.languages=int(languages)
        except:
            pass
        up.profile_tag_line=profile_tagline
        up.save()
        return HttpResponseRedirect('/nblik/')
    else:
        #try:
        # up=UserProfile.objects.get(user=u)
        # return HttpResponseRedirect('/nblik/')
        #except:
        context_dict={}
        cat_list=Category.objects.all()
        context_dict['cat_list']=cat_list
        return render(request,'nblik/next_step.html',context_dict)
def quick_add_blog(request):
    """Pre-fill the full add-blog form with text typed into the
    quick-post box (newlines converted to <br /> for the editor)."""
    if request.method=="POST":
        blog_text=request.POST.get('quick_blog_text')
        form=BlogForm()
        context_dict={}
        context_dict['category_list'] = Category.objects.all()
        context_dict['quick_blog_text']= (str(blog_text)).replace("\r\n","<br />")
        #print blog_text
        context_dict['category']= None
        context_dict['myform']=form
        context_dict['blog_list']=Blog.objects.all()
        return render(request,'nblik/add_blog2.html',context_dict)
# def discussion_to_post(id1):
# dis=Discussion.objects.get(id=int(id1))
# text="""<div id="main" style="min-width:90%;padding:10px;margin:5px;border:1px solid black;">
# <img src="https://elasticbeanstalk-us-west-2-031348677605.s3.amazonaws.com/images/nblik.jpg" style="height:60px;margin:10px;margin-bottom:2px;border:2px solid rgb(70, 94, 170);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;">
# <font style="font-size:14px;color:rgb(70,94,170);margin-left:0;">Network of Knowledge</font>
# <br>
# <div id="data" style="font-weight:bold;padding:5px;font-family:georgia;font-size:24px;">
# <a href="/nblik/discussion/"""+dis.slug+"""/" style="color:inherit;text-decoration:none;">
# """+dis.topic+"""
# </a>
# </div>
# </div>
# """
# return text
def post_to_facebook(request,blog_id):
    """Share a blog on the current user's Facebook wall using the access
    token stored by python-social-auth, then redirect home."""
    blog=Blog.objects.get(id=blog_id)
    name=blog.title
    cap='NbliK - Network of Knowledge (Blog)'
    link='http://nblik.com/nblik/blog/'+blog.slug+'/'
    src='https://elasticbeanstalk-us-west-2-031348677605.s3.amazonaws.com/images/nblik.jpg'
    user = request.user
    auth = user.social_auth.first()
    graph = facebook.GraphAPI(auth.extra_data['access_token'])
    graph.put_wall_post(message='NbliK - Network of Knowledge', attachment = {'name': name,'link': link,'caption': cap,'picture': src})
    return HttpResponseRedirect('/nblik/')
def post_to_facebook_discussion(request,discussion_id):
    """Share a discussion on the current user's Facebook wall (same flow
    as post_to_facebook, plus the intro as the description)."""
    discussion=Discussion.objects.get(id=discussion_id)
    name=discussion.topic
    cap='NbliK - Network of Knowledge (Discussion)'
    link='http://nblik.com/nblik/discussion/'+discussion.slug+'/'
    src='https://elasticbeanstalk-us-west-2-031348677605.s3.amazonaws.com/images/nblik.jpg'
    des=discussion.intro
    user = request.user
    auth = user.social_auth.first()
    graph = facebook.GraphAPI(auth.extra_data['access_token'])
    graph.put_wall_post(message='NbliK - Network of Knowledge', attachment = {'name': name,'link': link,'caption': cap,'description' : des ,'picture': src})
    return HttpResponseRedirect('/nblik/')
def blog_title(request):
    """AJAX check: respond 'error' if the slugified title is already
    used by an existing blog, otherwise 'good'."""
    candidate = slugify(unidecode(request.GET['blog_title']))
    taken = any(existing.slug == candidate for existing in Blog.objects.all())
    return HttpResponse("error" if taken else "good")
def discussion_topic(request):
    """AJAX check: respond 'error' if the slugified topic is already
    used by an existing discussion, otherwise 'good'."""
    candidate = slugify(unidecode(request.GET['discussion_topic']))
    taken = any(existing.slug == candidate for existing in Discussion.objects.all())
    return HttpResponse("error" if taken else "good")
def edit_language(request):
    """AJAX: persist the user's preferred language code on their profile."""
    profile = UserProfile.objects.get(user=request.user)
    profile.languages = int(request.GET["language"])
    profile.save()
    return HttpResponse('Done')
def edit_blog(request,blog_slug):
    """Render the edit form for a blog, pre-filled with its current
    title, content (CRLFs stripped) and category."""
    blog=Blog.objects.get(slug=blog_slug)
    context_dict={}
    context_dict['blog_text']=(blog.blog_content).replace("\r\n","")
    context_dict['blog_title']=(blog.title)
    context_dict['blog_category']=str(blog.category.slug)
    context_dict['category_list']=Category.objects.all()
    form=BlogForm()
    context_dict['myform']=form
    context_dict['blog_id']=str(blog.id)
    return render(request,'nblik/edit_blog.html',context_dict)
def update_blog(request,category_name_slug):
    """Apply an edit to an existing blog (identified by the posted
    blog_id); invalid forms bounce back to the edit page."""
    try:
        cat=Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat=None
    if request.method=="POST":
        blog_f=BlogForm(request.POST)
        if blog_f.is_valid():
            title = blog_f.cleaned_data['title']
            blog_content=blog_f.cleaned_data['blog_content']
            blog_id=request.POST.get('blog_id')
            blog1=Blog.objects.get(id=int(blog_id))
            blog1.title=title
            blog1.blog_content=blog_content
            # Editing resets the post's timestamp.
            blog1.datetime_added=datetime.now()
            blog1.category=cat
            blog1.written_by=request.user
            blog1.save()
            #modify_content(blog1.id)
            return blog(request,blog1.slug)
        else:
            blog1=Blog.objects.get(id=int(request.POST.get('blog_id')))
            blog_slug=blog1.slug
            return edit_blog(request,blog_slug)
def delete_blog(request,blog_slug):
    """Delete the blog identified by slug and return to the home page."""
    Blog.objects.get(slug=blog_slug).delete()
    return HttpResponseRedirect('/nblik/')
def edit_profile(request):
    """Render the profile edit form pre-filled with the current user's
    profile values (all coerced to strings for the template)."""
    user=request.user
    userpro=UserProfile.objects.get(user=user)
    context_dict={}
    context_dict['userpro']=userpro
    context_dict['name']=str(userpro.name)
    context_dict['tag']=str(userpro.profile_tag_line)
    context_dict['language']=str(userpro.languages)
    context_dict['date']=str(userpro.dob_date)
    context_dict['month']=str(userpro.dob_month)
    context_dict['year']=str(userpro.dob_year)
    context_dict['pic']=str(userpro.picture.url)
    return render(request,'nblik/edit_profile.html',context_dict)
def update_profile(request):
    """Persist edits from the profile form onto the user's UserProfile
    and redirect to their dashboard."""
    user=request.user
    userpro=UserProfile.objects.get(user=user)
    userpro.name=request.POST.get('name')
    # Numeric fields are optional; ignore unparsable input.
    try:
        userpro.dob_date=int(request.POST.get('dob_date'))
        userpro.dob_month=int(request.POST.get('dob_month'))
        userpro.dob_year=int(request.POST.get('dob_year'))
        userpro.languages=int(request.POST.get('language'))
    except:
        pass
    userpro.profile_tag_line=request.POST.get('profile_tag')
    userpro.who=request.POST.get('who')
    userpro.lives_in=request.POST.get('lives_in')
    userpro.from_place=request.POST.get('from_place')
    if len(request.FILES) != 0:
        userpro.picture = request.FILES['picture']
    ##print profile_pic_url
    userpro.save()
    return HttpResponseRedirect('/'+str(user)+'/')
def nblik_info(request,nblik_slug):
    """Render a static informational page identified by slug."""
    info_page = NblikInfo.objects.get(slug=nblik_slug)
    return render(request,'nblik/nblik_info.html',{'nblik_o':info_page})
def edit_discussion(request,discussion_slug):
    """Render the edit form for a discussion, converting stored <br>
    markup back to newlines for the textarea."""
    discussion=Discussion.objects.get(slug=discussion_slug)
    context_dict={}
    context_dict['discussion']=discussion
    context_dict['discussion_intro']=(discussion.intro).replace("<br />","\r\n").replace("<br>","\r\n")
    context_dict['discussion_topic']=(discussion.topic)
    context_dict['discussion_category']=discussion.category
    context_dict['category_list']=Category.objects.all()
    context_dict['discussion_id']=str(discussion.id)
    #print discussion.category.slug
    return render(request,'nblik/edit_discussion.html',context_dict)
def delete_discussion(request,discussion_slug):
    """Delete the discussion identified by slug and return home."""
    Discussion.objects.get(slug=discussion_slug).delete()
    return HttpResponseRedirect('/nblik/')
def update_discussion(request):
    """Apply an edit to an existing discussion (identified by the posted
    discussion_id) and re-render its page."""
    if request.method=="POST":
        topic = request.POST.get('discuss_topic')
        intro = request.POST.get('discuss_intro')
        cat=request.POST.get('category')
        d_id=request.POST.get('discussion_id')
        discussion1=Discussion.objects.get(id=int(d_id))
        discussion1.topic=topic
        # Normalise newlines to <br /> as done at creation time.
        discussion1.intro=intro.replace("\r\n","<br />").replace("\r","<br />").replace("\n","<br />")
        discussion1.category=Category.objects.get(slug=cat)
        discussion1.save()
        return discussion(request,discussion1.slug)
def unlike_blog(request):
    """AJAX: remove the current user's like from a blog (no-op when the
    blog was not liked); returns the resulting like count."""
    ##print "Hello"
    if request.method=="GET":
        blog_id=request.GET["blog_id"]
        blog=Blog.objects.get(id=int(blog_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        for blg in user2.liked_blogs.all():
            if blg==blog:
                user2.liked_blogs.remove(blog)
                user2.save()
                blog.likes-=1
                blog.save()
        return HttpResponse(blog.likes)
def unlike_discussion(request):
    """AJAX: remove the current user's like from a discussion (no-op if
    not liked); returns the resulting like count."""
    ##print "Hello"
    if request.method=="GET":
        discussion_id=request.GET["discussion_id"]
        discussion=Discussion.objects.get(id=int(discussion_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        for dis in user2.liked_discussions.all():
            if dis==discussion:
                user2.liked_discussions.remove(discussion)
                user2.save()
                discussion.likes-=1
                discussion.save()
        return HttpResponse(discussion.likes)
def unlike_comment(request):
    """AJAX: remove the current user's like from a comment (no-op if not
    liked); returns the resulting like count."""
    ##print "Hello"
    if request.method=="GET":
        comment_id=request.GET["comment_id"]
        comment=Comment.objects.get(id=int(comment_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        for comm in user2.liked_comments.all():
            if comm==comment:
                user2.liked_comments.remove(comment)
                user2.save()
                comment.likes-=1
                comment.save()
        return HttpResponse(comment.likes)
def unlike_discuss(request):
    """AJAX: remove the current user's like from a discussion reply
    (no-op if not liked); returns the resulting like count."""
    ##print "Hello"
    if request.method=="GET":
        discuss_id=request.GET["discuss_id"]
        discuss=Discuss.objects.get(id=int(discuss_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        for dis in user2.liked_discusses.all():
            if discuss==dis:
                user2.liked_discusses.remove(discuss)
                user2.save()
                discuss.likes-=1
                discuss.save()
        return HttpResponse(discuss.likes)
def unlike_category(request):
    """AJAX: remove the current user's like from a category (no-op if
    not liked); returns the resulting like count."""
    if request.method=="GET":
        category_id=request.GET["category_id"]
        category1=Category.objects.get(id=int(category_id))
        user1=User.objects.get(username=request.user)
        user2=UserProfile.objects.get(user=user1)
        for cat in user2.liked_categories.all():
            if category1==cat:
                user2.liked_categories.remove(category1)
                user2.save()
                category1.likes-=1
                category1.save()
                break
        return HttpResponse(category1.likes)
def unfollow_user(request):
    """AJAX: make the current user unfollow another user, decrementing
    both sides' counters; returns the new followed-count.

    NOTE(review): unlike follow_user, the counters are decremented even
    if the target was not actually followed -- confirm callers guard
    against that.
    """
    if request.method=="GET":
        user_id=request.GET["user_id"]
        u=User.objects.get(id=int(user_id))
        userprofile=UserProfile.objects.get(user=u)
        ##print u
        up_follow=Follow.objects.get(userprofile=u)
        ##print up_follow
        up_follow.followers=up_follow.followers-1
        ##print up_follow.followers
        current_up_follow=Follow.objects.get(userprofile=request.user)
        ##print current_up_follow
        current_up_follow.followed.remove(userprofile)
        #print current_up_follow.no_followed
        current_up_follow.no_followed=current_up_follow.no_followed-1
        #print current_up_follow.no_followed
        ##print current_up_follow.no_followed
        current_up_follow.save()
        up_follow.save()
        ##print request.user
        return HttpResponse(current_up_follow.no_followed)
def viewed(request):
    """AJAX hit counter: count a view of the blog, at most once per
    logged-in user (their viewer entry is re-added, moving it last),
    and return the current view count as text.

    Fix: the original only bound ``b`` on the authenticated branch, so
    anonymous requests died with UnboundLocalError at the final
    HttpResponse; they now just get the current count unchanged.
    """
    id1=request.GET["blog_id"]
    b=Blog.objects.get(id=int(id1))
    if request.user.is_active:
        viewer_list=b.viewers.all()
        b.views+=1
        for viewer in viewer_list:
            if viewer==request.user.userprofile:
                # Already viewed: undo the increment, refresh position.
                b.views-=1
                b.viewers.remove(viewer)
        b.viewers.add(request.user.userprofile)
        b.save()
    return HttpResponse(str(b.views))
def change_email(request,username):
    """Change a user's email after re-authenticating with their
    password; Google-registered accounts are bounced to the home page.
    """
    if request.method=="POST":
        email=request.POST.get('email')
        password=request.POST.get('password')
        us=User.objects.get(username=username)
        # Require the correct password before changing the address.
        if us == authenticate(username=username,password=password):
            us.email=email
            us.save()
            return HttpResponseRedirect('/'+str(us)+'/')
        else:
            context_dict={}
            context_dict['email']=str(us.email)
            return render(request,'nblik/change_email_error.html',context_dict)
    else:
        us=User.objects.get(username=username)
        context_dict={}
        context_dict['email']=str(us.email)
        user_pro=UserProfile.objects.get(user=us)
        # Google accounts manage their email via Google, not here.
        if user_pro.google_registered == True :
            return HttpResponseRedirect('/nblik/')
        return render(request,'nblik/change_email.html',context_dict)
def reset_password(request):
    """Password reset: set a random numeric password for the account with
    the posted email and send it by mail.

    NOTE(review): this emails the new password in plain text -- a signed
    reset link would be safer; flagged, not changed.
    """
    if request.method=="POST":
        email1=request.POST.get('email')
        for us in User.objects.all():
            if us.email == email1:
                u_pro=UserProfile.objects.get(user=us)
                # Google accounts have no local password to reset.
                if u_pro.google_registered == True:
                    return HttpResponseRedirect('/nblik/login_signup/')
                pass_new=str(random.randrange(1000000,100000000))
                us.set_password(pass_new)
                us.save()
                to_email=str(email1)
                subject="Password Reset"
                text="Hello,<br> Your new password is <strong>"+pass_new+" </strong>. <br>Please <a href='http://www.nblik.com/nblik/login_signup/'>SignIn</a> and change your password soon.<br>Thank You"
                # Plain-text body left empty; the HTML part carries the message.
                text1=""
                #try:
                #print settings.EMAIL_HOST_USER
                send_mail(subject, text1, settings.EMAIL_HOST_USER,[to_email], fail_silently=False, html_message=text)
                #except:
                #return HttpResponseRedirect('/nblik/password_reset_error/')
                return HttpResponseRedirect('/accounts/password/reset/done/')
        return HttpResponseRedirect('/nblik/password_reset_error/')
    else:
        return HttpResponseRedirect('/accounts/password/reset/')
def password_reset_error(request):
    """Render the 'password reset failed' page."""
    return render(request,'registration/password_reset_error.html',{})
# to_email="abc@xyz.com"
# subject="abc"
# text="Hello World"
# send_mail(subject, text, settings.EMAIL_HOST_USER,[to_email], fail_silently=False)
# remember [to_email] is a list
# also you will need to add double protection to your gmail account from which you are sending email
|
from pytorchcv.model_provider import get_model as ptcv_get_model
import torch
from torch.autograd import Variable

# Smoke test: load a pretrained ResNet-18 from pytorchcv and run one
# random 224x224 RGB image through it.
net = ptcv_get_model("resnet18", pretrained=True)
# torch.autograd.Variable has been a deprecated no-op wrapper since
# PyTorch 0.4 -- pass the tensor directly.
x = torch.randn(1, 3, 224, 224)
y = net(x)
print(y)
print(type(net))
import discord
import sqlite3
import datetime
from datetime import datetime, timedelta
from discord import Embed
from discord.ext import commands
class Logging(commands.Cog):
"""Guild logging module"""
    def __init__(self, bot):
        """Store the bot reference so listeners can resolve channels."""
        self.bot = bot
    @commands.group(invoke_without_command=True)
    @commands.has_role('Mods')
    async def logging(self, ctx):
        """Set the guild's log channel. Usage: <p>logging channel <channel>"""
        await ctx.send('Available setup commands: \nlogging channel <channel>')
@logging.command()
async def channel(self, ctx, channel: discord.TextChannel):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {ctx.guild.id}")
result = cursor.fetchone()
if result is None:
sql = ("INSERT INTO logs(guild_id, channel_id) VALUES(?, ?)")
val = (ctx.guild.id, channel.id)
await ctx.send(f"Logging channel has been set to {channel.mention}")
elif result is not None:
sql = ("UPDATE logs SET channel_id = ? WHERE guild_id = ?")
val = (channel.id, ctx.guild.id)
await ctx.send(f"Channel for logs channel has been updated to {channel.mention}")
cursor.execute(sql, val)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_message_delete(self, message):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {message.guild.id}")
result = cursor.fetchone()
before = discord.AuditLogEntry.before
channel = self.bot.get_channel(id=int(result[0]))
botrole = discord.utils.get(message.author.roles, name='Bots')
if message.author == self.bot.user:
return
if botrole in message.author.roles:
return
embed = discord.Embed(timestamp=datetime.utcnow(), colour=discord.Colour.from_rgb(237, 42, 45))
embed.set_author(name=f'{message.author}')
embed.set_thumbnail(url=f'{message.author.avatar_url}')
embed.add_field(name=f"Message sent by:",
value=f"{message.author.mention} was deleted in {message.channel.mention}", inline=False)
embed.add_field(name="Deleted message:", value=f"\n\u200b {message.content}", inline=False)
embed.set_footer(text=f'User ID: {message.author.id}')
await channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_message_edit(self, before, after):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {before.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
member = discord.Member
if member.bot:
return
embed = discord.Embed(description=f"Message edited by {before.author.mention} in {before.channel}",
timestamp=datetime.utcnow(),
colour=discord.Colour.from_rgb(247, 205, 66))
embed.set_author(name=f'{before.author}', icon_url=f'{before.author.avatar_url}')
embed.set_thumbnail(url=f'{before.author.avatar_url}')
embed.add_field(name="Original message", value="\n\u200b" + before.content, inline=False)
embed.add_field(name="Edited message", value="\n\u200b" + after.content, inline=False)
embed.add_field(name="\n\u200b", value=f"[Jump to message.]({after.jump_url})")
embed.set_footer(text=f'User ID: {before.author.id}')
await channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_member_join(self, member):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {member.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
embed = discord.Embed(title="Member Joined", timestamp=datetime.utcnow(), color=discord.Colour.from_rgb(47, 216, 109))
embed.set_thumbnail(url=f'{member.avatar_url}')
embed.add_field(name="\n\u200b", value=member.mention + " has joined " + member.guild.name, inline=False)
embed.add_field(name="Account created at:", value=member.created_at.strftime("%a, %#d %B %Y, %I:%M %p UTC"))
embed.set_footer(text=f'User ID: {member.id}')
await channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_member_remove(self, member):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {member.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
embed = discord.Embed(title="Member Left", timestamp=datetime.utcnow(),
color=0xDD2222)
embed.set_thumbnail(url=f'{member.avatar_url}')
embed.add_field(name="\n\u200b", value=member.mention + " has left " + member.guild.name, inline=False)
embed.set_footer(text=f'User ID: {member.id}')
await channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_member_ban(self, ctx, member: discord.User, reason):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {ctx.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
embed = discord.Embed(title="Member banned",
description=f"{member.mention} was banned for {reason}",
timestamp=datetime.utcnow(),
color=0xDD2222)
embed.set_footer(text=f'User ID: {member.id} / Auctioned by: {ctx.author.mention}')
await channel.send(embed=embed, reason=reason)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_member_kick(self, ctx, member, reason):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {member.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
embed = discord.Embed(title="Member banned",
description=f"{member.mention} was kicked for {reason}",
timestamp=datetime.utcnow(),
color=0xDD2222)
embed.set_footer(text=f'User ID: {member.id} / Auctioned by: {ctx.author.mention}')
await channel.send(embed=embed)
await channel.send(embed=embed, reason=reason)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()
async def on_member_unban(self, member):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {member.guild.id}")
result = cursor.fetchone()
channel = self.bot.get_channel(id=int(result[0]))
embed = discord.Embed(title="Member was unbanned", timestamp=datetime.utcnow(),
color=discord.Colour.from_rgb(237, 42, 45))
embed.set_thumbnail(url=f'{member.avatar_url}')
embed.add_field(name="\n\u200b", value=member.mention + " was unbanned from" + member.guild.name,
inline=False)
embed.set_footer(text=f'User ID: {member.id}')
await channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
@commands.Cog.listener()#from carberra
async def on_member_update(self, before, after):
db = sqlite3.connect('journal3.db')
cursor = db.cursor()
cursor.execute(f"SELECT channel_id FROM logs WHERE guild_id = {before.guild.id}")
result = cursor.fetchone()
log_channel = self.bot.get_channel(id=int(result[0]))
if before.display_name != after.display_name:
embed = discord.Embed(title="Nickname change",
colour=after.colour,
timestamp=datetime.utcnow())
fields = [("Before", before.display_name, False),
("After", after.display_name, False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await log_channel.send(embed=embed)
elif before.roles != after.roles:
embed = discord.Embed(title="Role updates",
colour=after.colour,
timestamp=datetime.utcnow())
fields = [("Before", ", ".join([r.mention for r in before.roles]), False),
("After", ", ".join([r.mention for r in after.roles]), False)]
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
await log_channel.send(embed=embed)
db.commit()
cursor.close()
db.close()
def setup(bot):
    # discord.py extension entry point: register the cog on extension load.
    bot.add_cog(Logging(bot))
    print('logging module loaded')
|
from os.path import join
from .dataset import DatasetFromFolder, DatasetFromFolder_simplified, DatasetFromFolder_in_test_mode
from torch.utils.data import DataLoader
def get_training_set(root_dir, direction):
    """Build the paired training dataset from <root_dir>/train."""
    return DatasetFromFolder(join(root_dir, "train"), direction, 'train')
def get_test_set(root_dir, direction):
    """Build the paired test dataset from <root_dir>/test."""
    return DatasetFromFolder(join(root_dir, "test"), direction, 'test')
def get_training_set_simplified(root_dir):
    """Build the simplified (unpaired) training dataset from <root_dir>/train."""
    return DatasetFromFolder_simplified(join(root_dir, "train"))
def get_test_set_simplified(root_dir):
    """Build the simplified (unpaired) test dataset from <root_dir>/test."""
    return DatasetFromFolder_simplified(join(root_dir, "test"))
def get_test_images_in_test_mode(opt):
    '''
    Here only give the path to the test imgs. No need to manually pair the
    images into dataset format.
    :param opt: parsed options; uses opt.test_img_folder, opt.threads and
                opt.batch_size.
    :return: test_data_loader.
    '''
    test_set = DatasetFromFolder_in_test_mode(opt.test_img_folder)
    testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads,
                                     batch_size=opt.batch_size, shuffle=False)
    print('Validation images: {}'.format(len(test_set)))
    return testing_data_loader
def get_dataset_loader(opt):
    """Create train/test DataLoaders for grayscale (input_nc=1) or RGB (input_nc=3) data.

    :param opt: parsed options; uses opt.input_nc, opt.dataset_path,
                opt.threads and opt.batch_size.
    :return: (training_data_loader, testing_data_loader)
    :raises ValueError: if opt.input_nc is neither 1 nor 3 (the original fell
                        through to an UnboundLocalError in that case).
    """
    if opt.input_nc == 1:
        print('GRAY imgs')
        train_set = get_training_set_simplified(opt.dataset_path)
        test_set = get_test_set_simplified(opt.dataset_path)
    elif opt.input_nc == 3:
        print('RGB imgs')
        train_set = get_training_set(opt.dataset_path, 'source2target')
        test_set = get_test_set(opt.dataset_path, 'source2target')
    else:
        raise ValueError('Unsupported input_nc: {} (expected 1 or 3)'.format(opt.input_nc))
    # NOTE(review): shuffle=False on the training loader is unusual for
    # training — confirm this is intentional.
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                      batch_size=opt.batch_size, shuffle=False)
    testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads,
                                     batch_size=opt.batch_size, shuffle=False)
    print('Training images: {}'.format(len(train_set)))
    print('Validation images: {}'.format(len(test_set)))
    return training_data_loader, testing_data_loader
class SpaceAge:
    """Convert an age in seconds into years on each planet.

    Orbital periods (in Earth seconds) follow the classic 'space age'
    exercise; every result is rounded to two decimal places.
    """

    # Orbital period of each planet, in seconds. The original literal was a
    # SyntaxError: a missing comma after "Venus" and a stray ',s' on "Mars".
    Time = {
        "Earth": 31558149.76,
        "Mercury": 7600530.24,
        "Venus": 19413907.2,
        "Mars": 59354294.4,
        "Jupiter": 374335776.0,
        "Saturn": 929596608.0,
        "Uranus": 2661041808.0,
        "Neptune": 5200418592.0,
    }

    def __init__(self, age):
        # age is expressed in seconds.
        self.age = age

    def _years_on(self, planet):
        """Age in years on *planet*, rounded to 2 decimals.

        The original methods divided undefined locals (orbital_mercury, ...)
        and dropped the 2-decimal rounding from Venus onward.
        """
        return round(self.age / self.Time[planet], 2)

    def on_earth(self):
        return self._years_on("Earth")

    def on_mercury(self):
        return self._years_on("Mercury")

    def on_venus(self):
        return self._years_on("Venus")

    def on_mars(self):
        return self._years_on("Mars")

    def on_jupiter(self):
        return self._years_on("Jupiter")

    def on_saturn(self):
        return self._years_on("Saturn")

    def on_uranus(self):
        return self._years_on("Uranus")

    def on_neptune(self):
        return self._years_on("Neptune")
from django.db import models
from django.contrib.auth.models import User
# Model for a website user, contains various fields of information about them
class Profile(models.Model):
    # One profile per Django auth user; deleted along with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # NOTE: PascalCase field names are non-idiomatic but kept — renaming them
    # would require a database migration.
    First_Name = models.CharField(max_length=20)
    Last_Name = models.CharField(max_length=20)
    Email = models.EmailField(max_length=120)
    Phone = models.CharField(max_length=20)
    Organisation = models.CharField(max_length=100)

    def __str__(self):
        return self.user.username
|
class Node:
    """Singly-linked list node: a payload plus an optional next pointer."""
    def __init__(self, data, next=None):
        # 'next' shadows the builtin of the same name; kept for API compatibility.
        self.data = data
        self.next = next
def equal(left_node, right_node):
    """Structural equality of two linked lists (recursive)."""
    if left_node is None or right_node is None:
        # Equal only when both lists have ended together.
        return left_node is right_node
    if left_node.data != right_node.data:
        return False
    return equal(left_node.next, right_node.next)
def string(node):
    """Render a list as nested 'Node(data, ...)' text, ending in 'None'."""
    return str(None) if node is None else f"Node({node.data}, {string(node.next)})"
'''
Sum two linked lists where 321 is represented as 1 -> 2 -> 3
'''
def next(node):
    """None-safe successor: the node after *node*, or None at the tail.

    (Shadows the builtin `next`; kept for compatibility with callers.)
    """
    return None if node is None else node.next
def data(node):
    """None-safe payload: the digit stored in *node*, or 0 past the tail."""
    return 0 if node is None else node.data
def compute_sum(left, right):
    """Add two little-endian digit lists; return the sum as a new list."""
    total = 0
    carry = 0
    place = 0
    # Accumulate the numeric value of the sum, one digit position at a time.
    while left is not None or right is not None or carry > 0:
        digit_sum = data(left) + data(right) + carry
        carry, digit = divmod(digit_sum, 10)
        total += digit * (10 ** place)
        place += 1
        left, right = next(left), next(right)
    # Re-encode the integer as a digit list, least-significant digit first.
    total, digit = divmod(total, 10)
    head = tail = Node(digit)
    while total > 0:
        total, digit = divmod(total, 10)
        tail.next = Node(digit)
        tail = tail.next
    return head
# Demo: 9199 + 5254 stored little-endian; prints the digit list of 14453.
first_list = Node(9, next=Node(1, next=Node(9, next=Node(9))))
second_list = Node(4, next=Node(5, next=Node(2, next=Node(5))))
print(string(compute_sum(first_list, second_list)))
from toolz.functoolz import (thread_first, thread_last, memoize, curry,
compose, pipe)
from operator import add, mul
from toolz.utils import raises
import itertools
def iseven(x):
    """True when x has no remainder modulo 2."""
    return not x % 2
def isodd(x):
    """True when x has remainder 1 modulo 2."""
    return 1 == x % 2
def inc(x):
    """Return x + 1 (tiny helper used throughout these tests)."""
    return x + 1
def double(x):
    """Return 2 * x (tiny helper used throughout these tests)."""
    return 2 * x
def test_thread_first():
    """thread_first pipes a value through fns, inserted as the FIRST argument."""
    assert thread_first(2) == 2
    assert thread_first(2, inc) == 3
    assert thread_first(2, inc, inc) == 4
    assert thread_first(2, double, inc) == 5
    assert thread_first(2, (add, 5), double) == 14
def test_thread_last():
    """thread_last pipes a value through fns, inserted as the LAST argument."""
    assert list(thread_last([1, 2, 3], (map, inc), (filter, iseven))) == [2, 4]
def test_memoize():
    """memoize caches results (single call for repeated args) and keeps __doc__."""
    fn_calls = [0]  # Storage for side effects
    def f(x, y):
        """ A docstring """
        fn_calls[0] += 1
        return x + y
    mf = memoize(f)
    assert mf(2, 3) == mf(2, 3)
    assert fn_calls == [1]  # function was only called once
    assert mf.__doc__ == f.__doc__
def test_curry_simple():
    """curry produces partially-applied callables for positional args."""
    cmul = curry(mul)
    double = cmul(2)
    assert callable(double)
    assert double(10) == 20
    cmap = curry(map)
    assert list(cmap(inc)([1, 2, 3])) == [2, 3, 4]
def test_curry_kwargs():
    """curry handles keyword arguments and defaults in any application order."""
    def f(a, b, c=10):
        return (a + b) * c
    f = curry(f)
    assert f(1, 2, 3) == 9
    assert f(1)(2, 3) == 9
    assert f(1, 2) == 30
    assert f(1, c=3)(2) == 9
    assert f(c=3)(1, 2) == 9
def test_curry_passes_errors():
@curry
def f(a, b):
if not isinstance(a, int):
raise TypeError()
return a + b
assert f(1, 2) == 3
assert raises(TypeError, lambda: f('1', 2))
assert raises(TypeError, lambda: f('1')(2))
assert raises(TypeError, lambda: f(1, 2, 3))
def test_curry_docstring():
    """curry preserves the wrapped function's docstring and str()."""
    def f(x, y):
        """ A docstring """
        return x
    g = curry(f)
    assert g.__doc__ == f.__doc__
    assert str(g) == str(f)
def test_compose():
    """compose applies right-to-left; rightmost fn may take many args/kwargs."""
    assert compose()(0) == 0
    assert compose(inc)(0) == 1
    assert compose(double, inc)(0) == 2
    assert compose(str, iseven, inc, double)(3) == "False"
    assert compose(str, add)(1, 2) == '3'
    def f(a, b, c=10):
        return (a + b) * c
    assert compose(str, inc, f)(1, 2, c=3) == '10'
def test_pipe():
    """pipe threads a value through the given functions left-to-right."""
    assert pipe(1, inc) == 2
    assert pipe(1, inc, inc) == 3
    assert pipe(1, double, inc, iseven) == False
|
# Example of a class using property decorators.
class Pessoa:
    """A person whose name and age are exposed through properties."""

    def __init__(self, nome, idade):
        self._nome = nome
        self._idade = idade

    @property  # the original '@propertyidade' was a NameError
    def idade(self):
        return self._idade

    @idade.setter
    def idade(self, idade):
        self._idade = idade

    @property  # the original '@propertynome' was a NameError
    def nome(self):
        return self._nome

    @nome.setter
    def nome(self, nome):
        self._nome = nome
# Example of a class without property decorators (explicit property() calls).
class Animal:
    """An animal whose nickname and leg count are exposed via property()."""

    def __init__(self, apelido, patas):
        self._apelido = apelido
        self._patas = patas

    def _get_apelido(self):
        return self._apelido

    def _get_patas(self):
        return self._patas

    def _set_apelido(self, apelido):
        self._apelido = apelido

    def _set_patas(self, patas):
        self._patas = patas

    # property(fget, fset): the original passed the setter first and
    # referenced undefined _get_nome/_set_nome (NameError at class creation).
    apelido = property(_get_apelido, _set_apelido)
    patas = property(_get_patas, _set_patas)
import json
import boto3
from io import BytesIO
import os
from torch import load as load_model
from werkzeug.utils import secure_filename
from flask import Flask, render_template, request, jsonify, send_file
from flask_bootstrap import Bootstrap
from cyclegan import model
from PIL import Image
# Upload handling configuration.
UPLOAD_FOLDER = 'tmp'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask(__name__)
Bootstrap(app)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# S3 client; credentials come from the environment (ACCESS_ID / ACCESS_KEY).
s3 = boto3.client("s3", aws_access_key_id=os.environ.get("ACCESS_ID"),
                  aws_secret_access_key=os.environ.get("ACCESS_KEY"))
def allowed_file(filename):
    """True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template("index.html")
@app.route("/convert", methods=['POST'])
def convert():
print(request)
if request.method == 'POST':
file = request.files['image']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
im = model.single_test(os.path.join(
app.config['UPLOAD_FOLDER'], filename))
os.remove(os.path.join(app.config['UPLOAD_FOLDER'], filename))
image_pil = Image.fromarray(im)
in_mem_file = BytesIO()
image_pil.save(in_mem_file, format="png")
in_mem_file.seek(0)
s3.upload_fileobj(
in_mem_file, # This is what i am trying to upload
"cycleganapp",
filename,
ExtraArgs={
'ACL': 'public-read'
}
)
return jsonify({"image_link": "https://cycleganapp.s3-ap-southeast-1.amazonaws.com/"+filename})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5001, debug=True)
|
# -*- coding: utf-8 -*-
# Author: Administrator
# Created: 2020/4/116:41 (timestamp garbled in original)
# File: test_login.py
from case.login_case.login import func_login
import allure
import pytest
@allure.story('测试登录接口')
@allure.title('输入正确的账号密码,登录成功')
def test_login_01(creat_data):
    """Valid credentials: expect code 0 and a success payload."""
    user = 'test_hlp'
    pwd = '123456'
    r = func_login(user, pwd)
    assert r['code'] == 0
    assert r['msg'] == 'login success!'
    assert r['username'] == 'test_hlp'
@allure.story('测试登录接口')
@allure.title('输入未注册账号,登录失败')
def test_login_02():
    """Unregistered account: expect error code 3003."""
    user = 'abcdef'
    pwd = '123456'
    r = func_login(user, pwd)
    assert r['code'] == 3003
    assert r['msg'] == "账号或密码不正确"
# (username, password) pairs: unregistered user / wrong password.
test_data = [('123456','123456'),('test_hlp','66666')]
@allure.story('测试登录接口')
@allure.title(' 输入错误的账号或密码,登录失败')
@pytest.mark.parametrize('user, pwd',test_data)
def test_login_03(user, pwd):
    """Wrong username or password: expect error code 3003."""
    r = func_login(user, pwd)
    assert r['code'] == 3003
    assert r['msg'] == "账号或密码不正确"
@allure.story('测试登录接口')
@allure.title(' 用户名或密码为空,登录失败')
@pytest.mark.parametrize('user, pwd',[('', '123456'),('test_hlp', '')])
def test_login_04(user, pwd):
    """Empty username or password: expect error code 3003."""
    r = func_login(user, pwd)
    assert r['code'] == 3003
    assert r['msg'] == "账号或密码不正确"
if __name__ == '__main__':
    # pytest.main() takes ONE list of CLI arguments; passing the options as
    # separate string arguments (as the original did) is invalid.
    pytest.main(['-s', 'test_login.py'])
import pymysql
# Minimal TiDB/MySQL connectivity check: list the tables in the `test` schema.
# NOTE(review): credentials are hard-coded, and there is no try/finally, so
# the connection leaks if any statement raises.
db = pymysql.connect(host='127.0.0.1', port=4000,user='zyh',password='zyh',db='test')
cursor = db.cursor()
cursor.execute('show tables')
rsp = cursor.fetchall()  # fetched but unused
cursor.close()
db.close()
|
from django.db import models
# Create your models here.
# Products app with Product model.
# Product : name, weight, price, created_at, updated_at
class productModel(models.Model):
    # Product display name.
    name = models.CharField(max_length=255, blank=False, null=False)
    weight = models.IntegerField(blank=False, null=False)
    price = models.IntegerField(blank=False, null=False)
    # NOTE(review): timestamps stored as free text — DateTimeField with
    # auto_now_add/auto_now is the conventional choice (requires a migration).
    created_at = models.TextField(blank=False, null=False)
    updated_at = models.TextField(blank=False)

    def __str__(self):
        return self.name
|
# http://www.lintcode.com/zh-cn/problem/reverse-words-in-a-string/
def reverseWords(s):
    """Return the space-separated words of *s* in reverse order.

    Runs of spaces are collapsed and leading/trailing spaces are dropped,
    matching the original hand-rolled scanner, which this replaces: the
    original re-scanned and re-sliced the string per word (quadratic-ish
    and hard to follow); split/join does the same work in one pass.

    :param s: A string
    :return: A string
    """
    # split(" ") then filtering empties mirrors the original's behavior of
    # treating only ' ' (not tabs/newlines) as the separator.
    words = [w for w in s.split(" ") if w]
    return " ".join(reversed(words))

print(reverseWords("the sky is blue "))
# renames duplicate columns by suffixing _1, _2 etc
class renamer():
    """Callable that de-duplicates names: 'x', 'x_1', 'x_2', ... per instance."""

    def __init__(self):
        # Count of prior occurrences for each name seen so far.
        self.d = dict()

    def __call__(self, x):
        """Return x unchanged on first sight, else x with a numeric suffix."""
        if x in self.d:
            self.d[x] += 1
            return f"{x}_{self.d[x]}"
        self.d[x] = 0
        return x
|
"""
Package: app
Package for the application models and services
This module also sets up the logging
"""
import os
import logging
from flask import Flask
# Create Flask application
app = Flask(__name__)
# Load Configurations
app.config.from_object('config')
app.config['SECRET_KEY'] = 'secret-for-dev'  # dev-only secret; override in production
app.config['LOGGING_LEVEL'] = logging.INFO
app.config['API_KEY'] = os.getenv('API_KEY')
# Import the service After the Flask app is created
from service import service, models
# Set up logging for production: when running under gunicorn (module not
# imported as __main__), inherit gunicorn's handlers and log level.
print("Setting up logging for {}...".format(__name__))
app.logger.propagate = False
if __name__ != "__main__":
    gunicorn_logger = logging.getLogger("gunicorn.error")
    if gunicorn_logger:
        app.logger.handlers = gunicorn_logger.handlers
        app.logger.setLevel(gunicorn_logger.level)
    app.logger.info("Logging established")
app.logger.info(70 * "*")
app.logger.info(" S U P P L I E R S S E R V I C E R U N N I N G ".center(70, "*"))
app.logger.info(70 * "*")
app.logger.info('Service inititalized!')
# If an API Key was not provided, autogenerate one
if not app.config['API_KEY']:
    app.config['API_KEY'] = service.generate_apikey()
app.logger.info('Missing API Key! Autogenerated: {}'.format(app.config['API_KEY'])) |
"""
Create a polygon class and a rectangle class
that inherits from the polygon class and finds the square of rectangle.
"""
class Polygon:
    """A named polygon with a fixed number of sides, entered interactively."""

    def __init__(self, name: str, no_of_sides):
        self.name = name
        self.no_of_sides = no_of_sides
        self.sides = [0] * no_of_sides  # side lengths, filled by input_sides()

    def __repr__(self):
        return f'{self.name} -> {str(self.sides)}'

    def input_sides(self):
        """
        Initialize Polygon sides length by prompting on stdin (one int per side).
        :return: None (mutates self.sides in place)
        """
        print(f'Enter {self.name} sides')
        temp_list = []
        for index, side in enumerate(self.sides, start=1):
            temp_list.append(int(input(f'Enter side {index}: ')))
        self.sides = temp_list
class Rectangle(Polygon):
    """
    Class determined property of Rectangle(kind of Polygon).
    """

    def area_calculate(self):
        """
        Calculate rectangle area as the product of the first two sides.
        The remaining two sides (c, d) are unpacked but unused — they are
        assumed equal to a and b and are not validated.
        :return: formatted '<name> area = <a*b>' string
        """
        a, b, c, d = self.sides
        return f'{self.name} area = {a * b}'
if __name__ == '__main__':
    # Build a 4-sided polygon, read its sides from stdin, report the area.
    demo_rectangle = Rectangle(name='Rectangle', no_of_sides=4)
    demo_rectangle.input_sides()
    print(demo_rectangle.area_calculate())
import sys
from tables import *
from optparse import OptionParser
"""Takes two .h5 files, one with correct analog data
and the other one with bad analog data and returns
a single .h5 file with all good data."""
# Python 2 script (print statements; legacy PyTables openFile API).
usage = '%prog [options] analog.h5 stroklitude.h5 output.h5'
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) != 3:
    print 'Please specify two input files and one output file.'
    exit()
analog_fname = args[0]
strokelitude_fname = args[1]
output_fname = args[2]
# Open the two files that are to be fused
analog_h5 = openFile(analog_fname,mode='r')
strokelitude_h5 = openFile(strokelitude_fname,mode='r')
print
print '...---=== Analog file ===---...'
print analog_h5
print '...---=== Strokelitude file ===---...'
print strokelitude_h5
# Output file
fused = openFile(output_fname, mode = "w", title = "Fused file")
# Copy data: the good analog/time streams plus the stroke data table.
analog_h5.root.time_data._f_copy(fused.root)
analog_h5.root.ain_wordstream._f_copy(fused.root)
strokelitude_h5.root.stroke_data._f_copy(fused.root)
print '...---=== Fused file ===---...'
print fused
fused.close()
analog_h5.close()
strokelitude_h5.close()
|
def add(a, b):
    """Print the sum of two numbers."""
    print(a + b)

a = int(input())
b = int(input())
add(a, b)

# another code: sum a whole list of numbers
def add_list(values):
    """Print the sum of an iterable of numbers.

    The original redefined add(*element) but ignored its parameters and read
    the module globals ``arr``/``n`` instead, so it only worked by accident.
    """
    total = 0
    for value in values:
        total += value
    print(total)

arr = []
n = int(input())
for i in range(n):
    data = int(input())
    arr.append(data)
add_list(arr)
import pytest
from nn_models import model
""" Run with oython -m pytest"""
class TestModel:
    """Tests for the neural-network model (run with `python -m pytest`)."""

    def test_Input_bias_Neuron(self):
        """Exercise an input/bias neuron: output, error sums, activation, passes."""
        self.neru = model.Neuron(None)  # an input neuron (no upstream layer)
        # output get/set round-trips
        self.neru.setOutput(1.0)
        assert self.neru.getOutput() == 1.0
        self.neru.setOutput(10000000000)
        assert self.neru.getOutput() == 10000000000
        self.neru.setOutput(-100000000000)
        assert self.neru.getOutput() == -100000000000
        # error accumulation
        assert self.neru.getError() == 0.0
        self.neru.setError(5.0)
        assert self.neru.getError() == 5.0
        self.neru.addError(5.0)
        assert self.neru.getError() == 10.0
        self.neru.addError(-5.0)
        assert self.neru.getError() == 5.0
        assert self.neru.getError() != 11.0
        # activation function and its derivative
        assert (self.neru.sigmoid(1) - 0.73105857863) <= 0.0001
        assert self.neru.sigmoid(-1000) == 0
        assert self.neru.sigmoid(1000) - 1.0 <= 0.0001
        assert self.neru.dSigmoid(0) == 0
        assert self.neru.dSigmoid(1000.0) == 1000.0 * (1.0 - 1000.0)
        # feed forward: input neurons pass their output through unchanged
        self.neru.setOutput(1.0)
        self.neru.feedForward()
        assert self.neru.getOutput() == 1.0
        # back propagate: error stays as previously set
        self.neru.backPropagate()
        assert self.neru.getError() == 5.0

    def test_hiddenNeuron(self):
        """Exercise a hidden-layer neuron connected to one input neuron."""
        self.layers = []
        self.neru = model.Neuron(None)  # the input neuron
        # the input layer
        self.prevLayer = [self.neru]
        self.layers.append(self.prevLayer)
        self.n = model.Neuron(self.layers[0])  # hidden neuron
        # output get/set round-trips
        self.n.setOutput(-1000)
        assert self.n.getOutput() == -1000
        self.n.setOutput(0)
        assert self.n.getOutput() == 0
        self.n.setOutput(1)
        assert self.n.getOutput() == 1
        # error accumulation
        assert self.n.getError() == 0.0
        self.n.setError(-1.0)
        assert self.n.getError() == -1.0
        self.n.addError(5.0)
        assert self.n.getError() == 4.0
        self.n.addError(-5.0)
        assert self.n.getError() == -1.0
        assert self.n.getError() != 100000.0
        # activation function and its derivative
        assert self.n.sigmoid(1) - 0.73105857863 <= 0.0001
        assert self.n.sigmoid(-1000) == 0
        assert self.n.sigmoid(1000) - 1.0 <= 0.0001
        assert self.n.dSigmoid(0) == 0
        assert self.n.dSigmoid(1000.0) == 1000.0 * (1.0 - 1000.0)
        # feed forward: hidden neuron applies sigmoid to its weighted input
        self.n.setOutput(1.0)
        self.n.feedForward()
        assert self.n.getOutput() == self.n.sigmoid(0)
        # back propagate: zero error propagates zero upstream
        self.n.setError(0.0)
        self.n.backPropagate()
        assert self.n.getError() == 0.0
        assert self.n.connections[0].connectedNeuron.getError() == 0

    def test_Connection(self):
        """Exercise the connection class (directed neuron links)."""
        self.n1 = model.Neuron(None)
        self.n2 = model.Neuron(None)
        # one way connection 1 ---> 2
        self.n1.connections.append(model.Connection(self.n2))
        assert self.n2 == self.n1.connections[0].connectedNeuron
        # connecting to nothing must be rejected
        with pytest.raises(AssertionError):
            self.n1.connections.append(model.Connection(None))

    def test_Net(self):
        """Exercise the network: inputs land on the first layer unchanged."""
        # a simple AND neural net
        self.AND_nn = model.Network([2, 1, 1])
        self.input = [[1, 1], [1, 0], [0, 1], [0, 0]]
        self.output = [[1], [0], [0], [0]]
        for j in range(len(self.input)):
            self.AND_nn.setInput(self.input[j])  # set the input
            for i in range(len(self.input[j])):
                assert self.AND_nn.layers[0][i].getOutput() == self.input[j][i]
|
import googlemaps as gm
from datetime import datetime
import matplotlib.pyplot as plt
import math
def get_matrix(destinations, method='distance'):
    """Query the Google Distance Matrix API for every ordered pair (i, j), i < j.

    Returns {(i, j): value} where value is the 'distance' or 'duration'
    between destinations[i] and destinations[j].
    NOTE(review): the API key is hard-coded in source — move it to an
    environment variable and rotate the committed key.
    """
    gmaps = gm.Client("AIzaSyBwMwayIZrYwfwotUim0QOvKVu4YZPEnw8")
    cleaned = {}
    for i, d in enumerate(destinations[:-1]):
        # One API call per origin, against all later destinations only.
        cleaned = {**clean(i, gmaps.distance_matrix(
            origins=[d],
            destinations=destinations[i+1:],
            mode="driving",
            departure_time=datetime.now()
        ), method), **cleaned}
    return cleaned
def clean(origin, mapping, method):
    """Flatten one Distance Matrix API response row into {(origin, dest): value}.

    Element i of the single row corresponds to destination index
    origin + i + 1 (the caller queries only later destinations, producing
    upper-triangular pairs).

    :param origin: index of the origin within the full destination list.
    :param mapping: raw distance_matrix() response (one row expected).
    :param method: 'distance' or 'duration' — which metric to extract.
    :return: dict mapping (origin, destination_index) to the metric value.
    """
    cleaned = {}
    for i, destination in enumerate(mapping['rows'][0]['elements']):
        cleaned[(origin, origin + i + 1)] = destination[method]['value']
    return cleaned
def check_range(mapping, radius):
    """True when the largest value found in *mapping* reaches *radius*.

    NOTE(review): `max(p) for p in mapping` — if *mapping* is the dict
    produced by get_matrix(), iterating it yields the (origin, dest) KEY
    tuples, so this maxes indices rather than distances; confirm the
    intended input is a list of rows.
    """
    maximum = max([d for d in [max(p) for p in mapping]])
    print(mapping)   # DEBUG
    print(maximum)   # DEBUG
    return maximum >= radius
# DEBUG
def show_report(historyDistance, bestChromosome, firstChromosome, destinations, method):
print("\n===== Complete! =====")
print("First Fitness\t: {:.9f}".format(firstChromosome.fitness() / bestChromosome.fitness()))
print("First Distance\t: {:.5f} {}".format(firstChromosome.distance()/(1000 if method == "distance" else 3600), "km" if method == "distance" else "h"))
print("Best Fitness\t: {:.9f}".format(1))
print("Best Distance\t: {:.5f} {}".format(bestChromosome.distance()/(1000 if method == "distance" else 3600), "km" if method == "distance" else "h"))
plt.figure(1)
plt.subplot(211)
plt.plot(range(len(historyDistance)), historyDistance, color='g')
plt.ylabel('Distance')
plt.xlabel('Generations')
plt.title('GA : Performance by {}'.format(method))
gmaps = gm.Client("AIzaSyBwMwayIZrYwfwotUim0QOvKVu4YZPEnw8")
targets = []
for d in destinations:
geo = gmaps.geocode(d)[0]['geometry']['location']
targets.append((geo['lng'], geo['lat']))
plt.figure(2)
plt.axis('off')
x, y = zip(*targets)
plt.plot(x, y, 'ro')
last = bestChromosome.sequence[-1]
for i, s in enumerate(bestChromosome.sequence):
x, y = zip(targets[last], targets[s])
last = s
plt.plot(x, y, 'r')
for i, txt in enumerate(destinations):
plt.annotate(txt, tuple(targets[i]))
plt.title('GA : Best Path @ {}={:.5f}{}'.format(method, bestChromosome.distance()/(1000 if method == "distance" else 3600), "km" if method == "distance" else "h"))
plt.show(block=False)
plt.figure(3)
plt.axis('off')
x, y = zip(*targets)
plt.plot(x, y, 'ro')
last = firstChromosome.sequence[-1]
for i, s in enumerate(firstChromosome.sequence):
x, y = zip(targets[last], targets[s])
last = s
plt.plot(x, y, 'r')
for i, txt in enumerate(destinations):
plt.annotate(txt, tuple(targets[i]))
plt.title('GA : First Path @ {}={:.5f}{}'.format(method, firstChromosome.distance()/(1000 if method == "distance" else 3600), "km" if method == "distance" else "h"))
plt.show()
def geodetic_to_geocentric(lat, lon):
    """Project WGS-84 geodetic lat/lon (degrees) to planar coordinates (meters).

    Uses the WGS-84 ellipsoid (a = 6378137 m, 1/f = 298.257223563) and the
    prime-vertical radius of curvature N.
    NOTE(review): X uses sin(lon) and Y uses cos(lon) — swapped relative to
    the usual ECEF convention; preserved as-is, confirm this is intended.
    """
    semi_major, inv_flattening = (6378137, 298.257223563)
    phi = math.radians(lat)
    lam = math.radians(lon)
    # First eccentricity squared of the ellipsoid.
    e2 = 1 - (1 - 1 / inv_flattening) ** 2
    prime_vertical = semi_major / math.sqrt(1 - e2 * (math.sin(phi)) ** 2)
    y = prime_vertical * math.cos(phi) * math.cos(lam)
    x = prime_vertical * math.cos(phi) * math.sin(lam)
    return x, y
|
# Local cache files for currency rates.
cCurrencyDataSource = 'data/currency.json'
cCurrencyUpdateDataDestination = 'data/currency_latest.json'
# NOTE(review): fixer.io now requires an API key — confirm this endpoint still works.
cCurrencyURL = 'http://api.fixer.io/latest?base=INR'
# TV-show RSS feed cache and source.
cShowsDataSource = 'data/shows.xml'
cShowsUpdateDataDestination = 'data/shows.xml'
cShowsURL = "http://showrss.info/rss.php?user_id=207042&hd=0&proper=null&raw=false"
cTemperature = 'C' #"C" or "F"
cCity = "Bangalore"
# Keyword lists used to route free-text input to the matching module.
cNLP_Dict = {'Shows':['shows','download']
             ,'Weather':['weather','umbrella']
             ,'Currency':['usd','inr','cad','aud']
             ,'Website':['up','down','unavailable','url','website']}
cError_Dict = {'Error-NLP-01':'Module not found with the input text'
,'Error-Currency-01':'Unable to fetch latest currency rates'} |
import RPi.GPIO as GPIO
import time
# blinking function
def blink(pin):
    """Drive *pin* high for 1 s then low for 1 s (one 2-second blink cycle)."""
    GPIO.output(pin, GPIO.HIGH)
    time.sleep(1)
    GPIO.output(pin, GPIO.LOW)
    time.sleep(1)
    return
# Use Raspberry Pi board pin numbering.
GPIO.setmode(GPIO.BOARD)
# Configure board pin 11 as an output channel.
GPIO.setup(11, GPIO.OUT)
# Blink the LED on pin 11 fifty times, then release the GPIO resources.
for _ in range(50):
    blink(11)
GPIO.cleanup()
"""
A script used to evaluate the BalancedBaggingClassifiers trained on several (12)
word2vec model outputs and using 50 or 100 decision tree estimators in order
to choose the best (word2vec model, n_estimator) configuration. The criteria
which will be used for selection is AUC score and the Brier score if necessary.
"""
from imblearn.ensemble import BalancedBaggingClassifier
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score, brier_score_loss
import pandas as pd
import glob, os, re
if __name__ == "__main__":
if os.path.isfile("./classifier_results.csv"):
results = pd.read_csv("./classifier_results.csv")
else:
results = pd.DataFrame(columns=["data_file", "bb_n_est", "auc", "brier"])
print("Loading data")
train_data = pd.read_csv("/data/SO_data/downvoter/wv_train_processed_data.csv")
val_data = pd.read_csv("/data/SO_data/downvoter/wv_val_processed_data.csv")
train_labels = train_data.score < 0
val_labels = val_data.score < 0
# Different word2vec models vectorized data. An all encompasing list for
# both titles and bodies. Should contain 24 elements if 12 w2v models.
instances = [re.match("./vectorized_data/train_(.+)", f).group(1)
for f in glob.glob("./vectorized_data/train_*")]
print(instances)
# We'll get two generators, which will be 'synchronized', one for training
# and one for vlaidation data.
train_vecs = ((joblib.load("./vectorized_data/train_%s" % i), i)
for i in instances)
val_vecs = (joblib.load("./vectorized_data/val_%s" % i)
for i in instances)
# BalancedBaggingClassifier n_estimator and n_jobs params
params = [(100, -1), (50, -1)]
for data in train_vecs:
val_vec = next(val_vecs) # 'sync' training and validation data
for param in params:
print("Processing %s %s" % (data[1], param[0]))
bb_model = BalancedBaggingClassifier(n_estimators=param[0],
n_jobs=param[1],
ratio="not minority")
print("Fitting...")
bb_model.fit(data[0], train_labels)
print("Testing...")
preds = bb_model.predict_proba(val_vec)
auc = roc_auc_score(val_labels, preds[:,1])
brier = brier_score_loss(val_labels, preds[:,1])
results = results.append({"data_file" : data[1],
"bb_n_est" : param[0],
"auc" : auc,
"brier" : brier},
ignore_index=True)
results.to_csv("./classifier_results.csv", index=False)
print("AUC: %.3f, BRIER: %.3f" % (auc, brier))
results.to_csv("./classifier_results.csv", index=False)
|
import os
import sys
import numpy as np
sys.path.append(os.getcwd() + '/src')
from PyExpUtils.results.results import loadResults, whereParametersEqual, splitOverParameter
from experiment.tools import iterateDomains, parseCmdLineArgs
from experiment import ExperimentModel
def printStats(exp_paths, metric):
    """Print the mean *metric* for the custom ('mine') vs 'tiles3' tile coders,
    one line per (tiles, tilings) combination swept in the experiment."""
    print(f'-------------{metric}-------------')
    exp = ExperimentModel.load(exp_paths[0])
    results = loadResults(exp, f'{metric}.csv')
    results_dict = splitOverParameter(results, 'tile-coder')
    # two new streams over results
    mine = results_dict['mine']
    tiles3 = results_dict['tiles3']
    # figure out what values of "tiles" we swept
    tiles = splitOverParameter(mine, 'tiles').keys()
    # figure out what values of "tilings" we swept
    tilings = splitOverParameter(mine, 'tilings').keys()
    for num_tiles in tiles:
        for num_tilings in tilings:
            mine_results = list(whereParametersEqual(mine, { 'tiles': num_tiles, 'tilings': num_tilings }))
            tiles3_results = list(whereParametersEqual(tiles3, { 'tiles': num_tiles, 'tilings': num_tilings }))
            mine_means = []
            tiles3_means = []
            # loop over each value of alpha
            # this way we just get 3x as many samples of timing
            for i in range(len(mine_results)):
                mine_mean = mine_results[i].mean()[0]
                tiles3_mean = tiles3_results[i].mean()[0]
                mine_means.append(mine_mean)
                tiles3_means.append(tiles3_mean)
            mine_mean = np.mean(mine_means)
            tiles3_mean = np.mean(tiles3_means)
            # TODO: this is covering up a bug in results. Rerun results
            if metric == 'feature_utilization':
                mine_mean = mine_mean / (num_tilings * num_tiles**2)
                tiles3_mean = tiles3_mean / (num_tilings * num_tiles**2)
            print(f'({num_tiles}, {num_tilings}) -- {mine_mean}, {tiles3_mean}')
if __name__ == "__main__":
path, should_save, save_type = parseCmdLineArgs()
for domain in iterateDomains(path):
exp_paths = domain.exp_paths
save_path = domain.save_path
for metric in ['time', 'feature_utilization']:
printStats(exp_paths, metric)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.