text stringlengths 38 1.54M |
|---|
#!/bin/python
# This is an example app which posts a shortcode and the destination url for the redirect.
#
# The intention is to generate personal short-code URLs allowing analytics
# of the clicks: which user received them, for which product, which
# day of the week it was sent, and which position in the message was clicked if there
# are multiple short-code links in the same message.
# The code was developed for experimenting with traffic redirection for a shopping site.
import os, json, sys
import random, string
import urllib2, hashlib
## Parameters
# Campaign label attached to every generated redirect record.
campaign = "test001"
# Numeric id of the sending account (trailing semicolons removed -- not idiomatic Python).
senderid = 12345
# Salt mixed into the SHA1 request signature; must match the server side.
secretsalt = "my secret salt"
###
def id_generator(size=6, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Build a random short-code of `size` characters drawn from `chars`.

    Note: uses the non-cryptographic `random` module, as in the original.
    """
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
def createRedir(screen_name, position, e):
    # Register one short-code redirect with the local shortener service.
    # screen_name: recipient handle; position: index of this link within the
    # message (multiple links per message are possible, per the file header);
    # e: dict assumed to carry "shortcode", "product_id" and "desturl" keys.
    # Returns the short code regardless of whether registration succeeded.
    obj = {}
    obj["shortcode"] = e["shortcode"]
    obj["screen_name"] = screen_name
    # Pseudonymous user key: SHA1 of the raw handle (Python 2 -- str is bytes).
    obj["user"] = hashlib.sha1(screen_name).hexdigest()
    obj["prodid"] = e["product_id"]
    obj["position"] = position
    obj["senderid"] = senderid
    obj["campaign"] = campaign
    obj["desturl"] = e["desturl"] # You could add the hashed userid for dest side tracking
    doc = json.dumps(obj)
    # Request "signature": SHA1 over payload + shared salt.
    # NOTE(review): SHA1 with a static salt is weak as an auth scheme --
    # confirm what the server actually validates.
    sig = hashlib.sha1(doc + secretsalt ).hexdigest()
    try:
        # The JSON payload travels in a custom header; the signature is the URL path.
        headers = {'User-Agent': "PosterBoy/1.0", 'x-msg': doc}
        # We should really use POST, but using GET for now
        req = urllib2.Request("http://localhost:8000/newshortcode/" + sig, headers=headers)
        usock = urllib2.urlopen(req)
        info = usock.read()
        usock.close()
    except urllib2.HTTPError as err:
        # HTTP failures are only printed; the caller still gets the shortcode.
        print err
    return e["shortcode"]
# Demo invocation: register one short code for user "albert" at message
# position 1 and print the resulting public short URL (Python 2 print).
print "http://localhost:8000/"+createRedir("albert",1,{"shortcode":id_generator(), "product_id": "E123-546","desturl":"http://shopping.example.com?utm=tracking123"})
|
import os
import yaml
import sys
import errno
import winreg
from re import match as re_match
from pathlib import Path
class Data():
    """Paths and Windows-registry lookups for the eDrawingFinder app.

    Resolves per-user data/config/log/database paths under %LOCALAPPDATA%
    and locates the installed eDrawings.exe via the registry.
    """

    def __init__(self, core_app):
        self.app = core_app
        self.app_data = os.getenv('LOCALAPPDATA')
        self.app_data_path = os.path.join(self.app_data, 'eDrawingFinder')
        self.log_path = os.path.join(self.app_data_path, 'log.log')
        self.config_path = os.path.join(self.app_data_path, 'config.yaml')
        self.op_path = os.path.join(self.app_data_path, 'op_database.p')
        self.bm_path = os.path.join(self.app_data_path, 'bm_database.p')
        if not os.path.exists(self.app_data_path):
            try:
                os.makedirs(self.app_data_path)
            except OSError as exception:
                # Could not create the app-data dir: fall back to the CWD,
                # but re-raise anything other than "already exists".
                self.app_data_path = os.getcwd()
                if exception.errno != errno.EEXIST:
                    raise

    def resource_path(self, relative_path):
        """Resolve *relative_path* against the PyInstaller bundle directory
        when frozen (sys._MEIPASS), else against the current directory."""
        try:
            base_path = sys._MEIPASS
        except AttributeError:  # narrowed from a blanket Exception
            base_path = os.path.abspath(".")
        return os.path.join(base_path, relative_path)

    def check_config(self, defaults):
        """Write *defaults* to the YAML config file if it does not exist yet."""
        if not os.path.exists(self.config_path):
            with open(self.config_path, 'w+') as file:
                yaml.dump(defaults, file, default_flow_style=False)

    def get_eDrawing_executable(self):
        """Locate eDrawings.exe through the Windows registry.

        Tries HKEY_CURRENT_USER\\Software\\eDrawings first (picking the
        newest e201x version), then falls back to the HKEY_LOCAL_MACHINE
        uninstall entries. Returns Path('.') when nothing is found, which
        makes callers use the OS default handler for the file type.
        """
        path = Path('.')
        # Attempt to find software install location using HKEY_CURRENT_USER
        try:
            key_local = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\eDrawings')
            key_local_query = winreg.QueryInfoKey(key_local)
            installed = [winreg.EnumKey(key_local, i) for i in range(key_local_query[0]) if re_match('.201.', winreg.EnumKey(key_local, i))]
            version_dict = {}
            for item in installed:
                temp_key = winreg.OpenKey(key_local, item)
                # Only subkeys with more than one value carry an InstallDir.
                if winreg.QueryInfoKey(temp_key)[1] > 1:
                    version_dict[item] = winreg.QueryValueEx(temp_key, r'InstallDir')[0]
                winreg.CloseKey(temp_key)
            winreg.CloseKey(key_local)
            newest = f"e{max(int(k.strip('e')) for k in version_dict)}"
            # BUG FIX: previously listed `location` -- whichever install dir the
            # loop happened to end on -- instead of the newest version's dir.
            install_dir = version_dict[newest]
            files_in_location = os.listdir(install_dir)
            path = Path(install_dir, [app for app in files_in_location if app == 'eDrawings.exe'][0])
        except Exception:
            # If the above errors out, attempt to find the install location
            # using the HKEY_LOCAL_MACHINE uninstall entries.
            try:
                key_local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall')
                key_local_query = winreg.QueryInfoKey(key_local)
                location = None
                for i in range(key_local_query[0]):
                    temp_key = winreg.OpenKey(key_local, winreg.EnumKey(key_local, i))
                    try:
                        location_check = winreg.QueryValueEx(temp_key, r'InstallLocation')[0]
                        if 'eDrawings' in location_check:
                            location = location_check
                    except OSError:
                        # This uninstall entry has no InstallLocation value.
                        pass
                    winreg.CloseKey(temp_key)
                winreg.CloseKey(key_local)
                if location is None:
                    raise LookupError('eDrawings install location not found')
                files_in_location = os.listdir(location)
                path = Path(location, [app for app in files_in_location if app == 'eDrawings.exe'][0])
            except Exception:
                # Both attempts failed: keep Path('.'), so the caller falls
                # back to the OS default handler for the file type.
                pass
        return path
#! /bin/python3
import requests
from bs4 import BeautifulSoup
# Solver loop for the "infinite quiz" CTF challenge: fetch the page,
# recognize which test it shows (math / art / grammar), compute the answer,
# POST it back on the same session, and repeat forever.
session = requests.Session()
url = "http://infinite.challs.olicyber.it/"
res = session.get(url)
res = res.text
# print(res)
# Plan: 1) check that the marker string is really in the page,
#       2) locate it with find, 3) extract the operands with BeautifulSoup.
# Parse the html content
soup = BeautifulSoup(res, "lxml")  # a regex would also work
while True:
    if "MATH TEST" in res:
        print("ho trovato un test di")
        # Question reads like "... <a> + <b>?" -- operands are at word
        # positions 3 and 5; the trailing '?' is stripped from the second.
        paragraph = soup.find("p")
        arr = str(paragraph).split(" ")
        addendo = arr[3]
        addendo2 = arr[5][:-1]
        print(addendo, addendo2)
        ans = int(addendo2) + int(addendo)
        print(ans)
        res = session.post(url, data={"sum": ans})
        # print(res.text)
    elif "ART TEST" in res:
        # Colour question: answer by posting the colour named in the paragraph.
        paragraph = str(soup.find("p"))
        if "Blu" in paragraph:
            res = session.post(url, data={"Blu": ""})
        elif "Verde" in paragraph:
            res = session.post(url, data={"Verde": ""})
        elif "Rosso" in paragraph:
            res = session.post(url, data={"Rosso": ""})
        # print(res.text)
    elif "GRAMMAR TEST" in res:
        print("grammar!")
        # Count occurrences of a letter (word 2, quoted) in the target word
        # (second-to-last word, quotes/punctuation trimmed).
        paragraph = soup.find("p")
        arr = str(paragraph).split(" ")
        lettera = arr[2][1]
        parola = arr[-2][1:-2]
        i = 0
        for x in parola:
            if lettera == x:
                i += 1
        ans = i
        res = session.post(url, data={"letter": ans, "submit": "Submit"})
        # print(res.text)
    # Refresh the page text and the parsed soup for the next round.
    res = res.text
    soup = BeautifulSoup(res, "lxml")
    print(soup)
    print(paragraph)
# Steps: 1) detect what is asked  2) compute the answer
# 3) submit the answer  4) receive the next question.
# Compare two user-supplied numbers and report whether the first is larger.
number1 = float(input('enter first number:'))
number2 = float(input('enter number2:'))
if number1 > number2:
    number1bigger = True
else:
    # BUG FIX: was misspelled `number1bigge`, leaving `number1bigger`
    # undefined (NameError at the print below) whenever number1 <= number2.
    number1bigger = False
print('number1bigger:', number1bigger)
|
# coding: utf-8
# ### 1. 数据预处理。
# In[1]:
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
max_features = 20000  # vocabulary size: keep only the most frequent word ids
maxlen = 80  # every review is padded/truncated to this many tokens
batch_size = 32
# Load the data with words already mapped to integer ids; `max_features`
# caps how many distinct words are used.
(trainX, trainY), (testX, testY) = imdb.load_data(num_words=max_features)
print(len(trainX), 'train sequences')
print(len(testX), 'test sequences')
# Reviews vary in length, but the recurrent network unrolls over a fixed
# number of steps, so pad/trim every sequence to the same length first.
trainX = sequence.pad_sequences(trainX, maxlen=maxlen)
testX = sequence.pad_sequences(testX, maxlen=maxlen)
print('trainX shape:', trainX.shape)
print('testX shape:', testX.shape)
# ### 2. Define the model.
# In[2]:
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
# Single sigmoid unit: binary positive/negative sentiment probability.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# ### 3. Train and evaluate the model.
# In[3]:
model.fit(trainX, trainY,
          batch_size=batch_size,
          epochs=10,
          validation_data=(testX, testY))
score = model.evaluate(testX, testY, batch_size=batch_size)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
# -*- coding: utf-8 -*-
"""Generate a default configuration-file section for fn_clamav"""
from __future__ import print_function
def config_section_data():
    """Return the default `[fn_clamav]` section text that resilient-circuits
    writes into app.config when `resilient-circuits config [-c|-u]` runs."""
    # The section is returned verbatim; keep the text byte-exact.
    return u"""[fn_clamav]
# hostname or ip address of Clamav server
host=localhost
# The TCP port Clamav listens on
port=3310
# Define socket timeout
timeout=500
"""
# Professor, fui fazendo na pressa e não me liguei que precisava usar orientação de arquivos para realizar os objetivos do exercício.
# O programa funciona como deveria, mas funciona como se fosse uma máquina de arcade nova e todos os recordes precisassem ser escritos
# do começo. Ta bonitinho, pode ver ai. Espero que entenda, obrigado pelo semestre!
# Só me liguei depois de horas fazendo do jeito que pensava ser o certo
import random, time
print("## Exercício 2 ##")
print("\n## Vamos lançar 3 dados...")
# Maps player name -> most recent three-dice total for this session
# (in-memory only; nothing is persisted to disk).
pontuações = {}
def jogar():
    """Ask the player's name, roll three dice with a one-second pause per
    roll, record the total in the module-level `pontuações` dict, and
    report the score."""
    nome = input("\n# Entre seu nome: ")
    time.sleep(1)
    soma = 0
    for _ in range(3):
        print("Lançando dado... ", end="")
        time.sleep(1)
        rolado = random.randint(1, 6)
        print(rolado)
        soma += rolado
    pontuações[nome] = soma
    print(f"\n## {nome}, você obteve {soma} pontos")
# Simple text menu: play a round, list all scores, or quit.
run = True
while run:
    choice = int(
        input("\n(1)Jogar\n(2)Ver as pontuações de todos que já jogaram\n(3)Sair\n")
    )
    if choice == 3:
        run = False
    elif choice == 1:
        jogar()
    elif choice == 2:
        print(f"## Pontuações ##")
        for x in pontuações:
            print(pontuações[x])
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Map
df_tb = pd.read_excel('./data.xlsx')
def func(m):
    """Return [(region_name, value), ...] pairs for column *m* of the
    loaded spreadsheet `df_tb` (region names come from the '地区' column).

    Generalized: iterates every row of the sheet instead of a hard-coded
    range(0, 35), so the sheet may grow or shrink without breaking.
    Values are cast to plain int because pyecharts needs native numbers.
    """
    pairs = []
    for i in range(len(df_tb)):
        pairs.append((df_tb['地区'][i], int(df_tb[m][i])))
    return pairs
#
# Extract the 2016 column as (region, count) pairs.
datas2 = func('2016年')
print('=============datas=============')
# Reference data set, hand-written for comparison with the extracted one.
datas = [('全国', 832), ('北京', 0), ('天津', 0), ('河北', 45), ('山西', 36), ('内蒙古', 31), ('辽宁', 0), ('吉林', 8), ('黑龙江', 21), ('上海', 0), ('江苏', 0), ('浙江', 0), ('安徽', 20), ('福建', 0), ('江西', 24), ('山东', 0), ('河南', 38), ('湖北', 28), ('湖南', 40), ('广东', 0), ('广西', 33), ('海南', 5), ('重庆', 14), ('四川', 66), ('贵州', 66), ('云南', 88), ('西藏', 74), ('陕西', 56), ('甘肃', 58), ('青海', 41), ('宁夏', 8), ('新疆', 32), ('台湾', 0), ('香港', 0), ('澳门', 0)]
print(datas)
# Sanity check: every value should be a plain int (pyecharts needs numbers).
for data in datas:
    print(type(data[1]), end='')
print('\n')
print('=============datas2=============')
print(datas2)
for data in datas2:
    print(type(data[1]), end='')
#
# #
# # provinces = []
# # values = []
# #
# # for i in range(0, 35):
# # location = df_tb['地区'][i]
# # value = df_tb['2016年'][i]
# # provinces.append(location)
# # values.append(value)
# #
# # map = Map().add("gdp", [data for data in zip(provinces, values)], 'china')
# # map.render('datas2_各省贫困县分布图.html')
#
# from pyecharts.charts import Map, Geo
# from pyecharts import options as opts
# # data=[("广东",10430.03),("山东",9579.31),("河南",9402.36),("四川",8041.82),("江苏",7865.99),("河北",7185.42),("湖南",6568.37),("安徽",5950.1),("浙江",5442),("湖北",5723.77),("广西",4602.66),("云南",4596.6),("江西",4456.74),("辽宁",4374.63),("黑龙江",3831.22),("陕西",3732.74),("山西",3571.21),("福建",3552),("重庆",2884),("贵州",3476.65),("吉林",2746.22),("甘肃",2557.53),("内蒙古",2470.63),("上海",2301.391),("台湾",2316.2),("新疆",2181.33),("北京",1961.2),("天津",1293.82),("海南",867.15),("香港",709.76),("青海",562.67),("宁夏",630.14),("西藏",300.21),("澳门",55.23)]
# # data = [('全国', 832), ('北京', 0), ('天津', 0), ('河北', 45), ('山西', 36), ('内蒙古', 31), ('辽宁', 0), ('吉林', 8), ('黑龙江', 21), ('上海', 0), ('江苏', 0), ('浙江', 0), ('安徽', 20), ('福建', 0), ('江西', 24), ('山东', 0), ('河南', 38), ('湖北', 28), ('湖南', 40), ('广东', 0), ('广西', 33), ('海南', 5), ('重庆', 14), ('四川', 66), ('贵州', 66), ('云南', 88), ('西藏', 74), ('陕西', 56), ('甘肃', 58), ('青海', 41), ('宁夏', 8), ('新疆', 32), ('台湾', 0), ('香港', 0), ('澳门', 0)]
# # data_new = [('全国', 832), ('北京', 0), ('天津', 0), ('河北', 45), ('山西', 36), ('内蒙古', 31), ('辽宁', 0), ('吉林', 8), ('黑龙江', 21), ('上海', 0), ('江苏', 0), ('浙江', 0), ('安徽', 20), ('福建', 0), ('江西', 24), ('山东', 0), ('河南', 38), ('湖北', 28), ('湖南', 40), ('广东', 0), ('广西', 33), ('海南', 5), ('重庆', 14), ('四川', 66), ('贵州', 66), ('云南', 88), ('西藏', 74), ('陕西', 56), ('甘肃', 58), ('青海', 41), ('宁夏', 8), ('新疆', 32), ('台湾', 0), ('香港', 0), ('澳门', 0)]
#
# Build a choropleth map of China from the extracted (region, value) pairs
# and render it to a standalone HTML file.
# NOTE(review): the variable name `map` shadows the builtin.
map=(
    Map()
    .add("", datas2, "china")
    .set_global_opts(
        title_opts=opts.TitleOpts(title="各省市人口数", subtitle="数据来源:中国统计年鉴(万人)", pos_right="center", pos_top="5%"),
        # Colour scale capped at 150 so mid-range provinces stay distinguishable.
        visualmap_opts=opts.VisualMapOpts(max_=150),
    )
)
# map.render_notebook()
map.render('各省市人口数分布图.html')
|
import numpy as np
__all__ = ['wswd2uv', 'uv2wswd']
# numpy ufuncs (np.multiply, np.hypot, ...) are used instead of plain
# operators so that scalar inputs stay scalars while arrays still broadcast.
def wswd2uv(ws, wd):
    """Decompose wind speed and meteorological direction into (u, v).

    ws: wind speed
    wd: direction the wind blows *from*, in degrees
        (0 = north wind, 90 = east wind, ...)
    Returns: (u, v) components.
    """
    theta = np.deg2rad(wd)
    u = -np.multiply(ws, np.sin(theta))
    v = -np.multiply(ws, np.cos(theta))
    return u, v
def uv2wswd(u, v):
    """Inverse of wswd2uv: recover (speed, direction-from) from (u, v).

    u, v: wind components.
    Returns: (ws, wd) with wd in degrees
             (0 = north wind, 90 = east wind, ...).
    """
    speed = np.hypot(u, v)
    bearing = np.rad2deg(np.arctan2(u, v)) + 180.0
    direction = np.fmod(bearing, 360.0)
    return speed, direction
|
#
# Sample Controlller for SIGVerse
#
import sys
import os
import time
import sig
import math
#
# Sample controller for SIGVerse
#
class AgentController(sig.SigController):
    # Demo SIGVerse agent: poses the arms at start-up and twists its waist
    # when it receives the "Hello" chat message (Python 2 code).
    def onInit(self, evt):
        # Called once when the controller attaches. For non-dynamics
        # simulations, pre-bend both arm joints by 90 degrees.
        # The blanket except keeps init failures from killing the agent.
        try:
            obj = self.getObj()
            if not obj.dynamics() :
                obj.setJointAngle("LARM_JOINT2", math.radians(-90))
                obj.setJointAngle("RARM_JOINT2", math.radians(90))
        except:
            pass
        return
    def onAction(self, evt):
        # Periodic hook; the return value is presumably the delay in seconds
        # until the next onAction call -- TODO confirm against the SIGVerse API.
        return 10.0
    def onRecvMsg(self, evt):
        # React to chat: "Hello" rotates WAIST_JOINT1 via a quaternion
        # (0.707, 0.707, 0, 0 ~= 90 degrees about X); any other message is
        # echoed to stdout (Python 2 print statement).
        try:
            obj = self.getObj()
            msg = evt.getMsg()
            if msg == "Hello" :
                # obj.setJointAngle("WAIST_JOINT1", math.radians(45))
                obj.setJointQuaternion("WAIST_JOINT1", 0.707, 0.707, 0.0, 0.0)
            else:
                print msg
        except:
            print "ERROR"
            pass
        return
    def onCollision(self, evt):
        # Collision events are deliberately ignored.
        return
#
#
#
def createController(name, host, port):
    # Factory entry point called by the SIGVerse runtime to instantiate
    # this module's controller.
    return AgentController(name, host, port)
|
# Load dataset
import pandas as pd
from datetime import datetime
# Fetch the public Malaysian MoH COVID-19 cluster dataset (read_csv already
# returns a DataFrame, so the old pd.DataFrame(...) re-wrap was dropped).
cluster_dataset = pd.read_csv("https://raw.githubusercontent.com/MoH-Malaysia/covid19-public/main/epidemic/clusters.csv")
# BUG FIX: `reset_index(inplace=True)` returns None, so the old code briefly
# bound None to `new_cluster_dataset` before overwriting it. Mutate in place,
# then derive the renamed frame: the former index becomes `cluster_id`.
cluster_dataset.reset_index(inplace=True)
new_cluster_dataset = cluster_dataset.rename(columns = {'index': 'cluster_id'})
"""
Get list of cluster based on id, category, and state
Example category:
[workplace, import, community, highRisk, religious, detentionCenter]
parameter: c_id = int, cat_name = string, state_name = string
return type: Dictionary / Lists
"""
def get_cluster_by(c_id=None, cat_name=None, state_name=None):
    """Look up clusters by category, id, or state -- checked in that order
    of precedence (matching the original).

    Returns a list of record dicts (single dict for id lookups), the
    IndexError instance itself on a bad index, or an error dict when no
    filter argument was supplied.
    """
    if cat_name is not None:
        try:
            matched = new_cluster_dataset.loc[cluster_dataset['category'].isin([cat_name])]
            return matched.to_dict('records')
        except IndexError as e:
            return e
    if c_id is not None:
        try:
            # Positional lookup -> a single record.
            return new_cluster_dataset.iloc[c_id].to_dict()
        except IndexError as e:
            return e
    if state_name is not None:
        try:
            matched = new_cluster_dataset.loc[cluster_dataset['state'].str.contains(state_name)]
            return matched.to_dict('records')
        except IndexError as e:
            return e
    return {'error':'No data is selected'}
"""
Count cluster by category (workplace, community, education, highRisk, religous, detentionCenter, import)
Count total active and total overall cluster
"""
def cluster_count():
    """Summary totals: clusters per category, active clusters, and the
    overall cluster count."""
    by_category = cluster_dataset['category'].value_counts().to_dict()
    active_mask = cluster_dataset['status'] == 'active'
    return {
        'total_active': cluster_dataset[active_mask].index.size,
        'total_cluster': cluster_dataset.index.size,
        'total_cluster_by_cat': by_category,
        'total_cat_count': len(by_category),
    }
"""
Chartjs config and data for cluster base on category
"""
def chart():
    """Chart.js doughnut-chart payload for the cluster-category breakdown."""
    counts = cluster_dataset['category'].value_counts()
    return {
        'chart_title': "Cluster Category",
        'chart_config': 'doughnut',  # Chart.js chart type
        'cat_labels': counts.index.tolist(),
        'cat_data': counts.tolist(),
    }
"""
Get update date dataset from github
Ref: https://github.com/MoH-Malaysia/covid19-public/tree/main/epidemic#cluster-analysis
Get latest cluster record base on announced_date from dataset
"""
def update_date():
    """Announcement date of the most recent row in the dataset (the source
    CSV is append-ordered, so the last row is the newest)."""
    last_row = cluster_dataset.iloc[-1:]
    return last_row['date_announced'].item()
"""
Get latest cluster record base on announced_date from dataset
select important columns only, to reduce load time.
"""
def new_cluster():
    """Active clusters announced on the latest date, highest case count
    first. Only a reduced column set is kept to cut the payload size."""
    # last update date
    lastupdate = update_date()
    wanted = [
        'cluster',
        'state',
        'district',
        'date_announced',
        'category',
        'status',
        'cases_new',
        'cases_total',
        'cluster_id',
    ]
    trimmed = pd.DataFrame(new_cluster_dataset, columns=wanted)
    try:
        # Newest active clusters, ordered from high case count to low.
        latest_active = trimmed.query(
            f"date_announced == '{lastupdate}' and status == 'active'"
        ).sort_values('cases_total', ascending=False)
        return {
            'total_new_cluster': len(latest_active),
            'cluster_list_data': latest_active.to_dict('records'),
        }
    except IndexError as e:
        return e
"""
Search scope cluster name, state, and district
parameter string
"""
def search_cluster_data(search):
    """Case-insensitive substring search over the combined
    "cluster | state | district" strings built by compile_search().

    Returns {'result': [...], 'len_result': n}; the IndexError instance on
    a bad index; or an error dict when *search* is None (BUG FIX: the
    original had no else branch and silently returned None in that case).
    """
    if search is None:
        return {'error': 'No data is selected'}
    try:
        compile_data = pd.DataFrame(compile_search())
        result = compile_data.loc[compile_data['cluster'].str.contains(search, case=False)].to_dict('records')
        return {
            'result': result,
            'len_result': len(result)
        }
    except IndexError as e:
        return e
"""
count cluster by state name
"""
def count_state_cluster(state_name):
    """Number of clusters whose state column contains *state_name*."""
    mask = cluster_dataset['state'].str.contains(state_name)
    return new_cluster_dataset.loc[mask].index.size
"""
ajax call
list of cluster
"""
def cluster_list():
    """Full cluster listing (reduced column set) as a list of record dicts,
    served to an AJAX endpoint."""
    wanted = [
        'date_announced',
        'cluster',
        'state',
        'district',
        'category',
        'status',
        'cluster_id',
    ]
    try:
        reduced = pd.DataFrame(new_cluster_dataset, columns=wanted)
        return reduced.to_dict('records')
    except IndexError as e:
        print(e)
        return e
"""
Combine cluster name, state, and district data in one column `cluster`
return dictionary
"""
def compile_search():
    """Build search rows merging cluster, state and district into a single
    'cluster' string ("name | state | district"), keeping cluster_id.

    Returns a list of {'cluster': ..., 'cluster_id': ...} dicts, or the
    IndexError instance on a bad index.
    """
    search_data = []
    try:
        reduced = pd.DataFrame(new_cluster_dataset,
                               columns=['cluster', 'state', 'district', 'cluster_id'])
        for row in reduced.to_dict('index').values():
            combined = (row['cluster'].rstrip() + " | "
                        + row['state'].rstrip() + " | "
                        + row['district'].rstrip())
            search_data.append({
                'cluster': combined,
                'cluster_id': str(row['cluster_id']),
            })
        return search_data
    except IndexError as e:
        return e
import numpy as np
import itertools
from collections import defaultdict
from datetime import datetime
import math
import sys
import os
from scipy.stats import percentileofscore
import graphlab as gl
import graphlab.aggregate as agg
# Point GraphLab's scratch space at a large local disk and allow up to 48
# parallel PyLambda workers for .apply() lambdas.
gl.set_runtime_config('GRAPHLAB_CACHE_FILE_LOCATIONS','/home/mraza/tmp/')
gl.set_runtime_config('GRAPHLAB_DEFAULT_NUM_PYLAMBDA_WORKERS', 48)
# Sample raw CDR rows (columns X1..X7):
# 2015-10-01 12:08:41,1046885725705,1046910448494,GSM,ITN006,,1.5
# 2015-10-01 16:55:32,1046885725705,1046910448494,GSM,ITN010,,1.5
def distance(l1_lat, l1_lng, l2_lat, l2_lng):
    """Great-circle (haversine) distance in km between two lat/lng points.

    Inputs may be numeric strings (the CDR files are read as str). Any
    non-numeric coordinate degrades all four to 0.0 -- preserving the
    original best-effort behavior -- but the except is now narrowed to the
    conversion errors float() can actually raise.
    """
    R = 6371  # mean Earth radius in km
    try:
        l1_lat, l1_lng, l2_lat, l2_lng = (
            float(l1_lat), float(l1_lng), float(l2_lat), float(l2_lng))
    except (TypeError, ValueError):
        # Bad/missing coordinates: collapse to the origin -> 0 km.
        l1_lat, l1_lng, l2_lat, l2_lng = 0.0, 0.0, 0.0, 0.0
    dLat = (l1_lat - l2_lat) * math.pi / 180
    dLon = (l1_lng - l2_lng) * math.pi / 180
    a = (math.sin(dLat / 2) * math.sin(dLat / 2)
         + math.cos(l1_lat * math.pi / 180) * math.cos(l2_lat * math.pi / 180)
         * math.sin(dLon / 2) * math.sin(dLon / 2))
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return R * c  # distance in km
def _calc_rog(seq):
    """Mean distance of the towers in *seq* from their centroid -- a
    radius-of-gyration-style mobility measure.

    Tower ids missing from the module-level `loc_dct` are skipped.
    Returns -1.0 when no tower in *seq* has known coordinates.
    """
    lats, lngs = [], []
    for tower in seq:
        if tower in loc_dct:
            lat, lng = loc_dct[tower]
            lats.append(lat)
            lngs.append(lng)
    if not lats:
        return -1.0
    centroid_lat = np.nanmean(lats)
    centroid_lng = np.nanmean(lngs)
    total = sum(distance(la, ln, centroid_lat, centroid_lng)
                for la, ln in zip(lats, lngs))
    return total / len(lats)
def _diversity(seq):
#print 'len(seq)', len(seq)
# print seq
dc=defaultdict(int)
for each in seq:
dc[each]+=1
unique_count=len(dc.keys())
total_comm=float(np.sum(dc.values()))
for each in dc:
dc[each]=dc[each]/total_comm
numerator=np.sum([dc[each]*math.log(dc[each] )for each in dc])
denominator=-math.log(unique_count)
if unique_count!=1:
ret= float(numerator/denominator)
#print ret
return ret
else:
return -1.0
def _avg_distance(x, y):
    """Average geographic distance between paired caller/receiver cells.

    x, y: parallel sequences of tower ids. Pairs where either id is the
    empty string, or either id is missing from the module-level `loc_dct`,
    contribute nothing. Returns -1 when either input is empty or when no
    pair could be resolved to coordinates.
    """
    if len(x) == 0 or len(y) == 0:
        return -1
    total = 0
    resolved = 0
    for cell_a, cell_b in zip(x, y):
        if cell_a == '' or cell_b == '':
            continue
        if cell_a in loc_dct and cell_b in loc_dct:
            lat1, lng1 = loc_dct[cell_a]
            lat2, lng2 = loc_dct[cell_b]
            total += distance(lat1, lng1, lat2, lng2)
            resolved += 1
    if resolved == 0:
        # No tuple had both caller and receiver cells resolvable.
        return -1
    return total / resolved
def _avgdiff(seq):
lst=[]
for d1 in seq:
#print d1
lst.append(d1)#datetime.strptime(d1, "%Y-%m-%d %H:%M:%S"))
#print 'Type {} {}'.format( lst, type(lst[0]))
lst2=[ (lst[i]- lst[i+1]).seconds for i in range(len(lst)-1)]
if len(lst2)==0:
return 0.0
else:
return float(np.nanmean(lst2))
def _unique_count(x):
return len(np.unique(x))
def _rank(x,y):
# print x, y
ret= percentileofscore(x,y)
return ret
# print ret, type(ret)
# if type(ret)!=float:
# return -1
# else:
# return ret
def getSeedDf(filename):
    # Load the seed-user list (CSV) into a GraphLab SFrame.
    seeds_df=gl.SFrame.read_csv(filename)
    return seeds_df
def castDate(x):
    """Parse a timestamp in either 'YYYYMMDDTHHMMSS' or 'YYYY-MM-DD HH:MM'
    form (tried in that order, as before).

    Unparseable values fall back to the sentinel date 2017-02-28 so a bad
    row never aborts the pipeline. The original's bare excepts and dead
    `pass` statements are replaced by a format loop with narrowed errors.
    """
    for fmt in ("%Y%m%dT%H%M%S", "%Y-%m-%d %H:%M"):
        try:
            return datetime.strptime(x, fmt)
        except (TypeError, ValueError):
            continue
    return datetime.strptime("2017-02-28", "%Y-%m-%d")
def processDate(df):
    # Normalize the raw 'Date' column on the (GraphLab) SFrame: parse it,
    # derive a 'Day' string (Y-M-D) and an 'IsWeekend' flag, then swap the
    # parsed column in under the original 'Date' name.
    # NOTE(review): nothing is returned -- this relies on remove_column /
    # rename mutating `df` itself; confirm these SFrame ops are in-place.
    df['Date2']=df['Date'].apply(lambda x:castDate(x))
    df['Day']=df['Date2'].apply(lambda x:str(x.year)+'-'+str(x.month)+'-'+str(x.day))
    df['IsWeekend']=df['Date2'].apply(lambda x:x.weekday() in [5,6])
    df=df.remove_column('Date')
    df.rename({'Date2':'Date'})
def getDf(filename, header, split_datetime=False):
    # Load a CDR CSV into an SFrame, apply the canonical column names given
    # in *header*, optionally expand 'Date' into Date/Day/IsWeekend (same
    # transform as processDate), and tag every row as outgoing ('Out').
    df=gl.SFrame.read_csv(filename)
    df=df.rename(header)
    if split_datetime:
        df['Date2']=df['Date'].apply(lambda x:castDate(x))
        df['Day']=df['Date2'].apply(lambda x:str(x.year)+'-'+str(x.month)+'-'+str(x.day))
        df['IsWeekend']=df['Date2'].apply(lambda x:x.weekday() in [5,6])
        df=df.remove_column('Date')
        df.rename({'Date2':'Date'})
    df['Direction']='Out'
    return df
def load_locations(loc_filename='./telenor_sites3.csv'):
    """Read a 'tower,lat,lng' CSV into a {tower_id: (lat, lng)} dict.

    FIX: the header row was skipped with `fin.next()`, which only exists on
    Python 2 file objects; the builtin `next(fin)` is equivalent and works
    on Python 2.6+ and Python 3.
    """
    dc = {}
    with open(loc_filename) as fin:
        next(fin)  # skip the header row
        for line in fin:
            tower, lat, lng = line.split(',')
            dc[tower] = (float(lat), float(lng))
    return dc
def generate_pivots(cols_list):
    """All pivot combinations for aggregation: ['CallerId'] alone, plus
    ['CallerId', *combo] for every combination of the remaining columns.

    FIXES: Python 2-only `xrange` replaced by `range` (identical iteration
    behavior), and the stray no-op `comb_params` expression statement
    before the return was removed.
    """
    keyCol = 'CallerId'
    comb_params = [[keyCol]]  # the bare key pivot is always included
    # Never pivot on the key itself, on measure columns, or on the
    # partition columns (those are handled separately).
    exclude = [keyCol, 'Duration', 'Date2', 'Date', 'Type', 'alter', 'GeoDistance']
    pivotable = [c for c in cols_list
                 if c not in exclude and c not in partition_columns]
    for i in range(1, len(pivotable) + 1):
        els = [[keyCol] + list(combo)
               for combo in itertools.combinations(pivotable, i)]
        comb_params.extend(els)
    return comb_params
# df_ret=apply_aggregate(voice_df2, comb, operations.keys(),operations , postfix)
def apply_aggregate(df2, pivot, columns, operations, postfix):
    """Group *df2* by *pivot* and apply every aggregator listed in
    *operations* to each non-pivot column.

    Output columns are named '<op>(<col>)_Per_<postfix>' using the
    module-level `operations_names_dict` for the <op> part.
    """
    agg_exprs = {}
    for col in set(columns) - set(pivot):
        for op in operations[col]:
            name = operations_names_dict[op] + '(' + col + ')' + '_Per_' + postfix
            agg_exprs[name] = op(col)
    return df2.groupby(key_columns=pivot, operations=agg_exprs)
def align_for_network_aggregate(features_df, raw_df):
    """Left-join each caller's feature row onto the raw edge list
    (CallerId matched against ReceiverId), so that a caller's features can
    later be aggregated over the people they contacted."""
    edges = raw_df[["ReceiverId"]]
    joined = features_df.join(edges, {'CallerId': 'ReceiverId'}, 'left')
    if 'ReceiverId' in joined.column_names():
        joined = joined.remove_column('ReceiverId')
    return joined
def network_aggregate(joined_df, postfix):
    """Aggregate every feature column over each caller's network with
    MEAN/STD/SUM. COUNT is deliberately omitted -- it would simply equal
    the caller's degree."""
    feature_cols = set(joined_df.column_names()) - set(['CallerId', 'ReceiverId'])
    exprs = {}
    for col in feature_cols:
        for op in (agg.MEAN, agg.STD, agg.SUM):
            name = operations_names_dict[op] + '(' + col + ')' + '_Per_' + postfix
            exprs[name] = op(col)
    return joined_df.groupby(key_columns=['CallerId'], operations=exprs)
def remove_column(df_ret, key):
    """Drop every column whose name contains the substring *key* and
    return the resulting frame."""
    for name in df_ret.column_names():
        if key in name:
            df_ret = df_ret.remove_column(name)
    return df_ret
def apply_special_operations(special_operations, pivot,df_ret, postfix):
    # Apply the custom aggregators to the CONCAT-collected ('joincollect')
    # columns produced by apply_aggregate. Plain keys map a unary op over
    # one collected column; 2-tuple keys feed two collected columns to a
    # binary op (e.g. _avg_distance over caller/receiver cell lists).
    # Columns that are part of the current pivot are excluded.
    final_list=list(set(special_operations)-set(pivot))
    print 'final_list',final_list
    print 'pivot',pivot
    for key in final_list:
        for op in special_operations[key]:
            if type(key)!=tuple:
                df_ret[operations_names_dict[op]+'('+key+')'+'_Per_'+postfix]=df_ret['joincollect('+key+')'+'_Per_'+postfix].apply(lambda x:op(x))
            elif len(key)==2 and key[0] in final_list and key[1] in final_list:
                df_ret[operations_names_dict[op]+'('+key[0]+';'+key[1]+')'+'_Per_'+postfix]=df_ret.apply(lambda x: op(x['joincollect('+key[0]+')'+'_Per_'+postfix],
                                                                                                                    x['joincollect('+key[1]+')'+'_Per_'+postfix]))
    return df_ret
def composite_reduce(df_ret, comb):
    """Reduce a multi-pivot aggregate back to one row per CallerId.

    Only STD is applied to each derived column: mean-of-means equals the
    original mean (and similarly for count/min/max/sum), so those would be
    redundant at this level.
    """
    derived = set(df_ret.column_names()) - set(comb)
    ops = {col: [agg.STD] for col in derived}
    return apply_aggregate(df_ret, ['CallerId'], derived, ops, postfix='CallerId')
def merge_dfs_from_dict(features_dc):
    # Outer-join every per-pivot feature frame (skipping Date-keyed ones)
    # into a single wide frame keyed by CallerId.
    lst=[k for k in features_dc.keys() if 'Date' not in k]
    final_merged_df=features_dc[lst[0]]
    print final_merged_df.shape
    for key in lst[1:]:
        print key
        temp=features_dc[key]
        final_merged_df=final_merged_df.join(temp, 'CallerId', 'outer')
        print 'after join count', final_merged_df.shape
    return final_merged_df
def merge_sorted_dfs_from_dict(features_dc):
    # Faster alternative to merge_dfs_from_dict: instead of joining, copy
    # columns across frames directly. This is only valid because every
    # frame has previously been sorted by CallerId with identical row sets.
    lst=[k for k in features_dc.keys() if 'Date' not in k]
    final_merged_df=features_dc[lst[0]]
    print final_merged_df.shape
    for key in lst[1:]:
        for c in features_dc[key].column_names():
            if c!='CallerId':
                final_merged_df[c]=features_dc[key][c]
        print 'after join count', final_merged_df.shape
    return final_merged_df
def network_rank(raw_df,features_df):
    # For every feature, compute each caller's percentile rank of their own
    # value within the values collected over their network edges, keeping
    # only the resulting 'rank(...)' columns.
    raw_df=raw_df[["ReceiverId","CallerId"]]
    joined_df=features_df.join(raw_df, 'CallerId', 'left')
    joined_df=joined_df.remove_column('ReceiverId')
    groupBy=['CallerId']
    cols=set(joined_df.column_names())-set(['CallerId','ReceiverId'])
    # Collect each feature's values into per-caller lists ('joincollect').
    funcs=[agg.CONCAT]
    exprs={}
    for c in cols:
        for f in funcs:
            exprs[operations_names_dict[f]+'('+c+')']=f(c)
    df_ret=joined_df.groupby(key_columns=groupBy, operations=exprs)
    # Re-attach the caller's own feature values next to the collected lists.
    df_ret=df_ret.join(features_df, 'CallerId', 'left')
    for each in df_ret.column_names():
        if 'CallerId'!=each and 'joincollect' not in each:
            print each
            # percentileofscore of the caller's value within the collected list.
            df_ret['rank('+each+')']=df_ret.apply(lambda x:_rank(x['joincollect('+each+')'],x[each]))
    # Drop everything except CallerId and the rank columns.
    for each in df_ret.column_names():
        if each!='CallerId' and 'rank' not in each:
            df_ret=df_ret.remove_column(each)
    return df_ret
def feature_name_counting(df, comb):
    # Debug helper: count generated feature columns per source-column
    # pattern and print any column names that matched no known pattern.
    # Purely diagnostic -- nothing is returned or modified.
    print 'feature_name_counting', comb,' ',df.shape
    from collections import defaultdict
    header=df.column_names()
    lst=['(Duration)','(ReceiverCell)','(CallerCell)','(ReceiverId)','(CallerCell;ReceiverCell)','(Date)','(Day)']
    dc=defaultdict(int)
    dc_names=defaultdict(list)
    to_remove=[]
    for each in header:
        for pattern in lst:
            if pattern in each:
                dc[pattern]+=1
                dc_names[pattern].append(each)
                to_remove.append(each)
    header2=list(set(header)-set(to_remove))
    print 'Inside Feature name counting func: unmatched', header2
    print dc
def analyze(voice_df2,seeds_df,filter_name='', join_with_seeds=True):
    # Full feature pipeline for one (possibly partition-filtered) CDR frame:
    #   1. aggregate per pivot combination, 2. apply special ops,
    #   3. reduce multi-pivot results to CallerId level, 4. network ranks,
    #   5. network-level aggregates, 6. merge everything, optionally join
    #   with the seed list and prefix column names with the partition name.
    # Returns (features_frame, {pivot_tuple: column_names}).
    comb_params=[each for each in generate_pivots(voice_df2.column_names())]
    keyCol='CallerId'
    features_dc, features_dc_names={},{}
    for i,comb in enumerate(comb_params):
        print '*** i, comb', i, comb
        postfix=':'.join(x for x in comb)
        print '*** Base Aggregation'
        df_ret=apply_aggregate(voice_df2, comb, operations.keys(),operations , postfix)
        print '*** Shape after base aggregate', df_ret.shape
        print '*** Special Operations'
        df_ret=apply_special_operations(special_operations,comb, df_ret, postfix)
        print '*** Remove extra columns'
        # The CONCAT-collected helper columns are only needed by the special ops.
        df_ret=remove_column(df_ret, 'joincollect')
        print '*** Shape after special operations', df_ret.shape
        print '*** Reducing to the base level'
        if len(comb)>1:
            df_joined=composite_reduce(df_ret, comb)
        else:
            df_joined=df_ret
        # Reduce the extra columns from the df_joined
        for c in comb:
            if c!=keyCol:
                if c in df_joined.column_names():
                    df_joined=df_joined.remove_column(c)
        features_dc[tuple(comb)]=df_joined
        features_dc_names[tuple(comb)]=df_joined.column_names()
        print '*** Shape after composite reduce', df_joined.shape
        feature_name_counting(features_dc[tuple(comb)], comb)
    # Only calculating network rank for the first level features
    network_rank_df=network_rank(voice_df2, features_dc['CallerId',])
    network_rank_df=remove_column(network_rank_df, 'joincollect')
    print '*** shape of the network rank df is ', network_rank_df.shape
    #merged_df=merge_dfs_from_dict(features_dc)
    # Sort every frame by CallerId so the column-copy merge below is valid.
    for each in features_dc:
        print each
        features_dc[each]=features_dc[each].sort('CallerId')
    network_rank_df=network_rank_df.sort('CallerId')
    # This join needs to be changed
    features_dc['network_rank_CallerId']=network_rank_df
    merged_df=merge_sorted_dfs_from_dict(features_dc)#.join(network_rank_df, on='CallerId', how='left')
    joined_df=align_for_network_aggregate(merged_df, voice_df2)
    joined_df=joined_df.sort('CallerId')
    callerids=joined_df['CallerId'].unique()
    chunks = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
    # Very large populations are aggregated in four chunks to bound memory.
    if len(callerids)>2000000:
        callerIdList=chunks(callerids, len(callerids)/4)
        net_df_lst=[]
        for i in range(len(callerIdList)):
            joined_df_temp=joined_df.filter_by(callerIdList[i],'CallerId')
            net_df_temp=network_aggregate(joined_df_temp, postfix='Network')
            net_df_lst.append(net_df_temp)
        net_df=net_df_lst[0]
        for i in range(1,len(net_df_lst)):
            net_df=net_df.append(net_df_lst[i])
    else:
        net_df=network_aggregate(joined_df, postfix='Network')
    print '*** Shape of the network aggregate df ',net_df.shape
    merged_net_df=merged_df.join(net_df, on='CallerId', how='left')
    #merged_net_df=merged_df
    if join_with_seeds:
        # Keep only (all of) the seed users.
        merged_net_df=merged_net_df.join(seeds_df, on='CallerId', how='right')
    if filter_name!='':
        # Commas inside column names would break the CSV export; replace
        # them, then prefix every column with the partition label.
        rename_dc={}
        for each in set(merged_net_df.column_names())-set(['CallerId']):
            rename_dc[each]=each.replace(',',';')
        merged_net_df=merged_net_df.rename(rename_dc)
        rename_dc={}
        for each in set(merged_net_df.column_names())-set(['CallerId']):
            rename_dc[each]='Partition:'+filter_name+';'+each
        merged_net_df=merged_net_df.rename(rename_dc)
    print '*** Final Shape', merged_net_df.shape
    return merged_net_df, features_dc_names
# Column positions in the raw CDR files.
dc_header={'DateTime':0,'CallerId':1,'ReceiverId':2,'Type':3,'CallerCell':4,'ReceiverCell':5, 'Duration':6}
# Human-readable name for each aggregator, used when building output column names.
operations_names_dict={agg.COUNT:'count',agg.SUM:'sum',agg.MEAN:'mean',agg.STD:'stddev',agg.CONCAT:'joincollect',
                       agg.MIN:'min',agg.MAX:'max',_avgdiff:'avgdiff',_calc_rog:'calc_rog',
                       _diversity:'diversity', _avg_distance:'avg_distance',_unique_count:'unq'}
# Built-in aggregators applied to each raw column in the base aggregation.
operations={'Duration':[agg.MEAN,agg.STD,agg.SUM],'ReceiverId':[agg.COUNT,agg.CONCAT],'CallerCell':[ agg.CONCAT],
            'ReceiverCell':[agg.CONCAT],'Day':[agg.CONCAT],'Date':[agg.CONCAT],'GeoDistance':[agg.COUNT,agg.MEAN,agg.STD,agg.MIN,agg.MAX,agg.SUM]}
# Custom aggregators applied afterwards to the CONCAT-collected lists.
special_operations={#'Date':[_avgdiff],
                    'Day':[_unique_count],'ReceiverId':[_unique_count],
                    'CallerCell':[ _unique_count],
                    'ReceiverCell':[ _unique_count]}
# Columns that split the data into separate feature sets rather than pivot it.
partition_columns=['Alter','Direction','IsWeekend','Type']
def swap_directions(sf):
    """Return a copy of *sf* with caller/receiver roles swapped (both id
    and cell columns) and Direction set to 'In', turning outgoing rows
    into the receivers' incoming rows."""
    flipped = sf.copy()
    # Three-step rename via temporaries avoids a name collision mid-swap.
    flipped = flipped.rename({'ReceiverId': 'Temp', 'ReceiverCell': 'TempCell'})
    flipped = flipped.rename({'CallerId': 'ReceiverId', 'CallerCell': 'ReceiverCell'})
    flipped = flipped.rename({'Temp': 'CallerId', 'TempCell': 'CallerCell'})
    flipped['Direction'] = 'In'
    return flipped
if __name__=='__main__':
    # Usage: script <cdr_file> <seed_file> <output_key> <output_dir>
    if len(sys.argv)!=5:
        print 'Wrong ags, pass CDR file, seed file, key_for_output and output_dir'
        sys.exit(-1)
    fname=sys.argv[1] # CDR file
    sample=sys.argv[2] # seed file
    key = sys.argv[3] # key
    out_dir=sys.argv[4]
    # Read everything as strings; numeric columns are cast explicitly below.
    df=gl.SFrame.read_csv(fname, column_type_hints=str)
    if 'sms' in fname:
        # SMS records carry no call duration; synthesize a zero column.
        df['Duration']=0.0
    df['Duration']=df['Duration'].astype(float)
    df['GeoDistance']=0.0
    # Cell ids are only unique within a LAC, so fold the LAC into the id.
    if 'CallerLAC' in df.column_names():
        df['CallerCell']=df.apply(lambda x:x['CallerCell']+'_'+x['CallerLAC'])
        df=df.remove_column('CallerLAC')
    if 'ReceiverLAC' in df.column_names():
        df['ReceiverCell']=df.apply(lambda x:x['ReceiverCell']+'_'+x['ReceiverLAC'])
        df=df.remove_column('ReceiverLAC')
    #df['Duration']=df['Duration'].astype(float)
    processDate(df)
    # Seed list is a headerless single column; name it CallerId to join on.
    sample=gl.SFrame.read_csv(sample, header=False, column_type_hints=df['CallerId'].dtype()).rename({'X1':'CallerId'})
    ret, features_dc_names=analyze(df,sample, key)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    ret.export_csv(out_dir+'//features_'+key+'.csv')
|
"""
persistor base class
"""
class PersistorBase():
    """Abstract interface for feature persistence backends.

    Subclasses must override write/read/delete; calling any of them on
    the base class raises NotImplementedError.
    """

    def __init__(self):
        pass

    def write(self, feature, dumps, **kwargs):
        """Persist *feature* with payload *dumps*; must be overridden."""
        raise NotImplementedError("Persistor write method implementation error!")

    def read(self, uid, **kwargs):
        """Load the record stored under *uid*; must be overridden."""
        raise NotImplementedError("Persistor read method implementation error!")

    def delete(self, uid, **kwargs):
        """Remove the record stored under *uid*; must be overridden."""
        raise NotImplementedError("Persistor delete method implementation error!")
# Generated by Django 3.0.6 on 2020-05-07 16:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the Cart model: drops the
    denormalized cart_total_price column and adds a last_update
    timestamp defaulting to the current time."""

    dependencies = [
        ('mall', '0013_auto_20200508_0028'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cart',
            name='cart_total_price',
        ),
        migrations.AddField(
            model_name='cart',
            name='last_update',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
# Complete the following exercises
# a. Find the average of following numbers, assign it to a variable, print the average
#    and total numbers used for the average:
# i. For Example: For numbers 44, 64, 88, 53, 89, when you run the file it
#    should print something like: The average of 5 given numbers is : 67.6.
num1 = 44
num2 = 64
num3 = 88
num4 = 53
num5 = 89
total_sum = num1 + num2 + num3 + num4 + num5
average = total_sum / 5
# Bug fix: the original concatenated a float directly to a str, which
# raises TypeError; convert with str() first.
print("The average of the 5 given numbers is: " + str(average))
# ii. 39, 45, 55, 90, 95, 96
# iii. 54, -45, -10, 90
# iv. 55,65,75,95,32
|
import pandas as pd
import matplotlib.pyplot as plt
import os
# Directory containing this module; all CSV inputs are expected beside it.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

# Registry of input tables: each entry maps a logical name to its CSV
# path; read_tables() later stores the loaded DataFrame under 'table'.
_CSV_FILES = {
    'services_exports': 'SGP_services.csv',
    'services_imports': 'SGP_services_imports.csv',
    'employment': 'employment_by_industry.csv',
    'services_expense': 'services_expenditures.csv',
    'services_emission_factors': 'services_emission_factors.csv',
}
data_tables = {name: {'filename': os.path.join(ROOT_DIR, csv_name)}
               for name, csv_name in _CSV_FILES.items()}

# Categorical columns shared by the services trade tables.
CAT_FIELDS = ['Year', 'Region', 'Country']
def read_tables():
    """Load every registered CSV into its data_tables entry (key 'table')."""
    for entry in data_tables.values():
        entry['table'] = pd.read_csv(entry['filename'])
#employment
#------------------------------------------------------------------------
# Categorical columns of the employment table; every other column is an industry.
EMP_CAT_FIELDS = ['Year', 'Occupation group']
def emp_analysis():
    """Entry point for the employment analysis: load the CSVs, then clean
    the employment table.

    The original also took a copy of the employment table into an unused
    local; that dead assignment has been removed.
    """
    read_tables()
    emp_cleanup()
def emp_cleanup():
    """Normalize the employment table and cache derived metadata.

    Zero-fills missing counts and records the distinct occupation groups
    plus the industry column names on the table's registry entry.
    """
    entry = data_tables['employment']
    cleaned = entry['table'].copy()
    cleaned.fillna(0, inplace=True)
    entry['Occupation groups'] = list(cleaned['Occupation group'].unique())
    entry['Industries'] = [c for c in cleaned.columns if c not in EMP_CAT_FIELDS]
    entry['table'] = cleaned
def emp_by_occp_group(industries=None):
    """Pivot employment to Year x Occupation group, summed over *industries*.

    industries: subset of industry columns to total; defaults to every
    industry recorded by emp_cleanup().
    """
    emp = data_tables['employment']
    tbl = emp['table'].copy()
    if industries is None:
        industries = emp['Industries']
    tbl['all industries'] = tbl[industries].sum(axis=1)
    pivot = pd.pivot_table(tbl, index=['Year'], columns=['Occupation group'],
                           values=['all industries'], aggfunc=sum)
    return pivot['all industries']
def emp_by_industry(occp_groups=None):
    """Sum employment per industry (indexed by Year) across *occp_groups*.

    occp_groups defaults to every group recorded by emp_cleanup().  Each
    group's rows are aligned on Year and added element-wise.
    """
    emp = data_tables['employment']
    tbl = emp['table'].copy()
    if occp_groups is None:
        occp_groups = emp['Occupation groups']
    industries = emp['Industries']
    tbl.set_index('Year', inplace=True)
    per_group = (tbl.loc[tbl['Occupation group'] == g, industries] for g in occp_groups)
    return sum(per_group)
def plot_tbl(tbl, ax):
    """Draw *tbl* as a stacked bar chart on *ax*, legend outside on the right.

    The axes is narrowed to 70% of its width to make room for the legend.
    """
    shrink = 0.7
    fontsize = 6  # kept for compatibility; the legend size is set inline below
    pos = ax.get_position()
    ax.set_position([pos.x0, pos.y0, pos.width * shrink, pos.height])
    tbl.plot(kind='bar', stacked=True, ax=ax)
    plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), prop={'size': 6})
#services
#------------------------------------------------------------------------
def services_analysis():
    """Entry point for the services analysis: load all CSVs, then clean
    the services trade tables."""
    read_tables()
    srv_cleanup()
def srv_cleanup():
    """Replace each region's 'Total' rows with computed '<region> others'
    rows in both services trade tables."""
    for name in ('services_exports', 'services_imports'):
        data_tables[name]['table'] = add_others(data_tables[name]['table'])
def srv_by_country(tbl_flag='services_exports'):
    """Pivot a services table to Year x Country totals.

    Sums every non-categorical column into 'srv totals' and pivots.
    Works on a copy: the original added the totals column to the shared
    table cached in data_tables, mutating it for every later caller.
    """
    tbl = data_tables[tbl_flag]['table'].copy()
    value_cols = [c for c in tbl.columns if c not in CAT_FIELDS]
    tbl['srv totals'] = tbl[value_cols].sum(1)
    return pd.pivot_table(tbl, index=['Year'],
                          columns=['Country'], values='srv totals', aggfunc=sum)
def srv_by_service(tbl_flag='services_exports'):
    """Pivot a services table to Year x service-category sums."""
    tbl = data_tables[tbl_flag]['table']
    value_cols = [c for c in tbl.columns if c not in CAT_FIELDS]
    return pd.pivot_table(tbl, index=['Year'], values=value_cols, aggfunc=sum)
def get_region_slice(region, tbl):
    """Return the rows of *tbl* whose Region equals *region*."""
    # Renamed the local: the original bound it to `slice`, shadowing the builtin.
    region_rows = tbl[tbl.Region == region]
    return region_rows
def add_others(tbl):
    """Apply add_region_others() for every region in *tbl*, then zero-fill NaNs."""
    for region in list(tbl.Region.unique()):
        tbl = add_region_others(tbl, region)
    tbl.fillna(0, inplace=True)
    return tbl
def add_region_others(tbl, r):
    """Replace region *r*'s 'Total' row(s) with an '<r> others' remainder row.

    The remainder per Year is the reported regional Total minus the sum
    of the individually-listed countries.  If the region has no Total
    row, *tbl* is returned unchanged.
    """
    slc = get_region_slice(r, tbl).copy()
    slc_total = slc[slc.Country == 'Total']
    if len(slc_total) > 0:
        slc_total = slc_total[[x for x in tbl.columns
                               if x not in ['Country', 'Region']]].fillna(0)
        slc_total.set_index('Year', inplace=True)
        slc_extotal = pd.pivot_table(slc[slc.Country != 'Total'].fillna(0),
                                     index=['Year'],
                                     values=[x for x in tbl.columns
                                             if x not in CAT_FIELDS],
                                     aggfunc=sum)
        # Remainder per year = reported total - sum of listed countries.
        ex = slc_total - slc_extotal
        ex['Region'] = r
        ex['Country'] = r + ' others'
        ex.reset_index(inplace=True)
        tbl.drop(slc[slc.Country == 'Total'].index, inplace=True)
        # Bug fix: DataFrame.append was removed in pandas 2.0; concat is
        # the drop-in replacement (same default index behavior).
        tbl = pd.concat([tbl, ex])
    return tbl
#services emissions intensity
#------------------------------------------------------------------------
# Categorical columns of the services-expenditure table.
SRV_EXP_CAT_FIELDS = ['Year', 'Services sector']
def serv_exp_cleanup():
    """Zero-fill NaNs in the expenditure and emission-factor tables; the
    cleaned copies replace the cached 'table' entries."""
    for name in ('services_expense', 'services_emission_factors'):
        cleaned = data_tables[name]['table'].copy()
        cleaned.fillna(0, inplace=True)
        data_tables[name]['table'] = cleaned
def srv_emissions_intensity_tbl():
    """Return a long-format table joining expenses with emission factors.

    Melts the expenditure table to one row per (Year, sector, expense
    type), merges the matching emission factor, and derives
    'Emissions tCO2' = SGD value * kgCO2/SGD / 1000.
    """
    expenses = data_tables['services_expense']['table'].copy()
    factors = data_tables['services_emission_factors']['table'].copy()
    expense_cols = [c for c in expenses.columns if c not in SRV_EXP_CAT_FIELDS]
    melted = pd.melt(expenses, id_vars=SRV_EXP_CAT_FIELDS,
                     value_vars=expense_cols, var_name='Expense type')
    joined = pd.merge(melted, factors, on='Expense type')
    joined['Emissions tCO2'] = joined['value'] * joined['Emissions factor kgCO2/SGD'] / 1000
    return joined
def srv_expense_by_group(units='value', year=2017, showall=False):
    """Pivot the intensity table to (Year, sector) x Group for *units*.

    units: 'value' (SGD) or 'Emissions tCO2'.
    showall: when False, the 'Excluded' group column is dropped.
    Returns only the slice for *year*.
    """
    intensity = srv_emissions_intensity_tbl()
    pivot = pd.pivot_table(intensity, index=['Year', 'Services sector'],
                           columns=['Group'], values=[units], aggfunc=sum)[units]
    if not showall:
        pivot = pivot[[c for c in pivot.columns if c != 'Excluded']].copy()
    return pivot.loc[year]
|
from flask_wtf import FlaskForm
from wtforms.validators import InputRequired, Length, EqualTo, Email, DataRequired, Optional
from wtforms import Form, StringField, SelectField, TextAreaField, PasswordField, IntegerField, SubmitField, DateField, FileField, validators
from wtforms.fields.html5 import EmailField
class LoginForm(FlaskForm):
    """Login credentials."""
    # Bug fix (applies to this and the Edit*Pin forms below): validator
    # lists were passed as the first positional argument of each field,
    # which is the *label*, so the validators never ran.  They are now
    # passed via the `validators` keyword.  'Decription' placeholder
    # typos are also corrected.
    username = StringField(validators=[validators.Length(min=1, max=30)], render_kw={"placeholder": "Username"})
    password = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Password"})
    submit = SubmitField('Submit')


class EditRentalPin(FlaskForm):
    """Edit a rental pin; requires the user's and the group's passwords."""
    rentalName = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Rental Name"})
    price = IntegerField(validators=[validators.NumberRange(min=0, max=350000)], render_kw={"placeholder": "Price"})
    rooms = IntegerField(validators=[validators.NumberRange(min=0, max=100)], render_kw={"placeholder": "Rooms"})
    description = TextAreaField(validators=[validators.Length(min=10)], render_kw={"placeholder": "Description"})
    link = StringField(validators=[validators.Length(min=4, max=355)], render_kw={"placeholder": "Link"})
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "User Password"})
    passwordGroup = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Group Password"})
    submit = SubmitField("Submit")


class EditRestaurantPin(FlaskForm):
    """Edit a restaurant/nightclub pin; requires user and group passwords."""
    name = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Name"})
    description = TextAreaField(validators=[validators.Length(min=10)], render_kw={"placeholder": "Description"})
    link = StringField(validators=[validators.Length(min=4, max=355)], render_kw={"placeholder": "Link"})
    type = SelectField("Type", choices=[('Restaurant', 'Restaurant'), ('Nightclub', 'Nightclub')])
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "User Password"})
    passwordGroup = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Group Password"})
    submit = SubmitField("Submit")


class EditTransportationPin(FlaskForm):
    """Edit a transportation pin; requires user and group passwords."""
    name = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Name"})
    price = IntegerField(validators=[validators.NumberRange(min=0, max=100000)], render_kw={"placeholder": "Price"})
    description = TextAreaField(validators=[validators.Length(min=10)], render_kw={"placeholder": "Description"})
    link = StringField(validators=[validators.Length(min=4, max=555)], render_kw={"placeholder": "Link"})
    type = SelectField("Type", choices=[('Flight', 'Flight'), ('Train', 'Train'), ('Bus', 'Bus'), ('Other', 'Other')])
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "User Password"})
    passwordGroup = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Group Password"})
    submit = SubmitField("Submit")


class EditActivityPin(FlaskForm):
    """Edit an activity pin; requires user and group passwords."""
    name = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Name"})
    price = IntegerField(validators=[validators.NumberRange(min=0, max=100000)], render_kw={"placeholder": "Price"})
    description = TextAreaField(validators=[validators.Length(min=10)], render_kw={"placeholder": "Description"})
    link = StringField(validators=[validators.Length(min=4, max=355)], render_kw={"placeholder": "Link"})
    type = SelectField("Type", choices=[('Indoor', 'Indoor'), ('Outdoor', 'Outdoor')])
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "User Password"})
    passwordGroup = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Group Password"})
    submit = SubmitField("Submit")
class SignupForm(FlaskForm):
    """New-account signup; the password must match its confirmation."""
    firstName = StringField('First Name:', [validators.Length(min=2, max=30, message=("First name must be between 2 and 30 characters."))], render_kw={"placeholder": "First Name"})
    lastName = StringField('Last Name:', [validators.Length(min=2, max=40, message="Last name must be between 2 and 40 characters.")], render_kw={"placeholder": "Last Name"})
    username = StringField('Username:', [validators.Length(min=2, max=30, message="Username must be between 2 and 30 characters")], render_kw={"placeholder": "Username"})
    email = StringField('Email:', [validators.Email()], render_kw={"placeholder": "Email"})
    password = PasswordField('Password:', [
        validators.InputRequired(),
        validators.EqualTo('confirm', message='Passwords do not match.')], render_kw={"placeholder": "Password"})
    confirm = PasswordField('Confirm Password:', render_kw={"placeholder": "Confirm Password"})
    submit = SubmitField('Submit')


class EditProfileForm(FlaskForm):
    """Edit the logged-in user's profile; requires the current password."""
    firstName = StringField('First Name:', [validators.Length(min=1, max=30)], render_kw={"placeholder": "First Name"})
    lastName = StringField('Last Name:', [validators.Length(min=1, max=30)], render_kw={"placeholder": "Last Name"})
    email = EmailField('Email:', [validators.Email()], render_kw={"placeholder": "Email"})
    # Bug fix: the validator list was passed as the field label, so
    # InputRequired never ran; it is now passed via `validators`.
    password2 = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Password"})
    submit = SubmitField('Submit')


class GroupForm(FlaskForm):
    """Create a travel group; the password must match its confirmation."""
    groupName = StringField('Group Name:', [validators.Length(min=2, max=30, message=("Group name must be between 2 and 30 characters"))], render_kw={"placeholder": "Group Name"})
    location = StringField('Location', [validators.Length(min=2, max=35, message=("Location must be between 2 and 35 characters"))], render_kw={"placeholder": "Location"})
    startDate = DateField('Start date:', render_kw={"placeholder": "Start date"})
    endDate = DateField('End date:', render_kw={"placeholder": "End date"})
    password = PasswordField('Password:', [
        validators.InputRequired(),
        validators.EqualTo('confirm', message='Passwords do not match.')], render_kw={"placeholder": "Password"})
    confirm = PasswordField('Confirm Password:', render_kw={"placeholder": "Confirm password"})
    description = TextAreaField('Description/Message:', [validators.Length(min=10, max=500)], render_kw={"placeholder": "Description/Message"})
    # image = FileField('Image(Optional)', [validators.Optional()], render_kw={'placeholder': 'Image(Optional)'})
    submit = SubmitField('Submit')


class DeleteProfileForm(FlaskForm):
    """Confirm account deletion with the user's password."""
    # Bug fix: validators moved from the label slot to the `validators` keyword.
    password = PasswordField(validators=[validators.InputRequired()], render_kw={'placeholder': 'Password'})
    yes = SubmitField('Yes')


class RentalPinForm(FlaskForm):
    """Create a rental pin."""
    rentalName = StringField('Name:', [validators.Length(min=2, max=40)], render_kw={"placeholder": "Rental Name"})
    price = IntegerField('Price:', [validators.NumberRange(min=0, max=10000)], render_kw={"placeholder": "Price"})
    rooms = IntegerField('Rooms', [validators.NumberRange(min=0, max=35)], render_kw={"placeholder": "Rooms"})
    description = TextAreaField('Description:', [validators.Length(min=10, max=500, message=("Description must be between 10 and 500 characters."))], render_kw={"placeholder": "Description"})
    link = StringField('Link to this rental:', [validators.Length(min=5, max=255)], render_kw={"placeholder": "Link"})
    submit1 = SubmitField('Submit')
    # Images(blob) TODO


class RequestNewPassword(FlaskForm):
    """Request a password-reset email."""
    email = StringField('Email:', [validators.Email()], render_kw={"placeholder": "Email"})
    submit = SubmitField('Submit')


class ResetPassword(FlaskForm):
    """Set a new password; it must match its confirmation."""
    username = StringField('Username', render_kw={"placeholder": "userName"})
    password = PasswordField('Password:', [
        validators.InputRequired(),
        validators.EqualTo('confirm', message='Passwords do not match.')], render_kw={"placeholder": "Password"})
    confirm = PasswordField('Confirm Password:', render_kw={"placeholder": "Confirm password"})
    submit = SubmitField('Submit')
class RestPinForm(FlaskForm):
    """Create a restaurant/nightclub pin."""
    restName = StringField('Name:', [validators.Length(min=2, max=40)], render_kw={"placeholder": "Name"})
    description2 = TextAreaField('Description:', [validators.Length(min=10)], render_kw={"placeholder": "Description"})
    link2 = StringField('Link:', [validators.Length(min=5, max=500)], render_kw={"placeholder": "Link"})
    type = SelectField('Type', choices=[('Restaurant', 'Restaurant'), ('Nightclub', 'Nightclub')])
    submit2 = SubmitField('Submit')


class ActivityPinForm(FlaskForm):
    """Create an activity pin."""
    activityName = StringField('Name', [validators.Length(min=4, max=40)], render_kw={"placeholder": "Activity Name"})
    description = TextAreaField('Description', [validators.Length(min=4)], render_kw={"placeholder": "Description"})
    price = IntegerField('Price:', [validators.NumberRange(min=0, max=10000)], render_kw={"placeholder": "Price"})
    link = StringField('Link', [validators.Length(min=6, max=500)], render_kw={"placeholder": "Link"})
    type = SelectField('Type', choices=[('Indoor', 'Indoor'), ('Outdoor', 'Outdoor')], render_kw={'placeholder': 'Type'})
    submit4 = SubmitField('Submit')


class TransportationPinForm(FlaskForm):
    """Create a transportation pin."""
    name = StringField('Name', [validators.Length(min=3, max=45)], render_kw={"placeholder": "Name"})
    # Bug fix: this price field's label read 'Description' (copy-paste error).
    price3 = IntegerField('Price', [validators.NumberRange(min=0, max=1000000)], render_kw={"placeholder": "Price"})
    link3 = StringField('Link', [validators.Length(min=5, max=500)], render_kw={"placeholder": "Link"})
    description3 = TextAreaField('Description', [validators.Length(min=10, max=500)], render_kw={"placeholder": "Description"})
    type = SelectField("Type", choices=[('Flight', 'Flight'), ('Train', 'Train'), ('Bus', 'Bus'), ('Rental Car', 'Rental Car'), ('Other', 'Other')])
    submit3 = SubmitField('Submit')


class EditGroupForm(FlaskForm):
    """Edit a group; requires both the group's and the user's passwords."""
    # Bug fix: the validator lists below were passed as field labels, so
    # they never ran; they are now passed via the `validators` keyword.
    name = StringField(validators=[validators.Length(min=2, max=30)], render_kw={"placeholder": "Group Name"})
    location = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Location"})
    startDate = StringField('Start date:', render_kw={"placeholder": "Start date"})
    endDate = StringField('End date:', render_kw={"placeholder": "End date"})
    description = TextAreaField(validators=[validators.Length(min=10)], render_kw={"placeholder": "Description/Message"})
    groupPassword = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Group Password"})
    password = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Your Password"})
    submit = SubmitField('Submit')
class DeleteGroupForm(FlaskForm):
    """Confirm group deletion with the user's password."""
    # Bug fix (here and in the forms below): validator lists were passed
    # as field labels, so they never ran; they are now passed via the
    # `validators` keyword.
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={'placeholder': 'User Password'})
    yes = SubmitField('Yes')


class JoinGroupForm(FlaskForm):
    """Join a group directly with its name and password."""
    name = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Group Name"})
    password = PasswordField(validators=[validators.InputRequired()], render_kw={"placeholder": "Password"})
    submit = SubmitField()


class JoinGroupRequestForm(FlaskForm):
    """Request to join a group, with a message to its members."""
    name2 = StringField(validators=[validators.Length(min=2, max=35)], render_kw={"placeholder": "Group Name"})
    messageJoin = StringField(validators=[validators.Length(min=15, max=255)], render_kw={"placeholder": "Message"})
    submit2 = SubmitField('Submit')


class DeletePinForm(FlaskForm):
    """Confirm pin deletion with the user's and the group's passwords."""
    passwordUser = PasswordField(validators=[validators.InputRequired()], render_kw={'placeholder': 'User Password'})
    passwordGroup = PasswordField(validators=[validators.InputRequired()], render_kw={'placeholder': 'Group Password'})
    yes = SubmitField('Yes')


class InviteGroupNoAccountForm(FlaskForm):
    """Invite someone without an account, by email."""
    email1 = StringField(validators=[validators.Email()], render_kw={'placeholder': 'Email'})
    submit1 = SubmitField('Submit')


class InviteGroupByUsernameForm(FlaskForm):
    """Invite an existing user by username."""
    username = StringField(validators=[validators.Length(min=2, max=30)], render_kw={"placeholder": "Username"})
    submit2 = SubmitField('Submit')


class InviteGroupByEmailForm(FlaskForm):
    """Invite an existing user by email."""
    email2 = StringField(validators=[validators.Email()], render_kw={'placeholder': 'Email'})
    submit3 = SubmitField('Submit')


class JoinGroupFromInviteForm(FlaskForm):
    """Accept or decline a group invitation."""
    yes = SubmitField('Yes')
    no = SubmitField('No')


class UserGroupJoinDecisionForm(FlaskForm):
    """Accept or decline a pending join request."""
    username = StringField()
    accept = SubmitField('Accept')
    decline = SubmitField('Decline')


class CalendarEventForm(FlaskForm):
    """Add a calendar event to a trip day."""
    dayNoteDate = StringField()
    name = StringField('Event Name', [validators.Length(min=3, max=30)], render_kw={'placeholder': 'Event Name'})
    inputTime = StringField()


class BudgetForm(FlaskForm):
    """Set the user's trip budget."""
    budget = IntegerField(validators=[validators.NumberRange(min=10)], render_kw={'placeholder': 'My Trip Budget'})
    submit5 = SubmitField('Submit')


class ExpenseForm(FlaskForm):
    """Record an expense and how it is split between group members."""
    # NOTE(review): the message says "at least 2 characters" but min=1 —
    # confirm which was intended.
    name = StringField(validators=[validators.Length(min=1, message="The expense name must be at least 2 characters long.")], render_kw={'placeholder': 'Expense Name'})
    cost = IntegerField(validators=[validators.NumberRange(min=1)], render_kw={'placeholder': 'Price'})
    payments = IntegerField(validators=[validators.NumberRange(min=1)], render_kw={'placeholder': '# of Payments'})
    splits = IntegerField(validators=[validators.NumberRange(min=1)], render_kw={'placeholder': '# People to Split'})
    type = SelectField('Type', choices=[('Rental', 'Rental'), ('Food/Drink', 'Food/Drink'), ('Transportation', 'Transportation'), ('Activity', 'Activity')])
    submit6 = SubmitField('Calculate')
|
class Solution:
    def strStr(self, haystack: 'str', needle: 'str') -> 'int':
        """Return the index of the first occurrence of needle in haystack, or -1.

        An empty needle returns 0, matching C strstr() and Java indexOf().
        """
        hay_len, ndl_len = len(haystack), len(needle)
        if ndl_len == 0:
            return 0
        if ndl_len > hay_len:
            return -1
        # Brute force: try every alignment and compare character by character.
        for start in range(hay_len - ndl_len + 1):
            if all(haystack[start + k] == needle[k] for k in range(ndl_len)):
                return start
        return -1

    def strStr2(self, haystack: 'str', needle: 'str') -> 'int':
        """Same contract as strStr, using the built-in substring search."""
        return haystack.index(needle) if needle in haystack else -1
if __name__ == '__main__':
    # Smoke test: the needle equals the haystack, so the match is at index 0.
    print(Solution().strStr(haystack="mississippi", needle="mississippi"))
|
def extractFmgandalfWordpressCom(item):
    """Parser for 'fmgandalf.wordpress.com'.

    Returns a release message for recognized series prefixes, None for
    items without chapter/volume info or previews, and False when no
    prefix matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    # Skip items with no chapter/volume info, and preview posts.
    if not (chp or vol) or "preview" in title_lower:
        return None
    # (title prefix, series name, translation type)
    chp_prefixes = [
        ('BS ', 'Black Summoner', 'translated'),
        ('Mitsy', 'Makikomarete Isekai Teni suru Yatsu wa', 'translated'),
        ('Gun- Ota ', 'Gun-ota ga Majou Sekai ni Tensei Shitara', 'translated'),
        ('Gun OTA ', 'Gun-ota ga Majou Sekai ni Tensei Shitara', 'translated'),
        ('Gun-Ota ', 'Gun-ota ga Majou Sekai ni Tensei Shitara', 'translated'),
        ('GOGMS ', 'Gun-ota ga Majou Sekai ni Tensei Shitara', 'translated'),
        ('SS HH ', 'Self-proclaimed! An Ordinary Demonic Hero’s life ~ The Result of Creating a Cheat Dungeon Despite Being a B-class Demon', 'translated'),
        ('SSHH', 'Self-proclaimed! An Ordinary Demonic Hero’s life ~ The Result of Creating a Cheat Dungeon Despite Being a B-class Demon', 'translated'),
    ]
    for prefix, series, tl_type in chp_prefixes:
        if title_lower.startswith(prefix.lower()):
            return buildReleaseMessageWithType(item, series, vol, chp,
                                               frag=frag, postfix=postfix,
                                               tl_type=tl_type)
    return False
import logging
import http.server
import socketserver
import getpass
# MyHTTPHandler subclasses http.server.SimpleHTTPRequestHandler and
# overrides log_message so each request is logged (IP, date, and the
# formatted message arguments) through the logging module.
class MyHTTPHandler(http.server.SimpleHTTPRequestHandler):
    """Request handler that routes access logs through the logging module.

    Overrides log_message so each entry records the client IP and a
    timestamp instead of being written straight to stderr.
    """

    def log_message(self, format, *args):
        entry = "%s - - [%s] %s\n" % (self.client_address[0],
                                      self.log_date_time_string(),
                                      format % args)
        logging.info(entry)
# Configure the log file and the line format (timestamp - level - message).
logging.basicConfig(
    filename='/log/http-server.log',  # NOTE(review): absolute /log path must exist — confirm (container volume?)
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
# Also echo records to the console, then log that startup has begun.
logging.getLogger().addHandler(logging.StreamHandler())
logging.info('inicializando...')
PORT = 8000
# Bind the TCP server on port 8000 with the overridden handler class,
# log the port and the current user, then serve requests forever.
httpd = socketserver.TCPServer(("", PORT), MyHTTPHandler)
logging.info('escutando a porta:%s', PORT)
logging.info('usuario: %s', getpass.getuser())
httpd.serve_forever()
|
import random

# Rock-paper-scissors (Pierre/Papier/Ciseaux) against the computer.
choices = ["Pierre", "Papier", "Ciseaux"]
print("if you want to end write 'End'")
player = False
cpu_score = 0
player_score = 0
while True:
    player = input("Pierre, Papier, Ciseaux?").capitalize()
    if player == "End":
        print("Your score:", player_score)
        print("Computer score", cpu_score)
        break
    # Bug fix: draw the computer's move every round.  The original drew
    # it once before the loop, so the computer repeated one move forever.
    computer = random.choice(choices)
    if player == computer:
        print("Play Again")
    elif player == "Pierre":
        if computer == "Papier":
            print("You loose")
            cpu_score += 1
        else:
            print("You win")
            player_score += 1
    elif player == "Papier":
        if computer == "Ciseaux":
            print("You loose")
            cpu_score += 1
        else:
            print("You win")
            player_score += 1
    elif player == "Ciseaux":
        if computer == "Pierre":
            print("You loose")
            cpu_score += 1
        else:
            print("You win")
            player_score += 1
def initL1RSSubsystemsExt( tagBaseVec = [],
                           # L1MuDTTFMasksRcdKey = 'dummy',
                           ):
    """Attach an (initially empty) VPSet of record info to this function.

    CMSSW configuration convention: the config is stored as an attribute
    on the function object itself (initL1RSSubsystemsExt.params).
    NOTE(review): tagBaseVec is unused in this body and has a mutable
    default ([]) — confirm before changing the signature.
    """
    import FWCore.ParameterSet.Config as cms
    from CondTools.L1TriggerExt.L1CondEnumExt_cfi import L1CondEnumExt
    initL1RSSubsystemsExt.params = cms.PSet( recordInfo = cms.VPSet() )
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
import argparse
from itertools import islice
from six import binary_type
import tabulate
from .mpf import MPRowsFile
from .__meta__ import __version__
def make_arg_parser(parser=None):
    """Build (or extend) the argparse parser for the ampr command-line tool.

    parser: an existing ArgumentParser to add arguments to; a new one is
    created when omitted.
    """
    if not parser:
        parser = argparse.ArgumentParser(
            prog='ampr',
            # Bug fix: the original format string had no '{}' placeholder,
            # so the version number was never rendered.
            description='Ambry Message Pack Rows file access version: {}'.format(__version__))
    parser.add_argument('-m', '--meta', action='store_true',
                        help='Show metadata')
    parser.add_argument('-s', '--schema', action='store_true',
                        help='Show the schema')
    parser.add_argument('-S', '--stats', action='store_true',
                        help='Show the statistics')
    parser.add_argument('-t', '--types', action='store_true',
                        help='Show type resolution information')
    parser.add_argument('-H', '--head', action='store_true',
                        help='Display the first 10 records. Will only display 80 chars wide')
    parser.add_argument('-T', '--tail', action='store_true',
                        help='Display the first last 10 records. Will only display 80 chars wide')
    parser.add_argument('-r', '--records', action='store_true',
                        help='Output the records in tabular format')
    parser.add_argument('-R', '--raw', action='store_true',
                        help='For the sample output, use the raw iterator')
    parser.add_argument('-j', '--json', action='store_true',
                        help='Output the entire file as JSON')
    parser.add_argument('-c', '--csv', help='Output the entire file as CSV')
    parser.add_argument('-l', '--limit', help='The number of rows to output for CSV or JSON')
    parser.add_argument('path', nargs=1, type=binary_type, help='File path')
    return parser
def main(args=None):
    """Entry point: inspect an MPR file according to the parsed CLI flags."""
    from operator import itemgetter
    from datetime import datetime
    if not args:
        parser = make_arg_parser()
        args = parser.parse_args()
    # The ambry CLI may pass an already-open MPRowsFile instead of a path.
    if isinstance(args.path[0], MPRowsFile):
        f = args.path[0]  # When it is called from the ambry cli with a remote file.
    else:
        f = MPRowsFile(args.path[0])
    r = f.reader
    # Field lists + itemgetters used to format each report table below.
    schema_fields = ['pos', 'name', 'type', 'resolved_type', 'description', 'start', 'width']
    schema_getter = itemgetter(*schema_fields)
    types_fields = ['header', 'type_count', 'length', 'floats', 'ints', 'unicode', 'strs', 'dates',
                    'times', 'datetimes', 'nones', 'has_codes']
    types_getter = itemgetter(*types_fields)
    stats_fields_all = ['name', 'stat_count', 'nuniques', 'mean', 'min', 'p25', 'p50', 'p75', 'max', 'std',
                        'uvalues', 'lom', 'skewness', 'kurtosis', 'flags', 'hist', 'text_hist']
    stats_fields = ['name', 'lom', 'stat_count', 'nuniques', 'mean', 'min', 'p25', 'p50', 'p75',
                    'max', 'std', 'text_hist']
    stats_getter = itemgetter(*stats_fields)
    # CSV export short-circuits all the report output below.
    if args.csv:
        import unicodecsv as csv
        with f.reader as r:
            limit = int(args.limit) if args.limit else None
            with open(args.csv, 'wb') as out_f:
                w = csv.writer(out_f)
                w.writerow(r.headers)
                for i, row in enumerate(r.rows):
                    w.writerow(row)
                    if limit and i >= limit:
                        break
        return

    def pm(l, m):
        """Print, maybe"""
        if not m:
            return
        m = binary_type(m).strip()
        if m:
            print('{:<12s}: {}'.format(l, m))

    # Summary header: file path, creation time, and reader info fields.
    with f.reader as r:
        try:
            path = f.syspath
        except:  # NOTE(review): bare except — narrow to the exception syspath actually raises
            path = '{} / {}'.format(f._fs, f._path)
        pm('MPR File', path)
        pm('Created',
           (r.meta['about']['create_time'] and datetime.fromtimestamp(r.meta['about']['create_time'])))
        pm('version', r.info['version'])
        pm('rows', r.info['rows'])
        pm('cols', r.info['cols'])
        pm('header_rows', r.info['header_rows'])
        pm('data_row', r.info['data_start_row'])
        pm('end_row', r.info['data_end_row'])
        ss = r.meta['source']
        pm('URL', ss['url'])
        pm('encoding', ss['encoding'])
    if args.schema:
        print('\nSCHEMA')
        with f.reader as r:
            print(tabulate.tabulate((schema_getter(row.dict) for row in r.columns), schema_fields))
    if args.stats:
        with f.reader as r:
            print('\nSTATS')
            print(tabulate.tabulate((stats_getter(row.dict) for row in r.columns), stats_fields))
    if args.types:
        with f.reader as r:
            print('\nTYPES')
            print(tabulate.tabulate((types_getter(row.dict) for row in r.columns), types_fields))
    if args.head or args.tail:
        with f.reader as r:
            print('\nHEAD' if args.head else '\nTAIL')
            MAX_LINE = 80
            headers = []
            # Only show as many cols as will fit in an 80 char line.
            for h in r.headers:
                if len(' '.join(headers + [h])) > MAX_LINE:
                    break
                headers.append(h)
            itr = r.raw if args.raw else r.rows
            rows = []
            # Head: first 10 rows; tail: last 10 rows of the file.
            start, end = (None, 10) if args.head else (r.n_rows - 10, r.n_rows)
            slc = islice(itr, start, end)
            rows = [(i,) + row[:len(headers)] for i, row in enumerate(slc, start if start else 0)]
            print(tabulate.tabulate(rows, ['#'] + headers))
    elif args.records:
        with f.reader as r:
            acc = []
            try:
                # Emit the accumulated rows in batches of 30.
                for i, row in enumerate(r.rows, 1):
                    if i % 30 == 0:
                        print(tabulate.tabulate(acc, r.headers))
                        acc = []
                    else:
                        acc.append(row)
                    if args.limit and i > int(args.limit):
                        if acc:
                            print(tabulate.tabulate(acc, r.headers))
                            acc = []
                        break
                if acc:
                    print(tabulate.tabulate(acc, r.headers))
            except KeyboardInterrupt:
                # Ctrl-C during paging exits quietly.
                import sys
                sys.exit(0)
if __name__ == '__main__':
    # Allow running this module directly as the `ampr` CLI.
    main()
|
# C++ source fragments stitched together by the ino-driver code generator;
# each constant is emitted verbatim into the generated Arduino sketch files.

# Header prologue: Arduino/SerialCommand includes, the compile timestamp,
# and the bridge's public declarations.
IN_H_HEADER = r"""
#include <Arduino.h>
#include "SerialCommand.h"
#include "inodriver_user.h"
const char COMPILE_DATE_TIME[] = __DATE__ " " __TIME__;
void ok();
void error(const char*);
void error_i(int);
void bridge_loop();
"""

# Implementation prologue: serial command dispatcher plus the OK/ERROR
# reply helpers and the main bridge polling loop.
IN_CPP_HEADER = r"""
#include "inodriver_bridge.h"
SerialCommand sCmd;
void ok() {
Serial.println("OK");
}
void error(const char* msg) {
Serial.print("ERROR: ");
Serial.println(msg);
}
void error_i(int errno) {
Serial.print("ERROR: ");
Serial.println(errno);
}
void bridge_loop() {
while (Serial.available() > 0) {
sCmd.readSerial();
}
}
"""

# Declarations for the generated command handlers.
IN_H_BODY = r"""
void getInfo();
void unrecognized(const char *);
"""

# Command-handler implementations; the '%s' is filled in by the generator
# (device identifier reported by the INFO? command).
IN_CPP_BODY = r"""
//// Code
void getInfo() {
Serial.print("%s,");
Serial.println(COMPILE_DATE_TIME);
}
void unrecognized(const char *command) {
error("Unknown command");
}
//// Auto generated Feat and DictFeat Code
"""

# Body of setup(): registers the serial commands and the fallback handler.
IN_SETUP = r"""
//// Setup callbacks for SerialCommand commands
// All commands might return
// ERROR: <error message>
// All set commands return
// OK
// if the operation is successfull
// All parameters are ascii encoded strings
sCmd.addCommand("INFO?", getInfo);
sCmd.setDefaultHandler(unrecognized);
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-23 10:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration: unifies the two parallel 'content' branches
    (0088 and 0087) into a single history; it performs no schema changes."""

    dependencies = [
        ('content', '0088_expensecategory_order'),
        ('content', '0087_report_permissions'),
    ]

    operations = [
    ]
|
# from . import archive_outputs
from pypyr.context import Context
from pypyr.errors import KeyNotInContextError
from . import cmd, py
def get_formatted_or_default(self: Context, key: str, default):
    """Return self.get_formatted(key), or *default* when the key is absent.

    A TypeError from get_formatted (value not formattable) falls back to
    the raw self.get(key); any other failure is wrapped in ValueError
    chained to the original exception.
    """
    try:
        return self.get_formatted(key)
    except (KeyNotInContextError, KeyError):
        # Missing key -> caller-supplied default.
        return default
    except TypeError:
        # Unformattable (non-string) value -> return it unformatted.
        return self.get(key)
    except Exception as err:
        raise ValueError(f"extracting {key} from context") from err


# Monkey-patch the helper onto pypyr's Context so steps can call it as a method.
Context.get_formatted_or_default = get_formatted_or_default
def get_formatted_or_raw(self: Context, key: str):
    """Return self.get_formatted(key), falling back to the raw value.

    A TypeError (value not formattable) returns self.get(key) instead;
    any other failure is wrapped in ValueError chained to the original.
    """
    try:
        return self.get_formatted(key)
    except TypeError:
        return self.get(key)
    except Exception as err:
        raise ValueError(f"extracting {key} from context") from err


# Monkey-patch the helper onto pypyr's Context so steps can call it as a method.
Context.get_formatted_or_raw = get_formatted_or_raw
|
import random
import smtplib
import string
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from flask import render_template, request, url_for, redirect, session
from project import app
from project.com.controller.LoginController import adminLoginSession, adminLogoutSession
from project.com.dao.LoginDAO import LoginDAO
from project.com.dao.RegisterDAO import RegisterDAO
from project.com.vo.LoginVO import LoginVO
from project.com.vo.RegisterVO import RegisterVO
@app.route('/user/loadRegister')
def userLoadRegister():
    """Render the blank user-registration form."""
    try:
        return render_template("user/addRegister.html")
    except Exception as ex:
        # NOTE(review): the exception is printed and None is returned,
        # which Flask reports as an invalid response — consider logging
        # and returning an error page instead.
        print(ex)
@app.route('/user/insertRegister', methods=['POST'])
def userInsertRegister():
    """Register a new user: generate credentials, email the password,
    and persist the login row plus the linked profile row.

    NOTE(review): the SMTP password is hardcoded in source, and
    server.quit() is skipped if anything raises before it — consider a
    config secret and a try/finally (or smtplib's context manager).
    """
    try:
        loginVO = LoginVO()
        loginDAO = LoginDAO()
        registerVO = RegisterVO()
        registerDAO = RegisterDAO()
        # Form fields from addRegister.html; the username is an email address.
        loginUsername = request.form['loginUsername']
        registerFirstname = request.form['registerFirstname']
        registerLastname = request.form['registerLastname']
        registerGender = request.form['registerGender']
        registerContactNumber = request.form['registerContactNumber']
        # Random 8-character alphanumeric initial password.
        loginPassword = ''.join((random.choice(string.ascii_letters + string.digits)) for x in range(8))
        # Email the generated password to the new user via Gmail SMTP.
        sender = "universitypredictor@gmail.com"
        receiver = loginUsername
        msg = MIMEMultipart()
        msg['From'] = sender
        msg['To'] = receiver
        msg['Subject'] = "LOGIN PASSWORD"
        msg.attach(MIMEText(loginPassword, 'plain'))
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(sender, "UniversityPredictor")
        text = msg.as_string()
        server.sendmail(sender, receiver, text)
        # Persist the login first so its generated id can link the profile row.
        loginVO.loginUsername = loginUsername
        loginVO.loginPassword = loginPassword
        loginVO.loginRole = "user"
        loginVO.loginStatus = "active"
        loginDAO.insertLogin(loginVO)
        registerVO.registerFirstname = registerFirstname
        registerVO.registerLastname = registerLastname
        registerVO.registerGender = registerGender
        registerVO.registerContactNumber = registerContactNumber
        registerVO.register_LoginId = loginVO.loginId
        registerDAO.insertRegister(registerVO)
        server.quit()
        return redirect(url_for('adminLoadLogin'))
    except Exception as ex:
        print(ex)
@app.route('/user/editRegister', methods=['GET'])
def userEditRegister():
    """Show the edit-profile form for the currently logged-in user."""
    try:
        if adminLoginSession() != 'user':
            # Not a user session: force logout / redirect.
            return adminLogoutSession()
        registerVO = RegisterVO()
        registerVO.register_LoginId = session['session_loginId']
        registerVOList = RegisterDAO().userEditRegister(registerVO)
        print(registerVOList)
        return render_template('user/editRegister.html', registerVOList=registerVOList)
    except Exception as ex:
        print(ex)
@app.route('/user/updateRegister', methods=['post'])
def userUpdateRegister():
    """Update the logged-in user's profile; re-issue the password when the
    username (email address) changes.

    BUG FIX: the regenerated password is now assigned to loginVO before
    loginDAO.updateLogin, so the password emailed to the user matches the
    one persisted (previously only the username was set on loginVO and the
    emailed password was never stored).
    """
    try:
        loginId = request.form['loginId']
        loginUsername = request.form['loginUsername']
        registerId = request.form['registerId']
        registerFirstname = request.form['registerFirstname']
        registerLastname = request.form['registerLastname']
        registerGender = request.form['registerGender']
        registerContactNumber = request.form['registerContactNumber']
        loginVO = LoginVO()
        loginDAO = LoginDAO()
        loginVO.loginId = loginId
        loginVOList = loginDAO.editLogin(loginVO)
        if loginVOList.loginUsername != loginUsername:
            loginVO.loginUsername = loginUsername
            # NOTE(review): `random` is not cryptographically secure; prefer
            # the `secrets` module for password generation.
            loginPassword = ''.join((random.choice(string.ascii_letters + string.digits)) for x in range(8))
            loginVO.loginPassword = loginPassword  # persist the new password
            sender = "universitypredictor@gmail.com"
            receiver = loginUsername
            msg = MIMEMultipart()
            msg['From'] = sender
            msg['To'] = receiver
            msg['Subject'] = "LOGIN PASSWORD"
            msg.attach(MIMEText(loginPassword, 'plain'))
            # NOTE(security review): hard-coded SMTP credentials in source.
            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(sender, "UniversityPredictor")
            text = msg.as_string()
            server.sendmail(sender, receiver, text)
            server.quit()
            loginDAO.updateLogin(loginVO)
        registerVO = RegisterVO()
        registerDAO = RegisterDAO()
        registerVO.registerId = registerId
        registerVO.registerFirstname = registerFirstname
        registerVO.registerLastname = registerLastname
        registerVO.registerContactNumber = registerContactNumber
        registerVO.registerGender = registerGender
        registerVO.register_LoginId = loginId
        registerDAO.userUpdateRegister(registerVO)
        return redirect('/user/loadDashboard')
    except Exception as ex:
        # File-wide convention: swallow and print; Flask then returns None.
        print(ex)
@app.route('/admin/viewRegister')
def adminViewRegister():
    """List every registered user for the admin dashboard."""
    try:
        if adminLoginSession() != "admin":
            # Not an admin session: force logout / redirect.
            return adminLogoutSession()
        registerVOList = RegisterDAO().adminViewRegister()
        return render_template("admin/viewRegister.html", registerVOList=registerVOList)
    except Exception as ex:
        print(ex)
@app.route('/admin/blockUser', methods=['GET'])
def adminBlockUser():
    """Deactivate the login identified by the 'loginId' query parameter."""
    try:
        if adminLoginSession() != 'admin':
            # Not an admin session: force logout / redirect.
            return adminLogoutSession()
        loginVO = LoginVO()
        loginVO.loginId = request.args.get('loginId')
        loginVO.loginStatus = 'deactive'
        LoginDAO().blockUser(loginVO)
        # Re-render the refreshed user list.
        return adminViewRegister()
    except Exception as ex:
        print(ex)
|
# -*- coding: utf-8 -*-
import codecs
import json
# NOTE(review): this script is Python 2 only (`<>` operator, `dict.keys()[0]`
# indexing, `json.dumps(..., encoding=...)`); it needs `import codecs` at the
# top of the file.
# Read tab-separated "search-URL<TAB>visits" lines exported from analytics.
f = codecs.open("keywords.txt", encoding='utf-8', mode='r')
kw=f.readlines()
f.close()
searches=[]
for i in range(len(kw)):
    # Strip the trailing newline and split the URL from its visit count.
    s=kw[i][:-1].split("\t")
    if len(s)>1:
        searches.append(s)
output=[]
for s in searches:
    # Break the query string into key=value pairs, dropping empty values.
    s1=s[0].replace("/search?","").split("&")
    s2=[]
    for t in s1:
        t=t.split("=")
        if len(t)==2:
            if t[1]<>"":
                s2.append({t[0]:t[1]})
    # Pair each "valueN" parameter with its matching "optionN" name so the
    # output dict is keyed by the option name; copy plain parameters as-is.
    s3={}
    for t in s2:
        k=t.keys()[0]
        if k[:5]=="value":
            o=k.replace("value","option")
            ov=""
            for t2 in s2:
                if t2.keys()[0]==o:
                    ov=t2.values()[0]
                    break
            if ov<>"":
                kv=t.values()[0]
                s3[ov]=kv
        else:
            if k[:6]<>"option":
                s3[t.keys()[0]]=t.values()[0]
    s3["visits"]=s[1]
    #if "fullText" in s3.keys():
    output.append(s3)
# Dump the list of per-search dicts as JSON.
f = codecs.open("output.txt", encoding='utf-8', mode='wb')
f.write(json.dumps(output,encoding='latin1'))
f.close()
|
#Code for the Rotten Tomatoes Kaggle contest
#Import libraries
print('Importing needed libraries...')
import pandas as pd
import numpy as np
from nltk.tokenize import word_tokenize
import itertools
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import copy
from keras.models import Sequential
from keras.layers import Dense,Activation
from keras.layers import Flatten, Dropout, Convolution1D
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
#Import data
print('Importing data and compiling vocabulary in progress...')
# Kaggle Rotten Tomatoes training file (tab-separated: PhraseId, SentenceId,
# Phrase, Sentiment).
df_train_ini = pd.read_csv('train.tsv', sep = '\t')
#df_test = pd.read_csv('test.tsv', sep = '\t')
PhraseID = df_train_ini['PhraseId'][5200:6000] #keep to build the submission file
# Drop PhraseId and SentenceId, keeping only Phrase and Sentiment.
df_train_ini = df_train_ini.drop(df_train_ini.columns[[0,1]], axis = 1)
#df_test = df_test.drop(df_test.columns[[0,1]], axis = 1)
# Carve train/"test" folds out of the labelled training data.
df_train = df_train_ini.iloc[1:1200,]
df_test = df_train_ini.iloc[1200:1500,]
#Tokenize sentences
#1. Train set
text_list_train = list(df_train['Phrase'])
tokenized_text_train = [word_tokenize(i) for i in text_list_train]
#2. Test set
text_list_test = list(df_test['Phrase'])
tokenized_text_test = [word_tokenize(i) for i in text_list_test]
#Create vocabulary from train set only
list_of_all_words_in_train_set = list(itertools.chain.from_iterable(tokenized_text_train))
vocabulary = sorted(list(set(list_of_all_words_in_train_set)))
#Remove stopwords
#vocabulary = [word for word in vocabulary if word not in stopwords.words('english')]
# NOTE(review): iterating a DataFrame yields its column names, so this fits
# the vectorizer on ['Phrase', 'Sentiment'] rather than on the phrases —
# likely intended df_train['Phrase']. smatrix is unused below, so the rest
# of the script is unaffected.
vectorizer=CountVectorizer()
vectorizer.fit_transform(df_train)
smatrix = vectorizer.transform(df_test)
smatrix.todense()
#-----------------------Pre-processing ---------------------------
print('Pre-processing Train set...')
# Map every token to its vocabulary index, 0 for out-of-vocabulary tokens.
# A dict lookup replaces the original list.index() scan, cutting the
# per-token cost from O(|vocabulary|) to O(1); results are identical because
# `vocabulary` is sorted and duplicate-free. This also avoids the original
# loops that shadowed the builtin `list`.
word_to_index = {word: idx for idx, word in enumerate(vocabulary)}

def _tokens_to_index_frame(tokenized_sentences):
    """Return a zero-padded int DataFrame of vocabulary indices."""
    indexed = [[word_to_index.get(token, 0) for token in sentence]
               for sentence in tokenized_sentences]
    frame = pd.DataFrame(indexed, dtype='int32')
    # Rows shorter than the longest sentence are NaN-padded by pandas;
    # replace with 0 (the padding/OOV index) and force integer dtype.
    return frame.fillna(0).astype(int)

tokens_train = _tokens_to_index_frame(tokenized_text_train)
print('Pre-processing Test set...')
tokens_test = _tokens_to_index_frame(tokenized_text_test)
#--------------------End of Pre-processing---------------------------
print('Making some more pre-processing to train and test sets...')
#Bring both sets to same shape (Choose how many words to use)
max_words_in_sentence=40
#Shorten or extend Train set to reach selected length
if tokens_train.shape[1]>max_words_in_sentence:
    # Too wide: drop the surplus token columns.
    tokens_train = tokens_train.drop(tokens_train.columns[[range(max_words_in_sentence,tokens_train.shape[1])]], axis=1)
else:
    # Too narrow: pad with zero-filled columns up to the target width.
    for col in range(tokens_train.shape[1],max_words_in_sentence):
        tokens_train[col]=0
#Shorten or extend Test set to reach selected length
if tokens_test.shape[1] > max_words_in_sentence:
    tokens_test = tokens_test.drop(tokens_test.columns[[range(max_words_in_sentence, tokens_test.shape[1])]],
                                   axis=1)
else:
    for col in range(tokens_test.shape[1], max_words_in_sentence):
        tokens_test[col] = 0
#Define train and Test sets
train_x = np.array(tokens_train)
train_y = np.array(df_train['Sentiment'])
test_x = np.array(tokens_test)
test_y = np.array(df_test['Sentiment'])
#Transform target variable to One-Hot Encoding
encoder1 = LabelEncoder()
encoder1.fit(train_y)
encoded_train_Y = encoder1.transform(train_y)
dummy_train_y = np_utils.to_categorical(encoded_train_Y)
dummy_train_y.astype(int)  # NOTE(review): astype is not in-place; result discarded
# Embedding input dimension (+1 for the padding/OOV index 0) and sequence length.
l=len(vocabulary)+1
inp=train_x.shape[1]
#Build a Convolutional Network model
print('Building the best model in the world...')
# Embedding -> 1-D convolution -> dense classifier over 5 sentiment classes.
model = Sequential()
model.add(Embedding(l, 32, input_length=inp))
model.add(Convolution1D(32, 3, padding='same'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
print('Fitting the best model in the world...')
model.fit(train_x, dummy_train_y, epochs=30, batch_size=8, verbose=2)
# Predict test set
print('Predicting test set...')
test_sentiments = model.predict(test_x)
# NOTE(review): thresholding the class probabilities before argmax changes
# the prediction when no class exceeds 0.5 (all rows become 0, so argmax
# returns class 0) — confirm intent.
test_sentiments[test_sentiments<0.5]=0
test_sentiments[test_sentiments>0.5]=1
test_sentiments.astype(int)  # NOTE(review): astype is not in-place; result discarded
Prediction = np.argmax(test_sentiments, axis=1)
print('Accuracy:', accuracy_score(test_y,Prediction))
|
#! bin/python
from pylab import *
from matplotlib import patches
from Function import *
# Draw axis tick marks inside the plot frame.
rcParams['xtick.direction'] = 'in'
rcParams['ytick.direction'] = 'in'
class Cursor():
    """Interactive viewer for a model disk velocity map.

    Mouse motion sets the position angle (PA) from the cursor direction;
    the scroll wheel changes the inclination in 2-degree steps.  Every
    event rebuilds the model image via img_Model and redraws.
    """
    def __init__(self, ax):
        self.Inc = 45.  # disk inclination (degrees)
        self.PA = 0  # position angle (degrees)
        self.Vr10 = 10  # velocity parameter forwarded to img_Model
        self.vmax = 5  # colour-scale maximum (km/s)
        self.vmin = -5  # colour-scale minimum (km/s)
        self.cmap = 'bwr'
        # `bound` and `binn` come from `from Function import *` —
        # NOTE(review): assumed map half-size plus half a bin; confirm.
        self.extent = bound+binn/2
        self.ax = ax
        self.figure_layout() # preset the layout
    def draw_figure(self, Vmod):
        """Redraw the velocity map *Vmod* with current Inc/PA annotations."""
        self.ax.cla() # clear the main axes
        self.cax.cla() # clear the colorbar
        im = self.ax.imshow(Vmod, extent=(self.extent, -self.extent, -self.extent, self.extent), zorder=0,
                       vmax=self.vmax, vmin=self.vmin, cmap=self.cmap, origin='lower')
        self.ax.annotate(r'$\theta_{\rm inc}$='+'%.d'%self.Inc+'$^\circ$',
                         xycoords='axes fraction', xy=(0.05, 0.9))
        self.ax.annotate(r'$PA$='+'%.d'%self.PA+'$^\circ$',
                         xycoords='axes fraction', xy=(0.05, 0.8))
        self.figure_layout()
        cbar = colorbar(im,cax=self.cax)
        cbar.set_label(r'km s$^{-1}$')
        cbar.set_ticks(arange(self.vmin, self.vmax,1))
        # NOTE(review): relies on `plt` being exported by pylab's star import.
        plt.draw()
    def figure_layout(self):
        """Apply fixed axes limits, labels, and the colorbar axes placement."""
        self.ax.set_xlim(0.4, -0.4)
        self.ax.set_ylim(-0.4, 0.4)
        self.ax.set_xlabel(r'$\delta X$ (arcsec)')
        self.ax.set_ylabel(r'$\delta Y$ (arcsec)')
        self.ax.set_aspect('equal')
        self.cax = axes([0.87, 0.1, 0.03, 0.87])
        subplots_adjust(left=0.15, right=0.85, bottom=0.1, top=0.97)
    def mouse_move(self,event):
        # Derive PA from the cursor direction relative to the map centre.
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        self.PA=rad2deg(arctan(x/y))
        self.PA=180+self.PA if y<0 else self.PA  # resolve the arctan half-plane ambiguity
        Vmod=img_Model(Vr10=self.Vr10,xcen=0.,ycen=0.,Inc=self.Inc,PA=-self.PA,mod='Kep')
        self.draw_figure(Vmod)
    def scrolling(self,event):
        # Scroll wheel tilts the disk by +/- 2 degrees, then redraws.
        if event.button=='up':
            self.Inc += 2
        elif event.button=='down':
            self.Inc -= 2
        Vmod=img_Model(Vr10=self.Vr10,xcen=0.,ycen=0.,Inc=self.Inc,PA=-self.PA,mod='Kep')
        self.draw_figure(Vmod)
# Build the interactive figure: mouse motion sets the disk PA, scrolling
# changes the inclination; the velocity map is redrawn on each event.
fig = figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
disk_model = Cursor(ax)
#disk_model.Vr10 = 8
#disk_model.cmap = 'jet'
mouse = disk_model
connect('motion_notify_event', mouse.mouse_move)
connect('scroll_event', mouse.scrolling)
# Initial model image before any interaction.
Vmod=img_Model(Vr10=disk_model.Vr10,xcen=0.,ycen=0.,Inc=disk_model.Inc,PA=-disk_model.PA,mod='Kep')
disk_model.draw_figure(Vmod)
show()
|
from importlib import import_module

# Dynamically load class 'Complex' from module 'testclass' and inspect an
# instance. BUG FIX: the original used Python 2 `print` statements, which are
# a SyntaxError on Python 3; the call form below behaves identically on both.
mod = import_module('testclass')
met = getattr(mod, 'Complex')
t = met(4.0, -4.5)
print(t.__module__)
print(dir(t))
import numpy as np
def smooth_x(wave, x, s, g,):
    """Boxcar-smooth each row of *x* along axis 1.

    Column i of the result holds the mean of columns [i-s, i+s) of *x*
    for i in (s, n_cols-s); all other columns stay zero.

    NOTE(review): *wave* and *g* are accepted but unused — kept only for
    interface compatibility with existing callers; confirm before removing.
    """
    n_rows, n_cols = x.shape
    result = np.zeros([n_rows, n_cols])
    for col in range(s + 1, n_cols - s):
        window = x[:, int(col - s):int(col + s)]
        result[:, col] = np.mean(window, axis=1)
    return result
|
# Написать программу сложения и умножения двух шестнадцатеричных чисел. При
# этом каждое число представляется как массив, элементы которого — цифры числа.
# Например, пользователь ввёл A2 и C4F. Нужно сохранить их как [‘A’, ‘2’] и
# [‘C’, ‘4’, ‘F’] соответственно. Сумма чисел из примера: [‘C’, ‘F’, ‘1’],
# произведение - [‘7’, ‘C’, ‘9’, ‘F’, ‘E’].
from collections import OrderedDict, deque
stg = '0123456789ABCDEF'
# Ordered hex-digit -> value lookup table ('0' -> 0 ... 'F' -> 15).
my_dict = OrderedDict((digit, value) for value, digit in enumerate(stg))
def in_hex(num):
    """Convert a non-negative integer to a list of hex digit characters.

    Fixes: the original returned [] for num == 0 (so ''.join displayed
    nothing); it also scanned the whole digit dict for every remainder —
    indexing the digit string directly is O(1) and order-identical.
    """
    digits = '0123456789ABCDEF'
    if num == 0:
        return ['0']  # edge case previously mishandled
    res = deque()
    while num > 0:
        res.appendleft(digits[num % 16])
        num //= 16
    return list(res)
def in_dec(lst):
    """Convert a list of uppercase hex digit characters to an integer.

    An empty list yields 0, matching the original positional-sum version.
    """
    value = 0
    for digit in lst:
        # Horner's scheme: shift accumulated value one hex place, add digit.
        value = value * 16 + '0123456789ABCDEF'.index(digit)
    return value
# Read two hex numbers as digit lists, then print their sum and product.
hexnum1 = list(input('Введите первое шестнадцатиричное число: ').upper())
print(f'Введено: {hexnum1}')
hexnum2 = list(input('Введите второе шестнадцатиричное число: ').upper())
# BUG FIX: the original echoed hexnum1 here instead of the just-entered hexnum2.
print(f'Введено: {hexnum2}')
print(f" {''.join(hexnum1)} + {''.join(hexnum2)} = "
      f"{''.join(in_hex(in_dec(hexnum1) + in_dec(hexnum2)))}")
print(f'Результат сложения: {in_hex(in_dec(hexnum1) + in_dec(hexnum2))}')
print(f" {''.join(hexnum1)} * {''.join(hexnum2)} = "
      f"{''.join(in_hex(in_dec(hexnum1) * in_dec(hexnum2)))}")
print(f'Результат умножения: {in_hex(in_dec(hexnum1) * in_dec(hexnum2))}')
|
"# -*- coding"
"""
@author:xda
@file:fund_share_update.py
@time:2021/01/20
"""
# 基金份额
import sys
sys.path.append('..')
from configure.settings import DBSelector
from common.BaseService import BaseService
import requests
import warnings
import datetime
import math
import re
warnings.filterwarnings("ignore")
from sqlalchemy.orm import relationship
from sqlalchemy import Column, INTEGER, VARCHAR, DATE, DateTime, ForeignKey, FLOAT
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class shared by the ORM models below.
Base = declarative_base()
class FundBaseInfoModel(Base):
    """ORM model: static base information for one LOF fund (one row per fund)."""
    # Table name.
    __tablename__ = 'LOF_BaseInfo'
    # Table columns (the Chinese `comment=` strings are stored in the DB
    # schema and are kept verbatim).
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    code = Column(VARCHAR(6), comment='基金代码', unique=True)  # fund code (unique)
    name = Column(VARCHAR(40), comment='基金名称')  # fund name
    category = Column(VARCHAR(8), comment='基金类别')  # fund category
    invest_type = Column(VARCHAR(6), comment='投资类别')  # investment type
    manager_name = Column(VARCHAR(48), comment='管理人呢名称')  # manager name
    issue_date = Column(DATE, comment='上市日期')  # listing date
    # child = relationship('ShareModel', back_populates='LOF_BaseInfo')
    child = relationship('ShareModel')  # one-to-many link to daily share rows
    def __str__(self):
        return f'<{self.code}><{self.name}>'
class ShareModel(Base):
    """ORM model: one fund-share observation (code, date, share size)."""
    # Table name.
    __tablename__ = 'LOF_Share'
    # Table columns.
    id = Column(INTEGER, primary_key=True, autoincrement=True)
    code = Column(VARCHAR(6), ForeignKey('LOF_BaseInfo.code'), comment='代码')  # fund code (FK)
    date = Column(DATE, comment='份额日期')  # date the share figure refers to
    share = Column(FLOAT, comment='份额 单位:万份')  # share size (unit: 10k units)
    parent = relationship('FundBaseInfoModel')  # many-to-one link to base info
    # parent = relationship('FundBaseInfoModel', back_populates='LOF_Share')
    crawltime = Column(DateTime, comment='爬取日期')  # time the row was crawled
class Fund(BaseService):
    """Shared crawler base: DB engine/session plumbing plus a retrying GET."""
    def __init__(self, first_use=False):
        super(Fund, self).__init__(f'../log/{self.__class__.__name__}.log')
        # first_use: subclasses create the DB tables on startup when True.
        self.first_use = first_use
        self.engine = self.get_engine()
    def get_engine(self):
        """Return the SQLAlchemy engine for the 'db_stock' database."""
        return DBSelector().get_engine('db_stock')
    def create_table(self):
        # Create every table declared on Base (idempotent).
        Base.metadata.create_all(self.engine)
    def get_session(self):
        # Returns a *session factory*; callers invoke it to obtain a Session.
        return sessionmaker(bind=self.engine)
    def get(self, url, retry=5, js=True):
        """GET *url* with up to *retry* attempts.

        Returns the parsed JSON body when js is True, the raw text otherwise,
        or None when every attempt raised.
        """
        start = 0
        while start < retry:
            try:
                # TLS verification disabled deliberately for these endpoints.
                response = self.session.get(url, headers=self.headers,
                                            verify=False)
            except Exception as e:
                self.logger.error(e)
                start += 1
            else:
                if js:
                    content = response.json()
                else:
                    content = response.text
                return content
        if start == retry:
            self.logger.error('重试太多')
            return None
class SZFundShare(Fund):
    """Crawler for LOF fund share data published by the Shenzhen exchange
    (fund.szse.cn); pages through the listing and stores rows via SQLAlchemy."""
    def __init__(self, first_use=False):
        super(SZFundShare, self).__init__(first_use)
        # self.url = 'http://fund.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1000_lf&TABKEY=tab1&PAGENO={}&selectJjlb=LOF&random=0.019172632634173903'
        self.all_fund_url = 'http://fund.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&CATALOGID=1000_lf&TABKEY=tab1&PAGENO={}&random=0.1292751130110099'
        self.session = requests.Session()
        self.logger.info('start...sz fund')
        self.LAST_TEXT = ''
        if self.first_use:
            # First run: create the ORM tables.
            self.create_table()
        self.db_session = self.get_session()
        self.sess = self.db_session()
        self.logger.info(f'{self.today} start to crawl....')
    @property
    def headers(self):
        # Request headers mimicking a browser XHR call to fund.szse.cn.
        _header= {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Host": "fund.szse.cn",
            "Pragma": "no-cache",
            "Referer": "http://fund.szse.cn/marketdata/fundslist/index.html?catalogId=1000_lf&selectJjlb=ETF",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
            "X-Request-Type": "ajax",
            "X-Requested-With": "XMLHttpRequest",
        }
        return _header
    def convert(self, float_str):
        # Best-effort float parse; returns None when the string is not numeric.
        try:
            return_float = float(float_str)
        except:
            return_float = None
        return return_float
    def json_parse(self, js_data):
        """Yield one tuple per fund row; set self.stop when a page is empty."""
        # NOTE(review): "yesterday" is computed locally — confirm it matches
        # the publication date of the crawled figures.
        date = (datetime.date.today() + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')
        data = js_data[0].get('data', [])
        if not data:
            self.stop = True
            return None
        for item in data:
            jjlb = item['jjlb']  # fund category
            tzlb = item['tzlb']  # investment type
            ssrq = item['ssrq']  # listing date
            name = self.extract_name(item['jjjcurl'])
            dqgm = self.convert_number(item['dqgm'])  # current scale (share size)
            glrmc = self.extract_glrmc(item['glrmc'])  # manager name
            code = self.extract_code(item['sys_key'])
            yield (jjlb, tzlb, ssrq, dqgm, glrmc, code, name, date)
    def extract_name(self, name):
        # Cell value is HTML like '<u>name</u>'; pull out the inner text.
        return re.search('<u>(.*?)</u>', name).group(1)
    def extract_code(self, code):
        # Extract the 6-digit fund code from its HTML wrapper.
        return re.search('<u>(\d{6})</u>', code).group(1)
    def extract_glrmc(self, glrmc):
        # The manager name may be wrapped in an anchor tag; unwrap if so.
        if re.search(('\<a.*?\>(.*?)\</a\>'), glrmc):
            glrmc = re.search(('\<a.*?\>(.*?)\</a\>'), glrmc).group(1).strip()
        return glrmc
    def model_process(self, jjlb, tzlb, ssrq, dqgm, glrmc, code, name, date):
        """Insert base info (only if new) plus one daily share row."""
        obj = self.sess.query(FundBaseInfoModel).filter_by(code=code).first()
        if not obj:
            base_info = FundBaseInfoModel(
                code=code,
                name=name,
                category=jjlb,
                invest_type=tzlb,
                manager_name=glrmc,
                issue_date=ssrq,
            )
            self.sess.add(base_info)
            self.sess.commit()
        # NOTE(review): unlike SHFundShare.process_model, the share row is
        # inserted unconditionally — re-running the same day duplicates rows.
        share_info = ShareModel(
            code=code,
            date=date,
            share=dqgm,
            crawltime=datetime.datetime.now(),
        )
        self.sess.add(share_info)
        self.sess.commit()
    def convert_number(self, s):
        # '1,234.5' -> 1234.5
        return float(s.replace(',', ''))
    def run(self):
        """Page through the listing until an empty page sets self.stop."""
        page = 1
        self.stop = False
        while not self.stop:
            content = self.get(self.all_fund_url.format(page))
            for item in self.json_parse(content):
                self.model_process(*item)
            page += 1
class SHFundShare(Fund):
    """Crawler for LOF/ETF fund share data published by the Shanghai exchange
    (query.sse.com.cn); *kind* selects which dataset run() crawls."""
    def __init__(self, kind,date,first_use=False):
        super(SHFundShare, self).__init__(first_use)
        self.lof_url = 'http://query.sse.com.cn/commonQuery.do?=&jsonCallBack=jsonpCallback1681&sqlId=COMMON_SSE_FUND_LOF_SCALE_CX_S&pageHelp.pageSize=10000&FILEDATE={}&_=161146986468'
        self.etf_url = 'http://query.sse.com.cn/commonQuery.do?jsonCallBack=jsonpCallback28550&isPagination=true&pageHelp.pageSize=25&pageHelp.pageNo={}&pageHelp.cacheSize=1&sqlId=COMMON_SSE_ZQPZ_ETFZL_XXPL_ETFGM_SEARCH_L&STAT_DATE={}&pageHelp.beginPage={}&pageHelp.endPage=30&_=1611473902414'
        # self.today_ = '20210122' # LOF
        if date=='now':
            # Default to yesterday, formatted YYYYMMDD as the LOF query expects.
            self.today_ = (datetime.datetime.now()+ datetime.timedelta(days=-1)).strftime('%Y%m%d')
        else:
            self.today_=self.today = date
        # NOTE(review): when date=='now', self.today is not assigned here, so
        # the ETF query date relies on an attribute inherited from BaseService
        # — confirm its format matches 'YYYY-MM-DD'.
        # self.today ='2021-01-22' # ETF
        self.ETF_COUNT_PER_PAGE = 25
        self.url_option_dict = {
            'ETF': {'url': self.etf_url, 'date': self.today},
            'LOF': {'url': self.lof_url, 'date': self.today_}
        }
        self.kind=kind.lower()
        self.session = requests.Session()
        self.logger.info('start...sh fund')
        self.LAST_TEXT = ''
        if self.first_use:
            self.create_table()
        self.db_session = self.get_session()
        self.sess = self.db_session()
    @property
    def headers(self):
        # Browser-like headers; the Referer is required by the SSE endpoint.
        return {
            "Host": "query.sse.com.cn",
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip, deflate",
            "Connection": "keep-alive",
            "Referer": "http://www.sse.com.cn/market/funddata/volumn/lofvolumn/",
        }
    def crawl_lof(self):
        """Fetch the (single-page) LOF listing and store it."""
        options = self.url_option_dict['LOF']
        date = options.get('date')
        url = options.get('url')
        content = self.get(url.format(date), js=False)
        # jsonp2json (from BaseService) strips the JSONP callback wrapper.
        js_data = self.jsonp2json(content)
        self.process_lof(js_data)
    def process_lof(self, js_data):
        # Store one share row per LOF fund in the response.
        result = js_data.get('result')
        for item in result:
            code = item['FUND_CODE']
            name = item['FUND_ABBR']
            date = item['TRADE_DATE']
            try:
                share = float(item['INTERNAL_VOL'].replace(',',''))
            except Exception as e:
                print(e)
                share=None
            self.process_model(code, name, date, share, 'LOF')
    def crawl_etf(self):
        """Page through the ETF listing for the configured date."""
        options = self.url_option_dict['ETF']
        date = options.get('date')
        url = options.get('url')
        current_page = 1
        while True:
            content = self.get(url.format(current_page, date, current_page), js=False)
            js_data = self.jsonp2json(content)
            total_count = js_data.get('pageHelp').get('total')
            print(f'page : {current_page}')
            self.process_etf(js_data)
            # Pages of ETF_COUNT_PER_PAGE entries each.
            # NOTE(review): the check runs before the increment, so one page
            # beyond max_page is fetched (expected to be empty) before exit.
            max_page = math.ceil(total_count / self.ETF_COUNT_PER_PAGE)
            if current_page > max_page:
                break
            current_page += 1
    def process_etf(self, js_data):
        # Store one share row per ETF in the page.
        result = js_data.get('result')
        for item in result:
            code = item['SEC_CODE']
            name = item['SEC_NAME']
            date = item['STAT_DATE']
            share = item['TOT_VOL']
            # NOTE(review): on conversion failure `share` keeps the raw
            # string and is still passed on to process_model.
            try:
                share = float(share)
            except Exception as e:
                print(e)
            self.process_model(code, name, date, share, 'ETF')
    def run(self):
        """Crawl the ETF or LOF dataset, selected by self.kind."""
        # for type_, options in self.url_option_dict.items():
        if self.kind=='etf':
            self.logger.info('crawling etf .....')
            self.crawl_etf()
        if self.kind=='lof':
            self.logger.info('crawling lof .....')
            self.crawl_lof()
    def process_model(self, code, name, date, share, type_):
        """Insert base info if the fund is new, then the (code, date) share
        row if it is not already present (idempotent per day)."""
        obj = self.sess.query(FundBaseInfoModel).filter_by(code=code).first()
        if not obj:
            obj = FundBaseInfoModel(
                code=code,
                name=name,
                category=type_,
                invest_type='',
                manager_name='',
                issue_date=None,
            )
            try:
                self.sess.add(obj)
            except Exception as e:
                print(e)
            else:
                self.sess.commit()
                print(f'插入一条记录{code},{date}')
        if not self.sess.query(ShareModel).filter_by(code=code, date=date).first():
            share_info = ShareModel(
                code=code,
                date=date,
                share=share,
                crawltime=datetime.datetime.now(),
            )
            try:
                self.sess.add(share_info)
            except Exception as e:
                print(e)
            else:
                print(f'插入一条记录{code},{date}')
                self.sess.commit()
if __name__ == '__main__':
    # Crawl the Shenzhen LOF listing first.
    app = SZFundShare(first_use=False)
    app.run()
    # BUG FIX: SHFundShare.__init__(self, kind, date, first_use=False)
    # requires `kind` and `date`; the original SHFundShare(first_use=False)
    # raised TypeError before crawling anything. Crawl both Shanghai
    # datasets using the default "yesterday" date.
    for kind in ('etf', 'lof'):
        app = SHFundShare(kind, 'now', first_use=False)
        app.run()
|
# -*- coding: utf-8 -*-
from twisted.internet.protocol import Protocol
import logging
from twisted.internet import reactor
import struct
import ipaddress
from c2w.main.constants import ROOM_IDS
# Configure root logging and a module-level logger for this protocol.
logging.basicConfig()
moduleLogger = logging.getLogger('c2w.protocol.tcp_chat_server_protocol')
class c2wTcpChatServerProtocol(Protocol):
    # Server-side TCP implementation of the c2w chat protocol; one instance
    # per connected client (methods continue below this block).
    def __init__(self, serverProxy, clientAddress, clientPort):
        """
        :param serverProxy: The serverProxy, which the protocol must use
            to interact with the user and movie store (i.e., the list of users
            and movies) in the server.
        :param clientAddress: The IP address (or the name) of the c2w server,
            given by the user.
        :param clientPort: The port number used by the c2w server,
            given by the user.
        Class implementing the TCP version of the client protocol.
        .. note::
            You must write the implementation of this class.
            Each instance must have at least the following attribute:
        .. attribute:: serverProxy
            The serverProxy, which the protocol must use
            to interact with the user and movie store in the server.
        .. attribute:: clientAddress
            The IP address of the client corresponding to this
            protocol instance.
        .. attribute:: clientPort
            The port number used by the client corresponding to this
            protocol instance.
        .. note::
            You must add attributes and methods to this class in order
            to have a working and complete implementation of the c2w
            protocol.
        .. note::
            The IP address and port number of the client are provided
            only for the sake of completeness, you do not need to use
            them, as a TCP connection is already associated with only
            one client.
        """
        #: The IP address of the client corresponding to this
        #: protocol instance.
        self.clientAddress = clientAddress
        #: The port number used by the client corresponding to this
        #: protocol instance.
        self.clientPort = clientPort
        #: The serverProxy, which the protocol must use
        #: to interact with the user and movie store in the server.
        self.serverProxy = serverProxy
        # Queue of unacknowledged packets; each entry is
        # [seq, attempt_count, acked_flag, packet_bytes, host_port].
        self.filattente=[]
        self.nom="" # temporarily stores the username during its login request
        self.monNumeroSequence=0 # the server's sequence number, incremented as packets are sent
        # Stores, for each user, the pair (host_port, expected sequence number).
        self.controlNumeroSequence=[]
        # Reassembly buffer for a partially received packet.
        self.temp = b''
#Fonction qui permet d'incrementer le numero de sequence jusqu'à 4095
def incrementerNumeroSequence(self,numSequence):
if (numSequence==4095): #4095= (2 exposant 12)-1
numSequence=0
else:
numSequence+=1
return numSequence
# fonction pour verifier si on a recu un acquittement
def traiterAcquittement(self,numSeq,hostPort):
for j in self.filattente:
if (j[4]==hostPort):
if (j[0]==numSeq):
j[2]=1
print("Acquittement bien recu")
#fonction pour envoyer le paquet si jamais on a toujours pas recu d ack
def send_And_Wait(self,hostPort):
for j in self.filattente:
if (j[4]==hostPort):
if (j[1] <= 7):
if (j[2] == 0):
self.transport.write(j[3])
j[1]+=1
reactor.callLater(1,self.send_And_Wait,j[4])
elif(j[2] == 1):
print("etat de la liste avant suppression",self.filattente)
self.filattente.remove(j)
print("etat de la liste apres suppression",self.filattente)
else:
print("le paquet a djaaaaaaa")
self.filattente.remove(j)
if(len(self.serverProxy.getUserList())!=0):
user = self.serverProxy.getUserByAddress(hostPort)
print(user)
print("Supprimons l'utilisateur car il ne repond plus")
self.serverProxy.updateUserChatroom(user.userName,ROOM_IDS.OUT_OF_THE_SYSTEM_ROOM)
self.serverProxy.removeUser(self.serverProxy.getUserByAddress(hostPort).userName)
for u in self.controlNumeroSequence:
if(u[0]==hostPort):
self.controlNumeroSequence.remove(u)
print("SUCCES DE LA SUPPRESSION DU USER")
#Mise à jour de la liste des utilisateurs
#le paquet sera envoyé à chaque utilisateur de la main room
for u in self.serverProxy.getUserList():
if (u.userChatRoom==ROOM_IDS.MAIN_ROOM):
print("le paquet sera envoyé à chaque utilisateur de la main room")
bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MAIN_ROOM)
self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
u.userChatInstance.transport.write(bufserver)
print("le paquet est",bufserver)
self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
reactor.callLater(1,self.send_And_Wait,u.userAddress)
print("MISE A JOUR TERMINEE ")
print("YOUPIIIIIIIIIIIIII")
    # Build the movie-list packet.
    def paquetListFilms(self):
        """Serialize the server's movie list into one protocol packet.

        Per movie: !I (IPv4 as int), !h port, !h entry length, !b movie id,
        then the UTF-8 title. Header: !h total length, !h (seq << 4) | type.
        """
        paquetMovie=bytearray()
        compteur=0
        print("////////////////////////////////////////",self.serverProxy.getMovieList())
        for k in self.serverProxy.getMovieList():
            longueurFilm= 0
            ipFilm=k.movieIpAddress
            convIpFilm=int(ipaddress.IPv4Address(ipFilm)) # convert the IP address to an integer
            portFilm=k.moviePort
            idFilm=k.movieId
            print("l'identifiant du film est :",idFilm)
            titreFilm=k.movieTitle
            # 9 fixed bytes (4+2+2+1) plus the encoded title length.
            longueurFilm= 9+len(titreFilm)
            compteur=compteur+longueurFilm
            # '%r' of an int renders the same as '%d', yielding e.g. '12s'.
            paquetMovie+=struct.pack('!Ihhb%rs'%len(titreFilm),convIpFilm,portFilm,longueurFilm,idFilm,titreFilm.encode('utf-8'))
            print("le corps est :",paquetMovie)
        TypEnvoieFilm= 5
        # NOTE(review): the sequence number is hard-coded to 1 here instead
        # of using self.monNumeroSequence — confirm this is intended.
        NumSeq=1
        seqTypEnvoieFilm= (NumSeq << 4) | TypEnvoieFilm
        compteur= compteur+4
        entete=struct.pack('!hh',compteur,seqTypEnvoieFilm)
        paquetTotal=entete+paquetMovie
        print("l'entete est :",entete)
        print("le paquet total est:",paquetTotal)
        return paquetTotal
# fonction pour former le paquet de la liste des utilisateurs
def paquetListUser(self,numSeq,room):
paquetUserMain=bytearray()
paquetUserM=bytearray()
paquetOnlyUserInMovie=bytearray()
"""la variable suivante contiendra soit les utilisateurs dans la main room
et ceux de toutes les movies room ou soit d'une movie room specifique """
paquetUserFinal=bytearray()
compteur=0
# construction de paquet en direction de la main room
if (room==ROOM_IDS.MAIN_ROOM):
for u in self.serverProxy.getUserList():
print("la liste des users pour c2w est:",self.serverProxy.getUserList())
if(u.userChatRoom==ROOM_IDS.MAIN_ROOM):
statut=0
nomUtilisateurA=u.userName
#compteur=compteur+2+len(usernameA)
paquetUserMain+=struct.pack('!bb%is'%len(nomUtilisateurA),len(nomUtilisateurA),statut,nomUtilisateurA.encode('utf−8'))
else:
statut=1
nomUtilisateurM=u.userName
#compteur=compteur+2+len(usernameM)
paquetUserM+=struct.pack('!bb%is'%len(nomUtilisateurM),len(nomUtilisateurM),statut,nomUtilisateurM.encode('utf−8'))
paquetUser=paquetUserMain+paquetUserM
paquetUserFinal=paquetUser
# construction de paquet en direction d'une movie room
else :
for u in self.serverProxy.getUserList():
if (u.userChatRoom!=ROOM_IDS.MAIN_ROOM ):
for m in self.serverProxy.getMovieList():
if(m.movieTitle==u.userChatRoom):
statut=m.movieId
nomUtilisateurMovie=u.userName
print("//////////////////////////////////////////",nomUtilisateurMovie,"movieid",statut)
paquetOnlyUserInMovie+=struct.pack('!bb%is'%len(nomUtilisateurMovie),len(nomUtilisateurMovie),statut,nomUtilisateurMovie.encode('utf−8'))
paquetUserFinal=paquetOnlyUserInMovie
print("le corps du paquet utilisateur est :",paquetUserFinal)
TypEnvoieUser= 6
seqTypEnvoieFilm= (numSeq << 4) | TypEnvoieUser
compteur= len(paquetUserFinal)+4
entete=struct.pack('!hh',compteur,seqTypEnvoieFilm) # construction de l'entete
paquetTotal=entete+paquetUserFinal
print("l'entete est :",entete)
print("lepaquet total est:",paquetTotal)
return paquetTotal
    def dataReceived(self, data):
        """
        :param data: The data received from the client (not necessarily
            an entire message!)
        Twisted calls this method whenever new data is received on this
        connection.

        Accumulates bytes in self.temp until a full packet (as declared by
        the 2-byte length header) has arrived, then hands it to
        traitementTCP and clears the buffer.
        """
        # Check that at least the 4-byte packet header has been received.
        self.temp += data
        if (len(self.temp) >= 4):
            # !h length, !h (seq << 4 | type), then the payload bytes.
            reception=struct.unpack('!hh%is'%(len(self.temp)-4),self.temp)
            print(reception)
            longueur= int(reception[0])
            msg= str(reception[2].decode('utf-8'))
            seqType= int(str(reception[1]))
            Type= seqType & 15
            NumSeq=seqType >> 4
            print("la longeur est",longueur)
            print("le username est ", msg)
            print("le type est ",Type)
            print("le numero de sequence est", NumSeq)
            print("la longeur est du self temp est ",len(self.temp))
            # NOTE(review): packets are only processed when the buffer length
            # matches the declared length exactly; if two packets arrive in a
            # single TCP segment, len(self.temp) > longueur and the buffer is
            # never flushed — confirm against the client's sending pattern.
            if( longueur == len(self.temp)):
                print("Tout le paquet est arrivé")
                # Full packet received: process it and reset the buffer.
                self.traitementTCP(self.temp)
                self.temp = b''
            else:
                print("paquet en cours de chargement...")
                pass
def traitementTCP(self, data):
    """Dispatch one complete, reassembled TCP packet from this client.

    Packet layout (struct '!hh<payload>'): 2-byte total length, 2-byte word
    packing the sequence number (high bits) and message type (low 4 bits),
    then a UTF-8 payload.

    Types handled here: 0 = acknowledgement, 1 = connection request,
    2 = disconnect request, 3 = join-movie-room request, 4 = back-to-main-room
    request, 7 = connection accepted (sent), 8 = connection refused (sent),
    9 = chat message to relay.

    NOTE(review): indentation was reconstructed from flattened source —
    the nesting of some broadcast loops should be confirmed against history.
    """
    host_port = (self.clientAddress, self.clientPort)
    # Unpack header: total length, sequence/type word, payload bytes.
    reception=struct.unpack('!hh%is'%(len(data)-4),reception_data) if False else struct.unpack('!hh%is'%(len(data)-4),data)
    print(reception)
    longueur= int(reception[0])              # declared total packet length
    msg= str(reception[2].decode('utf-8'))   # payload: username / room name / chat text
    seqType= int(str(reception[1]))
    Type= seqType & 15                       # low 4 bits: message type
    NumSeq=seqType >> 4                      # remaining bits: sequence number
    """if (Type!=0 and Type!=1):
        for u in self.serverProxy.getUserList():
            if (u.userAddress==host_port):
                u.userChatInstance.monNumeroSequence=NumSeq"""
    # Optimistically advance the expected sequence number for this client for
    # the request types that consume a sequence slot (3, 4, 9). It is rolled
    # back below when the received sequence number does not match.
    if(len(self.controlNumeroSequence)!=0):
        for u in self.controlNumeroSequence:
            if(u[0]==host_port):
                if (Type==3 or Type==4 or Type==9):
                    u[1]+=1
                    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%",u[1])
    print("Tout le paquet est arrivé 2")
    # Type 0: the client acknowledged one of our packets.
    if(Type==0):
        self.traiterAcquittement(NumSeq,host_port)
    # Type 1: connection request — reply with accept (7) or refuse (8).
    if (Type==1):
        self.nom=msg
        # First acknowledge the request itself (type 0, echoing NumSeq).
        TypeAcq = 0
        decalage= NumSeq << 4
        seqTypAcq= decalage | TypeAcq
        print("sequence et type concaténé pour le 1er acquittement est", seqTypAcq)
        bufserver= struct.pack('!hh',4,seqTypAcq)
        self.transport.write(bufserver)
        # End of the first acknowledgement.
        """ s il depasse le nombre de caractere permis ou si le pseudo est déja
        utilisé, on lui envoie un message d erreur"""
        # Refuse (type 8) when the name is too long or already in use.
        if(len(self.nom)>251 or self.serverProxy.userExists(self.nom)):
            TypeRejectConn= 8
            monNumSeq=0
            seqTypRejectConn= (monNumSeq << 4) | TypeRejectConn
            bufserver= struct.pack('!hh',4,seqTypRejectConn)
            self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
            self.transport.write(bufserver)
            print(self.serverProxy.getUserList())
            # Queue for send-and-wait retransmission until acknowledged.
            self.filattente.append([0,1,0,bufserver,host_port])
            reactor.callLater(1,self.send_And_Wait,host_port)
        # Accept the connection (type 7).
        else:
            TypeAccepetConn= 7
            monNumSeq=0
            seqTypAccepetConn= (monNumSeq << 4) | TypeAccepetConn
            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
            print("sequence et type concaténé pour l'acceptation est",seqTypAccepetConn)
            bufserver= struct.pack('!hh',4,seqTypAccepetConn)
            self.transport.write(bufserver)
            self.filattente.append([0,1,0,bufserver,host_port])
            reactor.callLater(1,self.send_And_Wait,host_port)
        # End of connection acceptance.
    # Ack of our acceptance (sequence 0): register the user and send the movie list.
    if(Type==0 and NumSeq==0):
        print("oooooooooooooooooooooooooooooooooooooaaaaaaaaaaaaaaaaaaaaaaaaaaa")
        # Add the user to the server-managed list.
        self.serverProxy.addUser(self.nom,ROOM_IDS.MAIN_ROOM,self,host_port)
        # Add the user to our own sequence-tracking list.
        self.controlNumeroSequence.append([host_port,0])
        print("la liste des users pour c2w est:",self.serverProxy.getUserList())
        print("il ya dans la liste", len(self.serverProxy.getUserList()))
        print("un utilisateur lambda dans la liste des users pour c2w est:",self.serverProxy.getUserList()[0].userName)
        print("********************************",self.nom)
        bufserver=self.paquetListFilms()
        self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
        self.transport.write(bufserver)
        self.filattente.append([1,1,0,bufserver,host_port])
        reactor.callLater(1,self.send_And_Wait,host_port)
        # Tell the other main-room users about the newcomer.
        if(len(self.serverProxy.getUserList())>0):
            newUser = self.serverProxy.getUserByAddress(host_port).userName
            print("BIENVENUE",newUser)
            for u in self.serverProxy.getUserList():
                if (u.userChatRoom==ROOM_IDS.MAIN_ROOM):
                    if (u.userName != newUser ) :
                        if(u.userAddress!=host_port):
                            print("le paquet sera envoyé à chaque utilisateur de la main room")
                            print("##########################################",u.userChatInstance.monNumeroSequence)
                            paquet=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MAIN_ROOM)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,paquet,u.userAddress])
                            u.userChatInstance.transport.write(paquet)
                            print("le paquet est",paquet)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
    # Ack of the movie list (sequence 1): send the user list.
    if (Type==0 and NumSeq==1):
        print("zezezezezezezezezezezezezezezezezezezezeezezezezezeze")
        #print(ROOM_IDS.MAIN_ROOM)
        bufserver=self.paquetListUser(2,ROOM_IDS.MAIN_ROOM)
        self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
        self.transport.write(bufserver)
        self.filattente.append([2,1,0,bufserver,host_port])
        reactor.callLater(1,self.send_And_Wait,host_port)
    # Type 3: request to enter a movie room.
    if(Type==3):
        print("DEMANDE D'ACCES A UNE MOVIE ROOM")
        # Send the acknowledgement.
        TypeAcq = 0
        decalage= NumSeq << 4
        seqTypAcq= decalage | TypeAcq
        print("sequence et type concaténé pour le 1er acquittement est", seqTypAcq)
        bufserver= struct.pack('!hh',4,seqTypAcq)
        self.transport.write(bufserver)
        # End of the acknowledgement.
        for u in self.controlNumeroSequence:
            # Only act when the packet carries the expected sequence number.
            if(u[0]==host_port and u[1]==NumSeq):
                nameOfRoom=str(reception[2].decode('utf-8'))
                #self.roomName=roomName
                user=self.serverProxy.getUserByAddress(host_port)
                if(user.userChatRoom==ROOM_IDS.MAIN_ROOM):
                    print("il y a un utilisateur qui veut regarder :"+nameOfRoom)
                    self.serverProxy.startStreamingMovie(nameOfRoom)
                    username = self.serverProxy.getUserByAddress(host_port).userName
                    print(username)
                    print(host_port)
                    self.serverProxy.updateUserChatroom(username,nameOfRoom)
                    # Broadcast the updated user list.
                    print("la liste des users MAJ pour c2w est:",self.serverProxy.getUserList())
                    for u in self.serverProxy.getUserList():
                        # Packet for main-room users.
                        if (u.userChatRoom==ROOM_IDS.MAIN_ROOM):
                            print("le paquet sera envoyé à chaque utilisateur de la main room")
                            bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MAIN_ROOM)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
                            u.userChatInstance.transport.write(bufserver)
                            print("le paquet est",bufserver)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
                        # Packet for movie-room users.
                        else:
                            print("le paquet sera envoyé à chaque utilisateur de la movie room")
                            bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MOVIE_ROOM)
                            print("le paquet est",bufserver)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
                            u.userChatInstance.transport.write(bufserver)
                            print("le paquet est",bufserver)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
            # Wrong sequence number: roll back the optimistic increment above.
            elif(u[0]==host_port):
                u[1]-=1
    # Type 4: request to return to the main room.
    if(Type==4):
        print("DEMANDE DE RETOUR A LA MAIN ROOM")
        # Send the acknowledgement.
        TypeAcq = 0
        decalage= NumSeq << 4
        seqTypAcq= decalage | TypeAcq
        print("sequence et type concaténé pour le 1er acquittement est", seqTypAcq)
        bufserver= struct.pack('!hh',4,seqTypAcq)
        self.transport.write(bufserver)
        # End of the acknowledgement.
        for u in self.controlNumeroSequence:
            if(u[0]==host_port and u[1]==NumSeq):
                user=self.serverProxy.getUserByAddress(host_port)
                if(user.userChatRoom!=ROOM_IDS.MAIN_ROOM):
                    print("il y a un utilisateur qui quitte une movie room")
                    self.serverProxy.stopStreamingMovie(user.userChatRoom)
                    username = self.serverProxy.getUserByAddress(host_port).userName
                    print(username)
                    print(host_port)
                    self.serverProxy.updateUserChatroom(username,ROOM_IDS.MAIN_ROOM)
                    # Broadcast the updated user list.
                    print("la liste des users MAJ pour c2w est:",self.serverProxy.getUserList())
                    for u in self.serverProxy.getUserList():
                        # Packet for main-room users.
                        if (u.userChatRoom==ROOM_IDS.MAIN_ROOM):
                            print("le paquet sera envoyé à chaque utilisateur de la main room")
                            bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MAIN_ROOM)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
                            u.userChatInstance.transport.write(bufserver)
                            print("le paquet est",bufserver)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
                        # Packet for movie-room users.
                        else:
                            print("le paquet sera envoyé à chaque utilisateur de la movie room")
                            bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MOVIE_ROOM)
                            print("le paquet est",bufserver)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
                            u.userChatInstance.transport.write(bufserver)
                            print("le paquet est",bufserver)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
            # Wrong sequence number: roll back the optimistic increment above.
            elif(u[0]==host_port):
                u[1]-=1
    # Type 2: disconnect request.
    if(Type==2):
        # Send the acknowledgement.
        TypeAcq = 0
        decalage= NumSeq << 4
        seqTypAcq= decalage | TypeAcq
        print("sequence et type concaténé pour le 1er acquittement est", seqTypAcq)
        bufserver= struct.pack('!hh',4,seqTypAcq)
        self.transport.write(bufserver)
        # End of the acknowledgement.
        user = self.serverProxy.getUserByAddress(host_port)
        if(user.userChatRoom==ROOM_IDS.MAIN_ROOM):
            print("Supprimons l'utilisateur")
            self.serverProxy.updateUserChatroom(user.userName,ROOM_IDS.OUT_OF_THE_SYSTEM_ROOM)
            self.serverProxy.removeUser(self.serverProxy.getUserByAddress(host_port).userName)
            # Drop this client from the sequence-tracking list.
            for u in self.controlNumeroSequence:
                if(u[0]==host_port):
                    self.controlNumeroSequence.remove(u)
                    print("on a une demande de déconnexion")
            # Broadcast the updated user list to everybody.
            for u in self.serverProxy.getUserList():
                if (u.userChatRoom==ROOM_IDS.MAIN_ROOM):
                    print("le paquet sera envoyé à chaque utilisateur de la main room")
                    bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MAIN_ROOM)
                    self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver])
                    u.userChatInstance.transport.write(bufserver)
                    print("le paquet est",bufserver)
                    #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                    reactor.callLater(1,self.send_And_Wait,u.userAddress)
                else:
                    print("le paquet sera envoyé à chaque utilisateur de la movie room")
                    bufserver=self.paquetListUser(u.userChatInstance.monNumeroSequence+1,ROOM_IDS.MOVIE_ROOM)
                    self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver])
                    u.userChatInstance.transport.write(bufserver)
                    print("le paquet est",bufserver)
                    #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                    reactor.callLater(1,self.send_And_Wait,u.userAddress)
    # Type 9: chat message — relay it to every user in the sender's room.
    if(Type==9):
        # Send the acknowledgement.
        TypeAcq = 0
        decalage= NumSeq << 4
        seqTypAcq= decalage | TypeAcq
        print("sequence et type concaténé pour le 1er acquittement est", seqTypAcq)
        bufserver= struct.pack('!hh',4,seqTypAcq)
        self.transport.write(bufserver)
        # End of the acknowledgement.
        for u in self.controlNumeroSequence:
            # For the sender, compare the expected sequence number with the
            # one in the packet: relay on match, otherwise roll back.
            if(u[0]==host_port and u[1]==NumSeq):
                sourceMsgChat=self.serverProxy.getUserByAddress(host_port)
                paquetARetransmettre=reception[2]
                print("le paquet a retransmettre aux autres utilisateurs est:",paquetARetransmettre)
                z=0
                # NOTE(review): this inner loop reuses the name `u`, shadowing
                # the control-list entry of the outer loop.
                for u in self.serverProxy.getUserList() :
                    # Relay only to users in the same room as the sender,
                    # excluding the sender itself.
                    if(u.userChatRoom == sourceMsgChat.userChatRoom):
                        if(u.userName!=sourceMsgChat.userName):
                            z+=1
                            print("//////////////////////////////////////////",z)
                            print("on envoie a:",u.userName)
                            seqTypAcq= ((self.monNumeroSequence)<< 4) | 9
                            bufserver= struct.pack('!hh',4+len(paquetARetransmettre),seqTypAcq)
                            bufserver+=paquetARetransmettre
                            u.userChatInstance.transport.write(bufserver)
                            #self.filattente.append([u.userChatInstance.monNumeroSequence+1,1,0,bufserver,u.userAddress])
                            print("le paquet de chat y compris l'entete est",bufserver)
                            #self.monNumeroSequence=self.incrementerNumeroSequence(self.monNumeroSequence)
                            #reactor.callLater(1,self.send_And_Wait,u.userAddress)
            # Wrong sequence number: roll back the optimistic increment above.
            elif(u[0]==host_port):
                u[1]-=1
    pass
|
# Generated by Django 2.2.12 on 2020-11-02 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL in barang.Online.jumlah."""

    dependencies = [
        ('barang', '0006_auto_20201102_0655'),
    ]

    operations = [
        # Make the quantity column nullable.
        migrations.AlterField(
            model_name='online',
            name='jumlah',
            field=models.IntegerField(null=True),
        ),
    ]
|
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##
require 'rex/proto/ntlm/message'
require 'rex/proto/http'
require 'metasploit/framework/credential_collection'
class MetasploitModule < Msf::Auxiliary
  include Msf::Auxiliary::Report
  include Msf::Auxiliary::AuthBrute
  include Msf::Auxiliary::Scanner

  def initialize
    super(
      'Name'           => 'OWA Exchange Web Services (EWS) Login Scanner',
      'Description'    => %q{
        This module attempts to log in to the Exchange Web Services, often
        exposed at https://example.com/ews/, using NTLM authentication. This
        method is faster and simpler than traditional form-based logins.

        In most cases, all you need to set is RHOSTS and some combination of
        user/pass files; the autodiscovery should find the location of the NTLM
        authentication point as well as the AD domain, and use them accordingly.
      },
      'Author'         => 'Rich Whitcroft',
      'License'        => MSF_LICENSE,
      'DefaultOptions' => { 'SSL' => true, 'VERBOSE' => false }
    )

    register_options(
      [
        OptBool.new('AUTODISCOVER', [ false, "Automatically discover domain URI", true ]),
        OptString.new('AD_DOMAIN', [ false, "The Active Directory domain name", nil ]),
        OptString.new('TARGETURI', [ false, "The location of the NTLM service", nil ]),
        Opt::RPORT(443)
      ])
  end

  # Brute-force NTLM authentication against a single host, reporting any
  # credential pair that does not come back with a 401.
  def run_host(ip)
    cli = Rex::Proto::Http::Client.new(datastore['RHOSTS'], datastore['RPORT'], {}, datastore['SSL'], datastore['SSLVersion'], nil, '', '')
    cli.set_config({ 'preferred_auth' => 'NTLM' })
    cli.connect

    domain = nil
    uri = nil

    # Resolve the NTLM endpoint and AD domain, either automatically or from
    # the user-supplied options.
    if datastore['AUTODISCOVER']
      domain, uri = autodiscover(cli)
      if domain && uri
        print_good("Found NTLM service at #{uri} for domain #{domain}.")
      else
        print_error("Failed to autodiscover - try manually")
        return
      end
    elsif datastore['AD_DOMAIN'] && datastore['TARGETURI']
      domain = datastore['AD_DOMAIN']
      uri = datastore['TARGETURI']
      uri << "/" unless uri.chars.last == "/"
    else
      print_error("You must set AD_DOMAIN and TARGETURI if not using autodiscover.")
      return
    end

    cli.set_config({ 'domain' => domain })

    creds = Metasploit::Framework::CredentialCollection.new(
      blank_passwords: datastore['BLANK_PASSWORDS'],
      pass_file: datastore['PASS_FILE'],
      password: datastore['PASSWORD'],
      user_file: datastore['USER_FILE'],
      userpass_file: datastore['USERPASS_FILE'],
      username: datastore['USERNAME'],
      user_as_pass: datastore['USER_AS_PASS']
    )

    creds.each do |cred|
      begin
        req = cli.request_raw({
          'uri' => uri,
          'method' => 'GET',
          'username' => cred.public,
          'password' => cred.private
        })
        res = cli.send_recv(req)
      rescue ::Rex::ConnectionError, Errno::ECONNREFUSED, Errno::ETIMEDOUT
        print_error("Connection failed")
        next
      end

      # BUGFIX: send_recv can return nil (e.g. timeout / empty response); the
      # previous code called res.code unconditionally and crashed with
      # NoMethodError instead of moving on to the next credential.
      unless res
        print_error("No response received")
        next
      end

      # Anything other than a 401 is treated as a successful authentication.
      if res.code != 401
        print_brute :level => :good, :ip => ip, :msg => "Successful login: #{cred.to_s}"
        report_cred(
          ip: ip,
          port: datastore['RPORT'],
          service_name: 'owa_ews',
          user: cred.public,
          password: cred.private
        )
        return if datastore['STOP_ON_SUCCESS']
      else
        vprint_brute :level => :verror, :ip => ip, :msg => "Failed login: #{cred.to_s}"
      end
    end
  end

  # Probe a few common Exchange paths for an NTLM challenge; returns
  # [domain, uri] on success, [nil, nil] otherwise. The AD domain is parsed
  # out of the NTLM Type 2 message's target name.
  def autodiscover(cli)
    uris = %w[ /ews/ /rpc/ /public/ ]
    uris.each do |uri|
      begin
        # Send an NTLM Type 1 (negotiate) message to elicit the challenge.
        req = cli.request_raw({
          'encode' => true,
          'uri' => uri,
          'method' => 'GET',
          'headers' => {'Authorization' => 'NTLM TlRMTVNTUAABAAAAB4IIogAAAAAAAAAAAAAAAAAAAAAGAbEdAAAADw=='}
        })

        res = cli.send_recv(req)
      rescue ::Rex::ConnectionError, Errno::ECONNREFUSED, Errno::ETIMEDOUT
        print_error("HTTP Connection Failed")
        next
      end

      unless res
        print_error("HTTP Connection Timeout")
        next
      end

      if res && res.code == 401 && res.headers.has_key?('WWW-Authenticate') && res.headers['WWW-Authenticate'].match(/^NTLM/i)
        hash = res['WWW-Authenticate'].split('NTLM ')[1]
        domain = Rex::Proto::NTLM::Message.parse(Rex::Text.decode_base64(hash))[:target_name].value().gsub(/\0/,'')
        return domain, uri
      end
    end

    return nil, nil
  end

  # Store a successful login in the database.
  def report_cred(opts)
    service_data = {
      address: opts[:ip],
      port: opts[:port],
      service_name: opts[:service_name],
      protocol: 'tcp',
      workspace_id: myworkspace_id
    }

    credential_data = {
      origin_type: :service,
      module_fullname: fullname,
      username: opts[:user],
      private_data: opts[:password],
      private_type: :password
    }.merge(service_data)

    login_data = {
      core: create_credential(credential_data),
      last_attempted_at: DateTime.now,
      status: Metasploit::Model::Login::Status::SUCCESSFUL,
    }.merge(service_data)

    create_credential_login(login_data)
  end
end
|
import mandrill
import requests
import json
from datetime import datetime
# SECURITY(review): API key is hard-coded in source — move it to an
# environment variable / secrets store and rotate this key.
API_KEY = 'j3VdGCRj9OsJiY5LZQlT5g'
# Shared Mandrill client used by the Email class below.
mandrill_client = mandrill.Mandrill(API_KEY)
mandrill_link = 'https://mandrillapp.com/api/1.0/'
class Verify():
    """Checks the Mandrill API key by calling users/info.json.

    NOTE(review): this POST executes at class-definition (i.e. import) time,
    not when Verify is instantiated — any import of this module performs a
    network call. Consider moving it into a method.
    """
    data = {
        "key": API_KEY
    }
    responseStruct = requests.post(mandrill_link + 'users/info.json', data = data)
    responseJSON = json.loads(responseStruct.text)
    #print(responseJSON['username']);
class Email():
    """Builds and sends overdue-loan reminder emails through Mandrill."""

    @staticmethod
    def ss(item, products):
        """Send one overdue-loan reminder for *item*, listing *products*.

        item: dict with 'email', 'id_signature', 'updated', 'name', 'lastname'.
        products: list of dicts with 'Tag', 'Product Name', 'Serial Number'.
        Returns a dict describing the send result.
        """
        tempHTML = ""
        loan_date = ""
        id_page = ""
        # NOTE(review): `email` is read but never used — the 'to' address
        # below is hard-coded; confirm whether it should be the borrower.
        email = item['email']
        id_page = item['id_signature']
        loan_date = item['updated']
        name = item['name']
        lastname = item['lastname']
        # One table row per borrowed product.
        for element in products:
            tempHTML += "<tr>"
            tempHTML += "<td>"+element['Tag']+"</td>"
            tempHTML += "<td>"+element['Product Name']+"</td>"
            tempHTML += "<td>"+element['Serial Number']+"</td>"
            tempHTML += "</tr>"
        html = """<head>
        <style>
        table {
        font-family: arial, sans-serif;
        border-collapse: collapse;
        width: 100%;
        }
        td, th {
        border: 1px solid #dddddd;
        text-align: left;
        padding: 8px;
        }
        .line {
        width: 100%;
        height: 1px;
        border-bottom: 1px dashed #ddd;
        margin: 20px 0;
        }
        tr:nth-child(even) {
        background-color: #dddddd;
        }
        </style>
        </head>
        <body>
        <div class="line"></div>
        <p>Buen día """+name+""" """+lastname+""".<p>
        <p>Este es un mensaje automático para recordarle que la fecha de
        entrega """+str(loan_date)+""" de su equipo en préstamo ha vencido,
        favor acerquese al equipo de IT para hacer la entrega del mismo o para solicitar
        una extensión del periodo de préstamo.</p>
        <p>El equipo en préstamo en su hoja """+str(id_page)+""" es:</p>
        <table>
        <tr>
        <th>Tag</th>
        <th>Product Name</th>
        <th>Serial</th>
        </tr>"""+tempHTML+"""
        </table>
        <p>De antemano, ¡Muchas gracias!.</p>
        <p>Atte. Helpdesk IT.</p>
        <div class="line"></div>
        <p>&nbsp;</p>
        </body>"""
        message = {
            'from_email': 'no.reply@laureate.net',
            'from_name': 'Helpdesk IT',
            'headers': {'Reply-To': 'no.reply@laureate.net'},
            'html': html,
            'merge_language': 'mailchimp',
            'subject': 'Return Borrowed Equipment Request',
            'tags': ['password-resets'],
            'to': [{'email': 'dennis.carcamo@laureate.net',
                    'name': 'Dennis Carcamo',
                    'type': 'to'}]
        }
        result = mandrill_client.messages.send(message=message, ip_pool='Main Pool')
        #print (result)
        message = """<div class="line"></div> """+ str(message)
        if result[0]['status'] == "sent":
            return {
                "result": result,
                "id_page": id_page,
                "status": 'sent',
                "message": html
            }
        else:
            return {
                "result": "error"
            }

    @staticmethod
    def sendAdminEmails(admin, htmlMessage):
        """Send an IT admin a digest of the reminder emails sent today."""
        today = datetime.now().date()
        hour = today.ctime()
        # admin['name'] is expected to be "<first> <last>".
        nameSplit = str.split(admin['name'])
        name = nameSplit[0]
        lastname = nameSplit[1]
        email = admin['email']
        html = """<head>
        <style>
        table {
        font-family: arial, sans-serif;
        border-collapse: collapse;
        width: 100%;
        }
        td, th {
        border: 1px solid #dddddd;
        text-align: left;
        padding: 8px;
        }
        tr:nth-child(even) {
        background-color: #dddddd;
        }
        </style>
        </head>
        <body>
        <p>Buen día """+name.capitalize()+""" """+lastname.capitalize()+""".<p>
        <p>Este es un mensaje automático para notificarle sobre los correos
        de prestamo enviados automáticamente el día """+str(today)+""" por InvIT Emails.</p>
        <p>Los correos enviados son:</p>
        <p>&nbsp;</p>
        <p>"""+htmlMessage+"""</p>
        <p>Atte. InvIT Emails.</p>
        <p>¡Saludos!</p>
        <p>&nbsp;</p>
        </body>"""
        message = {
            'from_email': 'no.reply@laureate.net',
            'from_name': 'Helpdesk IT',
            'headers': {'Reply-To': 'no.reply@laureate.net'},
            'html': html,
            'merge_language': 'mailchimp',
            'subject': 'Return Borrowed Equipment Request',
            'tags': ['password-resets'],
            'to': [{'email': email,
                    'name': name,
                    'type': 'to'}]
        }
        result = mandrill_client.messages.send(message=message, ip_pool='Main Pool')
        if result[0]['status'] == "sent":
            return {
                "status": 'sent',
            }
        else:
            return {
                "result": "error"
            }

    @staticmethod
    def sendEmails(resultForEmails, admins, supervisor, execution, adminEmailFrec, supervisorEmailFrec):
        """Group overdue rows by signature sheet, send borrower reminders,
        then notify admins according to *adminEmailFrec*.

        Returns the list of per-sheet send results.
        """
        acumulatedSentEmails = ''
        result = ''
        allEmails = []
        products = []
        tempIdPage = ''
        todayDate = Email.todayDate()
        expirationDate = ''
        if(resultForEmails):
            for i, item in enumerate(resultForEmails):
                # email_exception == 1 means "do not email"; otherwise email when
                # the loan's return date is already past today's date.
                if(resultForEmails[i]['email_exception'] == 0 and Email.compareDates(int(todayDate)/1000, resultForEmails[i]['updated'])):
                    tempIdPage = resultForEmails[i]['id_signature']
                    # Accumulate products while the next row belongs to the same
                    # signature sheet; send one email per sheet.
                    # BUGFIX: was a bare `except:` — only the IndexError raised by
                    # looking past the last row should trigger the final send.
                    try:
                        if(tempIdPage == resultForEmails[i+1]['id_signature']):
                            products.append({'Tag':item['id_product'],'Product Name':item['product_name'],'Serial Number':item['serial_number']})
                        else:
                            products.append({'Tag':item['id_product'],'Product Name':item['product_name'],'Serial Number':item['serial_number']})
                            result = Email.ss(item,products)
                            allEmails.append(result)
                            acumulatedSentEmails += str(result['message'])
                            products = []
                    except IndexError:
                        # Last row: flush the pending sheet.
                        products.append({'Tag':item['id_product'],'Product Name':item['product_name'],'Serial Number':item['serial_number']})
                        result = Email.ss(item,products)
                        allEmails.append(result)
                        acumulatedSentEmails += str(result['message'])
                        products = []
            # Send the digest to IT admins, per the configured frequency.
            if(adminEmailFrec == 'every time'):
                for i, item in enumerate(admins):
                    Email.sendAdminEmails(item, acumulatedSentEmails)
            elif(adminEmailFrec == 'once a week'):
                today = datetime.today().strftime('%A')
                if(today == 'Monday'):
                    for i, item in enumerate(admins):
                        Email.sendAdminEmails(item, acumulatedSentEmails)
            elif(adminEmailFrec == 'only manually executions'):
                if(execution == 'manual'):
                    for i, item in enumerate(admins):
                        Email.sendAdminEmails(item, acumulatedSentEmails)
            elif(adminEmailFrec == 'only automatically executions'):
                if(execution == 'automatic'):
                    for i, item in enumerate(admins):
                        Email.sendAdminEmails(item, acumulatedSentEmails)
            # NOTE: supervisor notifications are intentionally disabled; mirror
            # the admin frequency logic above (using *supervisor* and
            # *supervisorEmailFrec*) when enabling them.
        return allEmails

    @staticmethod
    def todayDate():
        """Return the current time as a Unix timestamp in seconds (float)."""
        today = datetime.now()
        todayTimestamp = datetime.timestamp(today)
        return todayTimestamp

    @staticmethod
    def compareDates(todayDate, expirationDate):
        """True when *expirationDate* ('YYYY-MM-DD' string) is before *todayDate*.

        Both sides are epoch-seconds divided by 1000 (the caller divides
        todayDate the same way, so the comparison is consistent).

        BUGFIX: the format string was '%Y-%M-%d' — %M parses *minutes*, so the
        month was silently ignored and every date parsed as January. '%m'
        parses the month.
        """
        expirationDateObj = datetime.strptime(expirationDate, '%Y-%m-%d')
        expirationDateTimestamp = int(datetime.timestamp(expirationDateObj))/1000
        if expirationDateTimestamp < todayDate:
            return True
        else:
            return False
|
from datetime import datetime, timedelta
from ._base import BarReader
def show_bars(bars):
    """Print a list of bar objects as a pandas DataFrame indexed by datetime."""
    import pandas as pd
    columns = ["open", "high", "low", "close", "volume", "vtSymbol"]
    table = pd.DataFrame([vars(bar) for bar in bars])
    print(table.set_index("datetime")[columns])
def test(reader, symbol):
    """Smoke-test a BarReader against a grid of frequency/size/range queries,
    printing each result (or traceback) as a DataFrame."""
    import traceback
    assert isinstance(reader, BarReader)
    anchor = datetime.now().replace(minute=0, second=0, microsecond=0) - timedelta(days=1)
    queries = [
        {"symbol": symbol, "freq": "1m", "size": 20},
        {"symbol": symbol, "freq": "1m", "start": anchor.replace(hour=10)},
        {"symbol": symbol, "freq": "1m", "start": anchor.replace(hour=10), "end": anchor.replace(hour=11)},
        {"symbol": symbol, "freq": "10m", "size": 20},
        {"symbol": symbol, "freq": "10m", "start": anchor.replace(hour=10)},
        {"symbol": symbol, "freq": "10m", "start": anchor.replace(hour=10), "end": anchor.replace(hour=11)},
        {"symbol": symbol, "freq": "1h", "size": 20},
        {"symbol": symbol, "freq": "1h", "start": anchor.replace(hour=10)},
        {"symbol": symbol, "freq": "4h", "size": 30},
        {"symbol": symbol, "freq": "4h", "start": anchor.replace(hour=8) - timedelta(hours=72)},
    ]
    for params in queries:
        # Request completed history; returns a bar list.
        print("history", params)
        try:
            bars = reader.history(**params)
        except Exception:
            traceback.print_exc()
        else:
            show_bars(bars)
            print("-" * 100)
        # Request history including the still-forming bar; also returns the
        # timestamp of the last one-minute bar.
        print("historyActive", params)
        try:
            bars, last = reader.historyActive(**params)
        except Exception:
            traceback.print_exc()
        else:
            show_bars(bars)
            print("last time:", last)
            print("-" * 100)
|
# Executing JavaScript from Python Selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Open the 12306 railway ticketing home page in Firefox.
url="https://www.12306.cn/index/"
driver=webdriver.Firefox()
driver.get(url)
# Candidate departure dates to push into the date field.
date = ['2019-10-17', '2020-1-17', '2020-2-17']
for i in date:
    # Set the #train_date input's value directly via injected JavaScript —
    # presumably because the calendar widget blocks normal typing; confirm
    # against the live page.
    js = "document.getElementById('train_date').value='" + i + "'"
    driver.execute_script(js)
|
from google.appengine.ext import ndb
class User(ndb.Model):
    """App Engine Datastore entity for an application user account."""
    # NOTE(review): password is a plain StringProperty — nothing here shows
    # hashing; confirm callers hash before storing.
    email = ndb.StringProperty()
    password = ndb.StringProperty()
    role = ndb.StringProperty()
|
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html
import numpy as np
from scipy import signal
from scipy.fftpack import fftshift
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, figsize=(8, 7))
# 500 samples over one second; recover the sampling rate from the step.
t = np.arange(0, 1, 0.002)
fs = 1.0 / (t[1] - t[0])
# Two-tone test signal: 15 Hz sine plus 48 Hz cosine, then additive
# standard-normal noise.
x = 4 * np.sin(2 * np.pi * 15 * t) + 2 * np.cos(2 * np.pi * 48 * t)
xn = x + np.random.randn(len(x))
ax[0].plot(t, xn)
# NOTE: this rebinds `t` from sample times to spectrogram segment times.
f, t, Sxx = signal.spectrogram(xn, fs, noverlap=32,)
# spectrogram(xn, 64, 60, [], 500)
p = ax[1].pcolormesh(t, f, Sxx, cmap="afmhot")
cbar = fig.colorbar(p, ax=ax[1])
ax[1].set_ylabel('Frequency [Hz]')
ax[1].set_xlabel('Time [sec]')
# Zoom to the band containing both test tones.
ax[1].set_ylim(0, 60)
cbar.ax.set_ylabel("amplitude #")
plt.savefig("fig.png")
plt.show()
|
#!/usr/bin/python3
import argparse
import typing
import os
import pathlib
import sys
# File suffixes that are checked for a trailing newline.
EXTENSIONS = ['adoc', 'anaconda', 'conf', 'html', 'json', 'md', 'pp', 'profile', 'py', 'rb',
              'rst', 'rules', 'sh', 'template', 'toml', 'var', 'xml', 'yaml', 'yml']
# Absolute-path fragments that exempt a file from checking.
EXCLUSIONS = ['/shared/references/', '/logs/', '/tests/data/utils/']
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments: one or more paths plus an optional --fix flag."""
    parser = argparse.ArgumentParser(description="Print and fix files that don't end "
                                                 "in a new line")
    parser.add_argument("paths", type=str, nargs="+")
    # BUGFIX: help text said the flag would "add a new file"; it appends a
    # newline (see fix_file).
    parser.add_argument("--fix", action="store_true",
                        help='If set the program will add a newline to the end of '
                             'files that are missing it.')
    return parser.parse_args()
def get_files(path: pathlib.Path) -> list:
    """Recursively collect files under *path* with a tracked extension."""
    matches = []
    for ext in EXTENSIONS:
        matches += path.glob(f"**/*.{ext}")
    return matches
def get_all_files(paths: list) -> list:
    """Expand each path argument into matching files; abort on a missing path."""
    collected = []
    for raw in paths:
        candidate = pathlib.Path(raw)
        if not candidate.exists():
            sys.stderr.write(f"The path {candidate.absolute()} does not exist!\n")
            exit(3)
        collected.extend(get_files(candidate))
    return collected
def should_skip_file(file: pathlib.Path):
    """True when the file's absolute path lies inside an excluded directory."""
    absolute = str(file.absolute())
    return any(excluded in absolute for excluded in EXCLUSIONS)
def is_file_readable(file: pathlib.Path, f: typing.BinaryIO) -> bool:
    """True when *file* should be skipped: unseekable stream or under 2 bytes."""
    if not f.seekable():
        return True
    return file.stat().st_size < 2
def get_files_with_no_newline(files: list) -> list:
    """Return the subset of *files* whose final byte is not a newline."""
    offenders = []
    for candidate in files:
        if should_skip_file(candidate):
            continue
        with open(candidate.absolute(), 'rb') as stream:
            # Too small or unseekable: nothing meaningful to check.
            if is_file_readable(candidate, stream):
                continue
            stream.seek(-1, os.SEEK_END)
            if stream.read(1) != b'\n':
                offenders.append(candidate)
    return offenders
def fix_file(file: pathlib.Path):
    """Append the missing trailing newline to *file*."""
    with file.absolute().open('a') as out:
        out.write('\n')
def main():
    """Report files lacking a trailing newline; optionally repair them."""
    args = parse_args()
    all_files = get_all_files(args.paths)
    offenders = get_files_with_no_newline(all_files)
    for offender in offenders:
        print(offender.absolute())
        if args.fix:
            fix_file(offender)
    print(f"{len(offenders)} of {len(all_files)} files do not have the correct ending.")
    # Non-zero exit so CI can fail on offenders.
    if offenders:
        exit(1)


if __name__ == "__main__":
    main()
|
def warmup1(line: list) -> list:
    """Return a copy of *line* with every 0 removed.

    BUGFIX: the original built a fresh empty list also named ``warmup1``
    (shadowing the function), iterated over that empty list, and therefore
    ignored the parameter entirely and always returned []. The intent —
    removing zeros — is inferred from the ``remove(0)`` call; confirm against
    the exercise statement.
    """
    return [value for value in line if value != 0]
def _read_numbers() -> list:
    """Prompt once and return the whitespace-separated integers entered."""
    raw = input("Enter a sequence of numbers:")
    return [int(token) for token in raw.split()]


if __name__ == "__main__":
    # BUGFIX: the original did not run at all — `found = []b` is a syntax
    # error, `for numbers in num:` iterated over an int, and `found.append()`
    # was called without an argument. Inferred intent: collect the entered
    # numbers into `found`.
    found = _read_numbers()
import pickle
import numpy as np
import cv2
import os
from copy import *
class Data_loader:
    """Serves random image batches from a "<image_path> <label>" list file.

    20% of the entries are held out for evaluation; the remaining 80% are
    used for training. Images are center-cropped to scale_size x scale_size.
    """

    def __init__(self, filelist, scale_size, img_dir):
        self.scale_size = scale_size
        self.img_dir = img_dir
        with open(filelist, 'r') as fh:
            lines = fh.readlines()
        self.img_list = [ln.strip().split()[0] for ln in lines]
        self.label_list = [ln.strip().split()[1] for ln in lines]
        self.totalN = len(self.img_list)
        print('total number of images: {0}'.format(self.totalN))
        # Random split: first fifth for evaluation, the rest for training.
        shuffled = np.random.permutation(self.totalN)
        cut = int(self.totalN / 5)
        self.eval_idx = shuffled[0:cut]
        self.train_idx = shuffled[cut:]

    def get_batch(self, batch_size=128, set_type='train'):
        """Sample a random batch without replacement; returns (images, labels)."""
        pool = self.train_idx if set_type == 'train' else self.eval_idx
        idx_s = np.random.choice(pool, size=[batch_size, ], replace=False)
        img_s = np.zeros((batch_size, self.scale_size, self.scale_size, 3))
        label_s = np.ones((batch_size, )) * -1
        for slot, idx in enumerate(idx_s):
            fname = self.img_list[idx]
            assert (os.path.isfile(fname))
            img = cv2.imread(fname)
            # The shorter image side must already equal scale_size.
            assert (np.min(img.shape[0:2]) == self.scale_size)
            img = img.astype(np.float32)
            height, width = img.shape[0:2]
            # Center-crop the longer dimension down to scale_size.
            if height > width:
                top = (height - self.scale_size) // 2
                img = img[top:top + self.scale_size, :, :]
            else:
                left = (width - self.scale_size) // 2
                img = img[:, left:left + self.scale_size, :]
            img_s[slot] = deepcopy(img)
            label_s[slot] = self.label_list[idx]
        return img_s, label_s
#!/usr/bin/env python
import os
import sys
from bluetooth import BluetoothSocket, L2CAP
import dbus
import dbus.service
import gobject
from dbus.mainloop.glib import DBusGMainLoop
import blinkt
from client import Keyboard
# BlueZ D-Bus profile interface name.
PROFILE = "org.bluez.Profile1"
# NOTE(review): ADDRESS appears unused in this file — confirm its purpose.
ADDRESS = "B8:27:EB:EC:E9:95"
DEVICE_NAME = "PiZero"
# D-Bus object path under which the keyboard profile is registered.
PROFILE_DBUS_PATH = "/bluez/olly/simkeyboard"
# L2CAP ports: 17 = control channel, 19 = interrupt channel.
CONTROL_PORT = 17
INTERRUPT_PORT = 19
# SDP service record XML describing the keyboard, loaded at startup.
SDP_RECORD_PATH = sys.path[0] + "/sdp_record.xml"
UUID="00001124-0000-1000-8000-00805f9b34fb"
def read_service_record():
    """Return the SDP service record XML as a string."""
    with open(SDP_RECORD_PATH, "r") as f:
        return f.read()
class KeyboardProfile(dbus.service.Object):
    """D-Bus org.bluez.Profile1 implementation for the emulated keyboard."""

    # File descriptor handed over by BlueZ for the current connection; -1 = none.
    fd = -1

    @dbus.service.method(PROFILE, in_signature="", out_signature="")
    def Release(self):
        # BlueZ is unregistering the profile: stop the main loop.
        print "Release"
        mainloop.quit()

    @dbus.service.method(PROFILE, in_signature="", out_signature="")
    def Cancel(self):
        print "Cancel"

    @dbus.service.method(PROFILE, in_signature="oha{sv}", out_signature="")
    def NewConnection(self, path, fd, properties):
        # Take ownership of the unix fd BlueZ passed us, then dump the
        # connection properties for debugging.
        self.fd = fd.take()
        print "NewConnection(%s, %d)" % (path, self.fd)
        for key in properties.keys():
            if key == "Version" or key == "Features":
                print " %s = 0x%04x" % (key, properties[key])
            else:
                print " %s = %s" % (key, properties[key])

    @dbus.service.method(PROFILE, in_signature="o", out_signature="")
    def RequestDisconnection(self, path):
        print "RequestDisconnection(%s)" % path
        if self.fd > 0:
            os.close(self.fd)
            self.fd = -1

    def __init__(self, bus, path):
        dbus.service.Object.__init__(self, bus, path)
class SimulatedKeyboardDevice:
    """Configures the local adapter as a keyboard-class device and owns the
    two non-blocking L2CAP server sockets (control + interrupt)."""

    def __init__(self):
        # Bring the adapter up as a keyboard-class, discoverable device.
        os.system("hciconfig hci0 up")
        os.system("hciconfig hci0 class 0x002540")
        os.system("hciconfig hci0 name " + DEVICE_NAME)
        os.system("hciconfig hci0 piscan")
        # Register our profile (with its SDP record) with BlueZ over D-Bus.
        opts = {
            "ServiceRecord": read_service_record(),
            "Role": "server",
            "RequireAuthentication": False,
            "RequireAuthorization": False,
        }
        bus = dbus.SystemBus()
        manager = dbus.Interface(bus.get_object("org.bluez", "/org/bluez"), "org.bluez.ProfileManager1")
        profile = KeyboardProfile(bus, PROFILE_DBUS_PATH)
        manager.RegisterProfile(PROFILE_DBUS_PATH, UUID, opts)
        # Non-blocking L2CAP server sockets for the two channels.
        self.control_socket = BluetoothSocket(L2CAP)
        self.interrupt_socket = BluetoothSocket(L2CAP)
        self.control_socket.setblocking(0)
        self.interrupt_socket.setblocking(0)
        self.control_socket.bind(("", CONTROL_PORT))
        self.interrupt_socket.bind(("", INTERRUPT_PORT))

    def listen(self):
        # Accept one connection per channel via the glib event loop.
        print "Waiting for a connection"
        self.control_socket.listen(1)
        self.interrupt_socket.listen(1)
        self.control_channel = None
        self.interrupt_channel = None
        gobject.io_add_watch(
            self.control_socket.fileno(), gobject.IO_IN, self.accept_control)
        gobject.io_add_watch(
            self.interrupt_socket.fileno(), gobject.IO_IN,
            self.accept_interrupt)

    def accept_control(self, source, cond):
        # Returning True keeps the io watch installed.
        self.control_channel, cinfo = self.control_socket.accept()
        print "Got a connection on the control channel from " + cinfo[0]
        return True

    def accept_interrupt(self, source, cond):
        self.interrupt_channel, cinfo = self.interrupt_socket.accept()
        print "Got a connection on the interrupt channel from " + cinfo[0]
        return True

    def send(self, message):
        # Silently dropped until a host has connected to the interrupt channel.
        if self.interrupt_channel is not None:
            self.interrupt_channel.send(message)
class KeyboardService(dbus.service.Object):
    # Ties the key-event source (Keyboard) to the simulated HID device
    # and forwards key reports over the interrupt channel.
    # NOTE(review): dbus.service.Object.__init__ is never called here —
    # confirm whether this object is meant to be exported on the bus.
    def __init__(self):
        self.keyboard = Keyboard(self)
        self.device = SimulatedKeyboardDevice()
        self.device.listen()
        # Pixel 0 green: service is up and listening.
        blinkt.set_pixel(0, 0, 255, 0)
        blinkt.show()
    def send_keys(self, modifier, keys):
        # Build a HID input report: 0xa1 (DATA, input), report id 0x1,
        # modifier byte, one reserved byte, then up to 6 key codes
        # (extra key codes beyond 6 are silently dropped).
        cmd_str = ""
        cmd_str += chr(0xa1)
        cmd_str += chr(0x1)
        cmd_str += chr(modifier)
        cmd_str += chr(0)
        count = 0
        for key_code in keys:
            if count < 6:
                cmd_str += chr(key_code)
            count += 1
        self.device.send(cmd_str)
if __name__ == "__main__":
    # Root is required for hciconfig and raw L2CAP sockets.
    if not os.geteuid() == 0:
        sys.exit("Must run as root")
    DBusGMainLoop(set_as_default=True)
    # Pixel 0 red while starting up; turns green once listening.
    blinkt.set_brightness(0.05)
    blinkt.clear()
    blinkt.set_pixel(0, 255, 0, 0)
    blinkt.show()
    service = KeyboardService()
    gobject.threads_init()
    mainloop = gobject.MainLoop()
    mainloop.run()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 17 14:38:58 2018
@author: Dyass
"""
"""
Topics:
Examples with a Dictionary:
1:Create a frequency distribution mapping str:int
2:Find a word that occurs the most and how many times:
use a list, in case there is more than one word
returns a tuple(list,int) for (words_list,highest_freq)
3:Find the word that occur at least X times:
let user choose "at least X times,"so allow as parameters
returns a list of tuples,each tuple is a (list,int)
containing the list of words ordered by their frequency
IDEA:
From song dictionary,find most frequent word.Delete the most common
word.Repeat,it works because you are mutating the song
dictionary
"""
def lyrics_to_frequencies(lyrics):
    """Build a word -> occurrence-count mapping for *lyrics*.

    lyrics: any iterable of hashable items (typically a list of words).
    Returns a plain dict mapping each distinct item to its count.
    """
    freqs = {}
    for word in lyrics:
        # dict.get avoids the double lookup of the "if word in dict" /
        # "dict[word] += 1" pattern.
        freqs[word] = freqs.get(word, 0) + 1
    return freqs
def most_common_words(freqs):
    """Return ``(words, count)`` for the highest frequency in *freqs*.

    ``words`` is the list of all words tied at the maximum count, in
    dictionary (insertion) order; ``count`` is that maximum.  Raises
    ValueError on an empty dict (max() of an empty sequence).
    """
    top = max(freqs.values())
    tied = [word for word, count in freqs.items() if count == top]
    return (tied, top)
def words_often(freqs, minTimes):
    """Destructively collect words occurring at least *minTimes* times.

    Repeatedly finds the words tied for the current highest frequency,
    records them as a ``(words, count)`` tuple, and deletes them from
    *freqs*, until the remaining maximum drops below *minTimes* or the
    dict is exhausted.  Note: *freqs* is mutated in place.

    Fix: the original crashed with ``ValueError: max() arg is an empty
    sequence`` whenever every word met the threshold (e.g. minTimes=1)
    and the dict emptied out; the loop now stops cleanly instead.
    """
    result = []
    while freqs:
        best = max(freqs.values())
        if best < minTimes:
            break
        words = [w for w in freqs if freqs[w] == best]
        result.append((words, best))
        for w in words:
            del freqs[w]
    return result
# Lyrics of "She Loves You" (The Beatles), tokenized one word per element;
# contractions and hyphenations are kept exactly as sung.
she_loves_you = ['she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'you', 'think', "you've", 'lost', 'your', 'love',
                 'well', 'i', 'saw', 'her', 'yesterday-yi-yay',
                 "it's", 'you', "she's", 'thinking', 'of',
                 'and', 'she', 'told', 'me', 'what', 'to', 'say-yi-yay',
                 'she', 'says', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'that', "can't", 'be', 'bad',
                 'yes', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'you', 'should', 'be', 'glad',
                 'she', 'said', 'you', 'hurt', 'her', 'so',
                 'she', 'almost', 'lost', 'her', 'mind',
                 'and', 'now', 'she', 'says', 'she', 'knows',
                 "you're", 'not', 'the', 'hurting', 'kind',
                 'she', 'says', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'that', "can't", 'be', 'bad',
                 'yes', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'you', 'should', 'be', 'glad',
                 'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'with', 'a', 'love', 'like', 'that',
                 'you', 'know', 'you', 'should', 'be', 'glad',
                 'you', 'know', "it's", 'up', 'to', 'you',
                 'i', 'think', "it's", 'only', 'fair',
                 'pride', 'can', 'hurt', 'you', 'too',
                 'pologize', 'to', 'her',
                 'Because', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'that', "can't", 'be', 'bad',
                 'Yes', 'she', 'loves', 'you',
                 'and', 'you', 'know', 'you', 'should', 'be', 'glad',
                 'oo', 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'she', 'loves', 'you', 'yeah', 'yeah', 'yeah',
                 'with', 'a', 'love', 'like', 'that',
                 'you', 'know', 'you', 'should', 'be', 'glad',
                 'with', 'a', 'love', 'like', 'that',
                 'you', 'know', 'you', 'should', 'be', 'glad',
                 'with', 'a', 'love', 'like', 'that',
                 'you', 'know', 'you', 'should', 'be', 'glad',
                 'yeah', 'yeah', 'yeah',
                 'yeah', 'yeah', 'yeah', 'yeah'
                 ]
# Build the frequency table, then report every word occurring at least
# 5 times.  Note that words_often mutates `song` in place.
song=lyrics_to_frequencies(she_loves_you)
print(words_often(song,5))
|
# -- coding: utf8 --
import sys
sys.path.insert(0,'..')
from model.data_model import *
import common.common as common
def main():
    # Drop the daily devpower_detail_<YYYYMMDD> tables for the date range
    # 2015-12-01 .. 2015-12-22 (range semantics per common.getdaylist —
    # presumably inclusive; verify against that helper).
    daylist = common.getdaylist('20151201','20151222')
    for day in daylist:
        print day
        tablename = 'devpower_detail_' + day
        data_model(tablename).drop()

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python2
from __future__ import print_function
import sys, os, time
import shutil
from pdb import set_trace
from glob import glob
import re
########################## Parsing and environment ############################
import subprocess
from helpers import submitjob, createClusterInfo, resetJobOutput
def doSub():
    """Prepare and submit CMSSW DeepNtuples analyzer jobs to the batch system.

    Reads a whitespace-separated sample list file (njobs sample output
    [extra args...]), resolves samples either as python sample modules or
    via a DAS query, creates per-sample job directories and EOS output
    areas, writes HTCondor submit files (one bulk + one per job for
    resubmission), SGE scripts, and a shared batch shell script, and
    finally submits via condor unless --nosubmit is given or SGE
    scheduling is detected.
    """
    from argparse import ArgumentParser as argps
    swdir = os.path.realpath(os.environ['CMSSW_BASE'])
    # Heuristic: a non-empty SGE_CELL means we are on an SGE scheduler.
    issgesched = len(os.getenv('SGE_CELL','')) > 0
    jobid = 'jobid' #os.environ['jobid']
    inputdir = os.path.join(swdir, 'inputs')
    inputdir = os.path.join(inputdir, jobid)
    parser = argps('submit analyzer to the batch queues')
    parser.add_argument('configfile')
    parser.add_argument('jobdir')
    parser.add_argument('--file',default='samples.cfg',help='file containing a sample list')
    parser.add_argument('--nosubmit',default=False,help='no submission')
    parser.add_argument('--outpath',default='',help='set path to store the .root output')
    parser.add_argument('--walltime',default='21600',help='set job wall time in seconds')
    parser.add_argument('--maxsize',default='2000',help='set maximum allowed size of output ntuple')
    args = parser.parse_args()
    jobruntime=args.walltime
    maxSize=args.maxsize
    #eosGlobalOutDir='/eos/user/'+os.environ['USER'][0]+'/'+os.environ['USER']+'/DeepNtuples'
    eosGlobalOutDir='/eos/cms/store/cmst3/group/dehep/DeepJet/NTuples/'
    if len(args.outpath):
        # User-specified output path must already exist.
        eosGlobalOutDir=args.outpath
        if not os.path.isdir(eosGlobalOutDir):
            print('please specify a valid output path')
            sys.exit(-1)
    if os.path.isdir(args.jobdir):
        print (args.jobdir, 'exists: EXIT')
        sys.exit(-1)
    configFile=os.path.realpath(args.configfile)
    ###### check for grid certificate
    #check grid proxy with voms-proxy-info
    # grep for timeleft and require at least 3 hours
    # and check wether the path lives in AFS (voms-proxy-info -path)
    #save this output for auto wget on file lists
    checkcert = subprocess.Popen(['voms-proxy-info','-path'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    sout, serr = checkcert.communicate()
    certpath=sout
    checkcert = subprocess.Popen(['voms-proxy-info','-timeleft'], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    sout, serr = checkcert.communicate()
    certtime=sout
    # Require at least 2 hours of proxy validity before submitting.
    if float(certtime) < 2*60*60:
        print('grid proxy loses validity in less than 2 hours, please renew before job submission.')
        exit()
    usercertfile=os.getenv('HOME')+'/.globus/usercert.pem'
    userkeyfile=os.getenv('HOME')+'/.globus/userkey.pem'
    nousercertsfound=False
    if not os.path.isfile(usercertfile):
        print('pleace locate your grid certificate file in ~/.globus/usercert.pem')
        nousercertsfound=True
    if not os.path.isfile(userkeyfile):
        print('pleace locate your grid key file in ~/.globus/userkey.pem')
        nousercertsfound=True
    if nousercertsfound:
        print('please follow the Twiki https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookStartingGrid')
        exit()
    #recreates samples directory (removing old one avoids possible errors in creating importsamples)
    samplescriptdir=os.getenv('HOME')+'/.deepntuples_scripts_tmp'
    if not os.path.isdir(samplescriptdir):
        os.mkdir(samplescriptdir)
    samplescriptdir+='/'
    #https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookStartingGrid
    #testurl='https://cmsweb.cern.ch/das/makepy?dataset=/QCD_Pt_1000to1400_TuneCUETP8M1_13TeV_pythia8/PhaseIFall16MiniAOD-PhaseIFall16PUFlat20to50_PhaseIFall16_81X_upgrade2017_realistic_v26-v1/MINIAODSIM&instance=prod/global'
    #make a system call to wget.. urllib is not powerful enoguh apparently
    #checkcert = subprocess.Popen(['wget','--certificate='+certpath,'-o bla.py', testurl], stdout=subprocess.PIPE,
    #                             stderr=subprocess.PIPE)
    #print(checkcert.communicate())
    #exit()#testing
    ######## all checks done. From now on it just runs
    # create output dir
    os.mkdir(args.jobdir)
    shutil.copy(configFile, args.jobdir)
    configFile=os.path.realpath(os.path.join(args.jobdir, os.path.basename(configFile)))
    globalOutDir=eosGlobalOutDir+'/'+time.strftime('%a_%H%M%S')+'_'+args.jobdir
    globalOutDir=os.path.realpath(globalOutDir)
    print ('submitting jobs for '+configFile)
    samplesdir='DeepNTuples.DeepNtuplizer.samples.'
    #format: njobs sample output args1 args2 ... (simple whitespace)
    lines = [line.rstrip('\n') for line in open(args.file)]
    for sampledescription in lines:
        # Skip comment lines and malformed entries (need at least
        # njobs, sample and output columns).
        if sampledescription.strip().startswith("#"):
            continue
        entries= [s.strip() for s in sampledescription.split(' ') if s]
        if len(entries) < 3:
            continue
        #check if sufficient files
        ######check out from DAS
        samplename=entries[1]
        isdasname=False
        #do differently if it is a DAS name
        if '/' in samplename: #this is DAS name
            isdasname=True
        if '*' in samplename:
            print('no wildcards in sample names allowed')
            exit()
        print('preparing\n'+samplename)
        sample=""
        if not isdasname:
            sample=samplesdir+samplename
        else:
            # Turn the DAS dataset path into a safe python module name
            # by stripping all punctuation.
            import string
            chars = re.escape(string.punctuation)
            scriptfile=re.sub(r'['+chars+']', '', str(samplename))
            scriptfile=scriptfile
            # Only query DAS if we have not cached this sample's file
            # list as a python script already.
            if not os.path.isfile(samplescriptdir+scriptfile+'.py'):
                cmd = 'dasgoclient -query="file dataset=%s"' % (samplename)
                dasquery = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                sout = dasquery.communicate()[0]
                filelist = ['"%s",' % f for f in sout.strip().split('\n')]
                template_sample = os.path.join(os.environ['CMSSW_BASE'], 'src/DeepNTuples/DeepNtuplizer/python/samples/samples_template.py')
                dest_file = samplescriptdir+scriptfile+'.py'
                # Fill the _FILES_ placeholder in the template with the
                # DAS file list.
                with open(template_sample) as temp:
                    s = temp.read().replace('_FILES_', '\n'.join(filelist))
                with open(dest_file, 'w') as fout:
                    fout.write(s)
            sample=scriptfile
        sys.path.append(samplescriptdir)
        sys.path.append(swdir+'/src/DeepNTuples/DeepNtuplizer/python/samples')
        importsample=sample.split('.')[-1]
        #print(importsample)
        cmssource = __import__(importsample)
        #print(cmssource.source)
        nJobs=entries[0]
        # Never run more jobs than there are input files.
        totalfiles=len(cmssource.source.fileNames)+len(cmssource.source.secondaryFileNames)
        if int(nJobs)>totalfiles:
            print('reduced number of jobs to number of files (',totalfiles,') from ', nJobs)
            nJobs=str(totalfiles)
        outputFile=entries[2]
        jobargs=''
        if len(entries) >3:
            jobargs=entries[3]
        jobpath = os.path.join(
            args.jobdir,
            outputFile
        )
        jobpath=os.path.realpath(jobpath)
        os.mkdir(jobpath)
        sheelscp=os.path.realpath(os.path.join(jobpath, 'batchscript.sh'))
        #create full output path on eos
        #print (eosGlobalOutDir)
        ntupleOutDir=globalOutDir+'/'+outputFile+'/output/'
        os.makedirs(ntupleOutDir)
        #print (ntupleOutDir)
        #link to ntupleOutDir
        os.symlink(ntupleOutDir,jobpath+'/output')
        #ntupleOutDir=jobpath+'/output/'
        hostname=os.getenv('HOSTNAME')
        if not hostname:
            raise Exception("hostname could not be determined")
        #create directory in /tmp/ to host the .out and .log files
        #logDir=globalOutDir+'/'+outputFile+'/batch/' #'/tmp/'+os.environ['USER']+'/batch/'
        helperdir='/tmp/'+os.environ['USER']+'/'+ str(os.getpid()) +'/'+outputFile+'/batch/'
        logDir=jobpath+'/batch/'
        os.makedirs(logDir)
        os.makedirs(helperdir)
        os.symlink(helperdir,jobpath+'/helper')
        infostring='helper files in \n'+helperdir+' on machine\n'+hostname
        #print(infostring+'\n delete after all jobs are finished (check.py does it automatically)')
        #create a txt file and save the eos path there
        address = open(jobpath+'/hostinfo.txt','w')
        #address.write(logDir)
        address.write(hostname)
        address.close()
        #print(ntupleOutDir)
        #The maximum wall time of a condor job is defined in the MaxRuntime parameter in seconds.
        # 3 hours (10800s) seems to be currently enough
        # Bulk condor submit file: one clause queues all nJobs at once.
        condorfile ="""executable = {batchscriptpath}
arguments = {configfile} inputScript={sample} nJobs={njobs} job=$(ProcId) {options}
log = {logdir}con_out.$(ProcId).log
getenv = True
environment = "NTUPLEOUTFILEPATH={ntupledir}{outputfile}_$(ProcId).root"
use_x509userproxy = True
+MaxRuntime = {maxruntime}
max_transfer_output_mb = {maxsize}
RequestCpus = 2
transfer_output_remaps = "stdout.txt={logdir}con_out.$(ProcId).out"
max_retries = 20
queue {njobs}
""".format(
            batchscriptpath=sheelscp,
            configfile=configFile,
            sample=sample,
            ntupledir=ntupleOutDir,
            outputfile=outputFile,
            njobs=nJobs,
            options=jobargs,
            maxruntime=jobruntime,
            maxsize=maxSize,
            logdir=logDir
        )
        conf = open(os.path.join(jobpath, 'condor.sub'), 'w')
        conf.write(condorfile)
        conf.close()
        print("wrote condor file for "+outputFile)
        allsgescripts=[]
        #create individual condor files for resubmission
        for job in range(0,int(nJobs)):
            jobcondorfile ="""executable = {batchscriptpath}
arguments = {configfile} inputScript={sample} nJobs={njobs} job={job} {options}
log = {logdir}con_out.{job}.log
getenv = True
environment = "NTUPLEOUTFILEPATH={ntupledir}{outputfile}_{job}.root"
use_x509userproxy = True
+MaxRuntime= {maxruntime}
RequestCpus = 2
max_transfer_output_mb = {maxsize}
transfer_output_remaps = "stdout.txt={logdir}con_out.{job}.out"
max_retries = 3
queue 1
""".format(
                batchscriptpath=sheelscp,
                configfile=configFile,
                sample=sample,
                ntupledir=ntupleOutDir,
                outputfile=outputFile,
                njobs=nJobs,
                options=jobargs,
                job=str(job),
                maxruntime=jobruntime,
                maxsize=maxSize,
                logdir=logDir
            )
            jconf = open(os.path.join(logDir,'condor_'+str(job)+'.sub'), 'w')
            jconf.write(jobcondorfile)
            jconf.close()
            # Matching SGE script for the same job (DESY-HH style
            # resource requests).
            jobsgefile="""
#!/bin/sh
#
#(make sure the right shell will be used)
#$ -S /bin/sh
#$ -l site=hh
#$ -l distro=sld6
#
#(the cpu time for this job)
#$ -l h_rt=05:55:00
#
#(the maximum memory usage of this job)
#$ -l h_vmem=4096M
#$ -l cvmfs
#(stderr and stdout are merged together to stdout)
#$ -j y
#$ -m a
#$ -cwd -V
#( -l h_stack=1536M) #try with small stack
#$ -pe local 1 -R y
#$ -P af-cms
export LOGDIR={logdir}
export JOB={job}
export NTUPLEOUTFILEPATH={ntupledir}{outputfile}_{job}.root
{batchscriptpath} {configfile} inputScript={sample} nJobs={njobs} job={job} {options}
""".format(
                batchscriptpath=sheelscp,
                configfile=configFile,
                sample=sample,
                ntupledir=ntupleOutDir,
                outputfile=outputFile,
                njobs=nJobs,
                options=jobargs,
                job=str(job),
                #maxruntime=jobruntime,
                #maxsize=maxSize,
                logdir=logDir
            )
            # "or True": SGE scripts are currently always written.
            if issgesched or True:
                sgefile=os.path.join(logDir,'sge_'+str(job)+'.sh')
                jconf = open(sgefile, 'w')
                jconf.write(jobsgefile)
                jconf.close()
                allsgescripts.append(sgefile)
                os.system('chmod +x '+sgefile)
            resetJobOutput(jobpath,job)
        #create script
        # Shared batch script: sets up CMSSW, runs cmsRun, copies the
        # output ntuple to EOS (or locally for SGE) and reports status
        # via JOBSUB:: markers parsed by the checking tools.
        shellscript = """#!/bin/bash
workdir=""
if [ $SGE_CELL ]
then
workdir=`mktemp -d -t DeepNTuplesXXXXXX`
cd $workdir
workdir=$workdir"/"
fi
exec > "$PWD/stdout.txt" 2>&1
echo "JOBSUB::RUN job running"
trap "echo JOBSUB::FAIL job killed" SIGTERM
export OUTPUT=$workdir"{outputfile}"
cd {basedir}
eval `scramv1 runtime -sh`
export PYTHONPATH={sampleScriptdir}:$PYTHONPATH
which cmsRun
cd -
cmsRun -n 1 "$@" outputFile=$OUTPUT
exitstatus=$?
if [ $exitstatus != 0 ]
then
echo JOBSUB::FAIL job failed with status $exitstatus
else
pwd
ls -ltr $OUTPUT*.root
if [ $SGE_CELL ]
then
cp $OUTPUT*.root $NTUPLEOUTFILEPATH
else
eos cp $OUTPUT*.root $NTUPLEOUTFILEPATH
fi
exitstatus=$?
rm -f $OUTPUT*.root
if [ $exitstatus != 0 ]
then
echo JOBSUB::FAIL job failed with status $exitstatus
else
echo JOBSUB::SUCC job ended sucessfully
fi
fi
rm -f $OUTPUT*.root
if [ $workdir ]
then
# JOB is only defined for SGE submit
cp $workdir/stdout.txt $LOGDIR/con_out.$JOB.out
rm -rf $workdir
fi
exit $exitstatus
""".format(
            sampleScriptdir=samplescriptdir,
            basedir=swdir,
            jobdir=os.path.realpath(args.jobdir),
            outputfile=outputFile
        )
        shellsc = open(sheelscp, 'w')
        shellsc.write(shellscript)
        shellsc.close()
        os.system('chmod +x '+sheelscp)
        os.system('touch '+logDir+'/nJobs.'+str(nJobs))
        # Convenience script submitting all per-job SGE scripts at once.
        sgelist=open(jobpath+'/sge_sub.sh','w')
        for line in allsgescripts:
            sgelist.write('qsub '+line+'\n')
        os.system('chmod +x '+jobpath+'/sge_sub.sh')
        sgelist.close()
        #add a 'touch for the .out file to make the check realise it is there
        if not args.nosubmit and not issgesched:
            print('submitting '+outputFile)
            #os.system('cd ' + jobpath + ' && echo condor_submit condor.sub') # gives back submitted to cluster XXX message - use
            cluster=submitjob(jobpath,'condor.sub')
            #print(cluster)
            for job in range(0,int(nJobs)):
                createClusterInfo(jobpath,job,cluster,True)
        if not args.nosubmit and issgesched:
            # SGE submission is left to the generated sge_sub.sh script.
            pass
    exit()
doSub()
|
from django.contrib.auth import login as auth_login
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from .models import Item, Tag
from .forms import ItemForm, TagForm, SignupForm
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView, DeleteView, FormView
from django.views.generic import ListView, DetailView
from datetime import datetime
from django_filters.views import FilterView
from .filters import ItemFilter
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
#from django.views.generic import Createview
import csv
# Create your views here.
class SignupView(CreateView):
    # User registration; on success redirect to the login page.
    form_class = SignupForm
    success_url = reverse_lazy('login')
    template_name = 'registration/signup.html'
class Hello(View):
    """Render ``hello.html`` with the current server time."""

    def get(self, request):
        # Fix: the original opened accounts/amazon.csv and did
        # "for i in reader: return", returning None from inside the
        # loop before the response below could ever be built (Django
        # then errors on a None response; a missing file raised
        # FileNotFoundError).  That leftover debug block and the
        # unused {"message": "helloWorld"} context were removed.
        times = datetime.now()
        time = {"time": times}
        return TemplateResponse(request, "hello.html", time)

hello = Hello.as_view()
class AddTagView(CreateView):
    # Create a Tag, then return to the item-create page.
    # NOTE(review): model is Item but form_class is TagForm — looks
    # inconsistent; confirm which model this view is meant to create.
    model = Item
    form_class = TagForm
    success_url = reverse_lazy('accounts:create')
class TagView(ListView):
    """List the Items carrying the Tag whose pk appears in the URL."""
    model = Item
    form_class = TagForm

    def get_queryset(self):
        # Restrict the default queryset to items linked to this tag.
        tag_pk = self.kwargs['pk']
        return Item.objects.filter(tag__pk=tag_pk)
class ItemFilterView(LoginRequiredMixin, FilterView):
    # Filterable, paginated Item list (login required).
    model = Item
    filterset_class = ItemFilter
    template_name = "accounts/item_filter.html"
    # Default ordering: newest first.
    queryset = Item.objects.all().order_by('-created_at')
    # Return all records when no query is given (django-filter 2.0+).
    strict = False
    # Settings for pure_pagination.
    paginate_by = 2
    object = Item

    # Save the search conditions to the session, or restore them.
    def get(self, request, **kwargs):
        if request.GET:
            # New search: remember it for later visits.
            request.session['query'] = request.GET
        else:
            # No query given: re-apply the previously saved parameters
            # (GET must be copied to become mutable).
            request.GET = request.GET.copy()
            if 'query' in request.session.keys():
                for key in request.session['query'].keys():
                    request.GET[key] = request.session['query'][key]
        return super().get(request, **kwargs)

index = ItemFilterView.as_view()
# Detail page
class ItemDetailView( LoginRequiredMixin, DetailView):
    model = Item

detail = ItemDetailView.as_view()
# Create page
class ItemCreateView(LoginRequiredMixin,CreateView):
    model = Item
    form_class = ItemForm
    template_name = "accounts/item_form.html"
    success_url = reverse_lazy("accounts:index")

create = ItemCreateView.as_view()
# Update page
class ItemUpdateView(LoginRequiredMixin, UpdateView):
    model = Item
    form_class = ItemForm
    success_url = reverse_lazy('accounts:index')

update = ItemUpdateView.as_view()
class ItemDeleteView(LoginRequiredMixin, DeleteView):
    # Delete an Item, then return to the list page (no confirmation
    # template is rendered because delete() is overridden below).
    model = Item
    success_url = reverse_lazy('accounts:index')

    def delete(self, request, *args, **kwargs):
        """
        Call the delete() method on the fetched object and then redirect to the
        success URL.
        """
        self.object = self.get_object()
        success_url = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(success_url)

delete = ItemDeleteView.as_view()
"""
class LoginView(View):
def get(self, request):
context = {"form": LoginForm()}
return render(request, "account/login.html", context)
def post(self, request):
#リクエストからのフォームを生成
form = LoginForm(request.POST)
#バリデーション(ユーザーの認証も泡あせて実施)
if not form.is_valid():
return render(request, "accounts/login.html", context)
#オブジェクトをフォームから取得
user = form.get_user()
#ログイン処理(取得したUserオブジェクトをセッションに保存&userデータを更新)
auth_login(request, user)
return redirect(reverse(""))
loginview = LoginView.as_view()
""" |
# ---------------------------------------------------------------------------
# Interactive cosmology-fit session script (IPython --pylab style).
# NOTE(review): this relies on names injected by "from pylab import *"
# (clf, xlim, ylim, plot, title, shape, argsort, np, ...) and on variables
# defined elsewhere in the session (mycosmo, library); it is not runnable
# as a standalone module.  reload() is the Python 2 builtin.
# ---------------------------------------------------------------------------
import cosmolopy
import pymc
from pymc import Metropolis
from McMc import mcmc
from astropy.io import fits
from McMc import cosmo_utils
import scipy
# post intéressant pour mettre son propre likelihood
#https://groups.google.com/forum/#!topic/pymc/u9v3XPOMWTY
################ SNIa #############################################
# Fit the SNIa model with adaptive Metropolis and draw om/ol contours.
from McMc import model_sn1a
reload(model_sn1a)
S=pymc.MCMC(model_sn1a)
S.use_step_method(pymc.AdaptiveMetropolis,S.stochastics,delay=1000)
S.sample(iter=10000,burn=5000,thin=10)
clf()
xlim(0,1)
ylim(0,1.5)
mcmc.cont(S.trace('om')[:],S.trace('ol')[:],nsig=5,color='red')
xx=np.linspace(0,1,1000)
# Flat-universe line om + ol = 1.
plot(xx,1-xx,'k:')
###################################################################
############# BAO Lyman-alpha DR11 ################################
from McMc import model_lyaDR11 as lya
reload(lya)
B=pymc.MCMC(lya)
B.use_step_method(pymc.AdaptiveMetropolis,B.stochastics,delay=1000)
B.sample(iter=50000,burn=10000,thin=10)
clf()
xlim(0,1)
ylim(0,1.5)
mcmc.cont_gkde(B.trace('om')[:],B.trace('ol')[:],nsig=5)
xx=np.linspace(0,1,1000)
plot(xx,1-xx,'k:')
reload(lya)
# Recompute derived BAO observables (1/(H*rs), DA/rs) for each chain
# sample and overlay their contour.
hvals=B.trace('h')[:]
omvals=B.trace('om')[:]
olvals=B.trace('ol')[:]
ll=np.zeros(hvals.size)
invhrs=np.zeros(hvals.size)
da_rs=np.zeros(hvals.size)
for i in np.arange(hvals.size):
    print(i,hvals.size)
    ll[i],invhrs[i],da_rs[i]=lya.theproba_ext(h=hvals[i], om=omvals[i],ol=olvals[i],ob=mycosmo['omega_b_0'])
mcmc.cont_gkde(invhrs,da_rs,fill=False,color='red',alpha=1)
###################################################################
######## Plot of both #############################################
clf()
xlim(0,1)
ylim(0,1.5)
mcmc.cont(B.trace('om')[:],B.trace('ol')[:],nsig=5,alpha=0.5)
mcmc.cont(S.trace('om')[:],S.trace('ol')[:],nsig=5,color='red',alpha=0.5)
xx=np.linspace(0,1,1000)
plot(xx,1-xx,'k:')
###################################################################
############# BAO Lyman-alpha DR11 Flat w ################################
# Same analysis for a flat wCDM model (om, w free; ol = 1 - om).
from McMc import model_lyaDR11_flatw as lya
reload(lya)
B=pymc.MCMC(lya)
B.use_step_method(pymc.AdaptiveMetropolis,B.stochastics,delay=10000)
B.sample(iter=500000,burn=10000,thin=10)
clf()
xlim(0,1)
ylim(-2,0)
mcmc.cont_gkde(B.trace('om')[:],B.trace('w')[:],nsig=5)
xx=np.linspace(0,1,1000)
plot(xx,1-xx,'k:')
reload(lya)
hvals=B.trace('h')[:]
omvals=B.trace('om')[:]
olvals=1.-B.trace('om')[:]
wvals=B.trace('w')[:]
ll=np.zeros(hvals.size)
invhrs=np.zeros(hvals.size)
da_rs=np.zeros(hvals.size)
for i in np.arange(hvals.size):
    print(i,hvals.size)
    ll[i],invhrs[i],da_rs[i]=lya.thelogproba_ext(h=hvals[i], om=omvals[i],w=wvals[i],ob=0.0463)
reload(lya)
plot(invhrs,da_rs,'k,',alpha=0.2)
mcmc.cont_gkde(invhrs,da_rs,fill=False,color='red',alpha=1)
xlim(0.0022,0.0032)
ylim(6,16)
###################################################################
############# BAO Lyman-alpha DR11 Flat w NEW ################################
# Newer model version that traces the derived observables directly.
import pymc
from pymc import Metropolis
import cosmolopy
from McMc import mcmc
from astropy.io import fits
from McMc import cosmo_utils
import scipy
from McMc import model_lyaDR11_flatw_new as lya
reload(lya)
B=pymc.MCMC(lya)
B.use_step_method(pymc.AdaptiveMetropolis,B.stochastics,delay=10000)
B.sample(iter=100000,burn=10000,thin=10)
clf()
xlim(0,1)
ylim(-2,0)
mcmc.cont_gkde(B.trace('om')[:],B.trace('w')[:],nsig=5)
xx=np.linspace(0,1,1000)
plot(xx,1-xx,'k:')
h=B.trace('h')[:]
om=B.trace('om')[:]
w=B.trace('w')[:]
ob=B.trace('ob')[:]
invhrs=B.trace('invhrs')[:]
da_rs=B.trace('da_rs')[:]
reload(lya)
plot(invhrs,da_rs,'g,',alpha=0.6)
mcmc.cont_gkde(invhrs,da_rs,fill=False,color='green',alpha=1,nsig=5)
xlim(0.0022,0.0032)
ylim(6,16)
### compare with IDL drawn McMc (soft used for Busca et al. 2012)
import scipy.io
bla=scipy.io.readsav('chain_flatwcst_anislyaDR11.save')
chain=bla.chain
sh=shape(chain)
idl_da_rs=np.zeros(sh[0])
idl_invhrs=np.zeros(sh[0])
for i in np.arange(sh[0]):
    print(i,sh[0])
    idl_da_rs[i]=lya.my_da_rs(h=chain[i,4],om=chain[i,0],ob=0.0227/0.7**2,w=chain[i,2])
    idl_invhrs[i]=lya.my_invhrs(h=chain[i,4],om=chain[i,0],ob=0.0227/0.7**2,w=chain[i,2])
# Shuffle the IDL chain so truncation to nmax is unbiased.
rndorder=argsort(np.random.random(sh[0]))
idl_invhrs_new=idl_invhrs[rndorder]
idl_da_rs_new=idl_da_rs[rndorder]
om_idl=chain[rndorder,0]
w_idl=chain[rndorder,2]
nmax=100000
clf()
xlim(0,1)
ylim(-2,0)
mcmc.cont_gkde(B.trace('om')[:],B.trace('w')[:],nsig=5)
mcmc.cont_gkde(om_idl[0:nmax],w_idl[0:nmax],nsig=5,color='red')
xx=np.linspace(0,1,1000)
plot(xx,1-xx,'k:')
nmax=100000
clf()
reload(lya)
#plot(idl_invhrs_new[0:nmax],idl_da_rs_new[0:nmax],'r,',alpha=0.2)
mcmc.cont_gkde(idl_invhrs_new[0:nmax],idl_da_rs_new[0:nmax],fill=False,color='green',alpha=1,nsig=5)
#plot(invhrs,da_rs,'k,',alpha=1)
mcmc.cont_gkde(invhrs,da_rs,fill=False,color='red',alpha=1,nsig=5)
xlim(0.0022,0.0032)
ylim(6,16)
clf()
xlim(0,1)
ylim(-2,0)
title(library)
mcmc.cont_gkde(om,w,color='blue')
xx=np.linspace(0,1,1000)
plot(xx,xx*0-1,'k:')
###################################################################
|
"""
Markdown parsing and rewriting for embedding narrative images.
Contains a Parsing Expression Grammar (PEG) for parsing Markdown with
:py:mod:`pyparsing`. The grammar expects a complete Markdown document, but
only parses the minimal number of Markdown constructs required for our needs
(namely, embedding narrative images).
The two most useful documentation pages for pyparsing are its
`general usage overview <https://pyparsing-docs.readthedocs.io/en/latest/HowToUsePyparsing.html>`__ and
`API reference <https://pyparsing-docs.readthedocs.io/en/latest/pyparsing.html>`__,
which contains more details for each class than the usage doc.
Existing Markdown parsers, though many, all fell short of our use case of
transforming some input Markdown to some other output Markdown. They all focus
on converting Markdown to HTML. As such, they throw away important original
source text during the parse, thus making it impossible to reconstruct with any
fidelity and very difficult/tedious to produce more Markdown (i.e. you have to
cover all constructs). I evaluated
`Markdown <https://pypi.org/project/Markdown/>`__,
`commonmark <https://pypi.org/project/commonmark/>`__,
`markdown-it-py <https://pypi.org/project/markdown-it-py/>`__
(what I thought going into this would be our preferred choice!),
`mistletoe <https://pypi.org/project/mistletoe/>`__,
and `mistune <https://pypi.org/project/mistune/>`__.
"""
from base64 import b64encode
from copy import copy
import mimetypes
import re
from dataclasses import dataclass
from enum import Enum
from operator import attrgetter
from pathlib import Path
from pyparsing import (
Group,
LineEnd,
LineStart,
nested_expr,
ParserElement,
QuotedString,
SkipTo,
StringEnd,
White,
ZeroOrMore,
)
from typing import ClassVar, Generator, Iterable, Optional, Union
from urllib.parse import urlsplit, quote as urlquote
# The AST-like nodes we generate here for parsed elements follow the mdast
# spec¹. We have to have _some_ data model, so might as well use an existing
# one to a) avoid coming up with our own and b) help us to think about cases we
# might not consider. Note that we don't actually construct an AST, but
# produce a simpler flat list of nodes.
# -trs, 17 Nov 2022
#
# ¹ https://github.com/syntax-tree/mdast
@dataclass
class Node:
    # Base for all parsed nodes; "type" mirrors the mdast discriminator.
    type: ClassVar[str] = "node"

@dataclass
class ResourceMixin:
    # mdast Resource: a reference to an external resource (URL + title).
    url: str
    title: Optional[str] = None

@dataclass
class AlternativeMixin:
    # mdast Alternative: fallback/alt text.
    alt: Optional[str]

@dataclass
class AssociationMixin:
    # mdast Association: identifier/label linking references to definitions.
    identifier: str
    label: Optional[str]

class ReferenceType(Enum):
    # How an image/link reference was written (mdast referenceType).
    full = "full"            # ![alt][label]
    collapsed = "collapsed"  # ![alt][]
    shortcut = "shortcut"    # ![alt]

@dataclass
class ReferenceMixin(AssociationMixin):
    referenceType: ReferenceType

@dataclass
class ImageNode(Node, ResourceMixin, AlternativeMixin):
    # https://github.com/syntax-tree/mdast#image
    type: ClassVar[str] = "image"

@dataclass
class ImageReferenceNode(Node, AlternativeMixin, ReferenceMixin):
    # https://github.com/syntax-tree/mdast#imagereference
    type: ClassVar[str] = "imageReference"

@dataclass
class DefinitionNode(Node, ResourceMixin, AssociationMixin):
    # https://github.com/syntax-tree/mdast#definition
    type: ClassVar[str] = "definition"

@dataclass
class CodeNode(Node):
    # A fenced code block; "value" is the raw, unparsed body.
    type: ClassVar[str] = "code"
    lang: Optional[str]
    meta: Optional[str]
    value: str # from mdast's Literal
# Don't skip newlines when skipping whitespace.
#
# XXX TODO: This will affect *global* usage of pyparsing within this process,
# which may have unintended effects.  Currently we don't seem to have any
# dependents which also use pyparsing (without vendoring), so this seems ok.
# It's not clear to me at the moment how to properly configure each of our
# individual elements instead, but we should probably figure that out at some
# point.
#   -trs, 18 Nov 2022
ParserElement.set_default_whitespace_chars(" \t")

# Nested brackets are acceptable within outer brackets, e.g. of image alt text,
# as long as they're balanced.
BalancedBrackets = nested_expr(*"[]")

# ![alt](url "title")
# ![alt](url)
#
# Fix: the two Image rules below were garbled in this copy of the file —
# the literal "![" openers and the "](", ")" delimiters fed to SkipTo had
# been stripped, leaving invalid syntax.  Reconstructed to mirror the
# intact reference-style rules further down.
Title = QuotedString('"', esc_char = "\\", unquote_results = False, convert_whitespace_escapes = False)("title")
ImageWithTitle = Group("![" + SkipTo("](", ignore = BalancedBrackets)("alt") + "](" + SkipTo(White()).leave_whitespace()("url") + Title + ")")
ImageWithoutTitle = Group("![" + SkipTo("](", ignore = BalancedBrackets)("alt") + "](" + SkipTo(")")("url") + ")")
Image = ((ImageWithTitle | ImageWithoutTitle)
    .set_name("Image"))

# https://github.com/syntax-tree/mdast#image
Image.set_parse_action(lambda tokens: ImageNode(
    alt = tokens[0]["alt"],
    url = tokens[0]["url"],
    title = tokens[0].get("title"),
))

# ![alt][label]    a "full" reference
# ![alt][]         a "collapsed" reference
# ![alt]           a "shortcut" reference
ImageReferenceExplicit = Group("![" + SkipTo("][", ignore = BalancedBrackets)("alt") + "][" + SkipTo("]")("label") + "]")
ImageReferenceImplicit = Group("![" + SkipTo("]", ignore = BalancedBrackets)("alt") + "]")
ImageReference = ((ImageReferenceExplicit | ImageReferenceImplicit)
    .set_name("ImageReference"))

# https://github.com/syntax-tree/mdast#imagereference
ImageReference.set_parse_action(lambda tokens: ImageReferenceNode(
    alt = tokens[0]["alt"],
    label = tokens[0].get("label"),
    identifier = normalize_label(tokens[0].get("label") or tokens[0].get("alt")),
    referenceType = reference_type(tokens[0].get("label")),
))

# [label]: url
Definition = (Group(LineStart() + "[" + SkipTo("]:")("label") + "]:" + SkipTo(LineEnd())("url"))
    .set_name("Definition"))

# https://github.com/syntax-tree/mdast#definition
Definition.set_parse_action(lambda tokens: DefinitionNode(
    label = tokens[0]["label"],
    identifier = normalize_label(tokens[0]["label"]),
    url = tokens[0]["url"],
))

# ```auspiceMainDisplayMarkdown
# ... (unparsed)
# ```
AuspiceMainDisplayMarkdownStart = LineStart() + "```auspiceMainDisplayMarkdown" + LineEnd()
AuspiceMainDisplayMarkdownEnd = LineStart() + "```" + LineEnd()
AuspiceMainDisplayMarkdown = (Group(AuspiceMainDisplayMarkdownStart + ... + AuspiceMainDisplayMarkdownEnd)
    .set_name("AuspiceMainDisplayMarkdown"))

# Specific case of the more general https://github.com/syntax-tree/mdast#code;
# we don't parse all code blocks.
AuspiceMainDisplayMarkdown.set_parse_action(lambda tokens: CodeNode(
    lang = "auspiceMainDisplayMarkdown",
    meta = None,
    value = "".join(tokens[0].get("_skipped", [])),
))

# Parse just what we need to and pass thru the rest.
ParsedMarkdown = Image | ImageReference | Definition | AuspiceMainDisplayMarkdown
UnparsedMarkdown = SkipTo(ParsedMarkdown)
Markdown = ZeroOrMore(ParsedMarkdown | UnparsedMarkdown) + SkipTo(StringEnd())
# A node-list entry is either a parsed Node or a raw, unparsed string.
NodeListNode = Union[Node, str]
NodeList = Iterable[NodeListNode]

def parse(markdown: str) -> NodeList:
    """
    Parse a *markdown* string into a flat list of nodes consisting of
    :py:cls:`Node` subclasses for parsed constructs and plain strings for raw,
    unparsed content.
    """
    # parse_all=True: fail loudly if the grammar can't consume everything.
    return list(Markdown.parse_string(markdown, parse_all = True))
def generate(nodes: NodeList) -> str:
    """
    Generate Markdown from the given *nodes* list, such as that returned by
    :func:`parse`.
    """
    return "".join(_generate(nodes))
def _generate(nodes: NodeList):
    """Yield the Markdown text chunk for each node in *nodes*."""
    for node in nodes:
        if isinstance(node, str):
            # Raw, unparsed content passes straight through.
            yield node
        elif isinstance(node, ImageNode):
            alt, url, title = attrgetter("alt", "url", "title")(node)
            # NOTE(review): both branches yield an empty f-string here; the
            # image syntax (alt/url/title interpolation) appears to have been
            # lost from this copy — confirm against upstream before relying
            # on ImageNode round-tripping.
            if title is not None:
                yield f""
            else:
                yield f""
        elif isinstance(node, ImageReferenceNode):
            alt, label, identifier, referenceType = attrgetter("alt", "label", "identifier", "referenceType")(node)
            if referenceType is ReferenceType.full:
                yield f"![{alt}][{label or identifier}]"
            elif referenceType is ReferenceType.collapsed:
                yield f"![{alt}][]"
            elif referenceType is ReferenceType.shortcut:
                yield f"![{alt}]"
            else:
                raise AssertionError(f"unknown image reference type {referenceType!r} in node: {node!r}")
        elif isinstance(node, DefinitionNode):
            label, identifier, url = attrgetter("label", "identifier", "url")(node)
            yield f"[{label or identifier}]: {url}"
        elif isinstance(node, CodeNode) and node.lang == "auspiceMainDisplayMarkdown":
            # Only auspiceMainDisplayMarkdown fences are ever parsed into
            # CodeNode by this module, so only they are regenerated.
            yield f"```auspiceMainDisplayMarkdown\n"
            yield node.value
            yield f"```\n"
        else:
            raise AssertionError(f"unknown Markdown node: {node!r}")
def embed_images(nodes: NodeList, base_path: Path) -> NodeList:
    """
    Return a modified *nodes* list with local images (potentially relative to
    *base_path*) converted to embedded ``data:`` URLs.

    In the case where *nodes* was parsed from a local Markdown file,
    *base_path* should be the containing directory of that file.

    Neither *nodes* itself nor its contained :cls:`Node` instances are modified
    in place. Instead, new :cls:`Node` instances are constructed as necessary
    and unchanged nodes are passed thru unmodified to avoid potentially
    expensive copies.
    """
    # Collect definitions so we can look them up when we encounter a reference.
    # If there are duplicate ids, the first one (in source order) wins.
    # (reversed() makes earlier entries overwrite later ones in the dict.)
    definitions = {
        n.identifier: n
        for n in reversed(nodes)
        if isinstance(n, DefinitionNode) }
    # We'll modify "definitions" in the first pass as we go, so keep a copy of
    # the originals around for diffing in the second pass.
    original_definitions = copy(definitions)
    # First pass to create new definitions for the image data: URLs.
    def first_pass(nodes: NodeList) -> Generator[NodeListNode, None, None]:
        for node in nodes:
            if isinstance(node, ImageNode):
                # as_data_url() returns None for non-local or missing files;
                # such images are passed through untouched.
                data_url = as_data_url(node.url, base_path)
                if data_url:
                    # Image references can't have a title, so if we have a
                    # title we have to inline the long data: URL.
                    if node.title is not None:
                        yield ImageNode(
                            alt = node.alt,
                            url = data_url,
                            title = node.title)
                    else:
                        # Otherwise, we prefer to add a new definition and convert
                        # this image to an image reference so we can sequester the
                        # long data: URL definition to the bottom of the document.
                        definition = DefinitionNode(
                            label = node.url,
                            identifier = normalize_label(node.url),
                            url = data_url)
                        definitions[definition.identifier] = definition
                        yield ImageReferenceNode(
                            alt = node.alt,
                            label = definition.label,
                            identifier = definition.identifier,
                            referenceType = ReferenceType.full)
                else:
                    yield node
            elif isinstance(node, ImageReferenceNode):
                if node.identifier in definitions:
                    definition = definitions[node.identifier]
                    data_url = as_data_url(definition.url, base_path)
                    if data_url:
                        # Replace the original definition because we can't have
                        # definitions which point to other definitions. On the
                        # second pass, we'll filter out the original and emit
                        # the new one at the bottom of the document.
                        definitions[definition.identifier] = (
                            DefinitionNode(
                                label = definition.label,
                                identifier = definition.identifier,
                                url = data_url))
                # Always yield the original image reference since the
                # identifier is unchanged.
                yield node
            elif isinstance(node, CodeNode) and node.lang == "auspiceMainDisplayMarkdown":
                # Recursively embed images inside the main display content too
                yield CodeNode(
                    lang = node.lang,
                    meta = node.meta,
                    value = generate(embed_images(parse(node.value), base_path)))
            else:
                yield node
    nodes = list(first_pass(nodes))
    # Second pass to drop replaced definitions and add new ones at the end,
    # sequestering the long data: URLs to the bottom of the document.
    to_drop = [d for d in original_definitions.values() if d not in definitions.values()]
    to_add = [d for d in definitions.values() if d not in original_definitions.values()]
    def second_pass(nodes: NodeList) -> Generator[NodeListNode, None, None]:
        yield from (n for n in nodes if n not in to_drop)
        yield "\n\n"
        for n in reversed(to_add): # reversed() to undo the original reversed() above
            yield n
            yield "\n"
    nodes = list(second_pass(nodes))
    return nodes
def as_data_url(url: str, base_path: Path) -> Optional[str]:
    """
    Convert *url* to a ``data:`` URL if it refers to a local file (potentially
    relative to *base_path*).

    *url* must be a bare path (e.g. ``a/b/c.png``) or a ``file:`` URL without a
    hostname part (e.g. ``file:///a/b/c.png``).

    Returns ``None`` if *url* doesn't refer to a local file (or the file
    doesn't exist).
    """
    url_ = urlsplit(url)
    # Must be a bare path or a file:///path URL
    if (url_.scheme, url_.netloc) not in {("", ""), ("file", "")}:
        return None
    file = base_path / url_.path
    # Whelp, the file's missing (or isn't a regular file), but there's nothing
    # we can do about it. is_file() rather than exists(): a directory would
    # pass exists() and then make read_bytes() below throw.
    if not file.is_file():
        # XXX TODO: issue a warning?
        return None
    # Percent-encode the base64 payload so the data: URL stays Markdown-safe.
    content = urlquote(b64encode(file.read_bytes()).decode("utf-8"))
    content_type, _ = mimetypes.guess_type(file.name)
    if not content_type:
        # Unknown extension → generic binary type.
        content_type = "application/octet-stream"
    return f"data:{content_type};base64,{content}"
def normalize_label(label: str) -> str:
    """
    Normalize an association label per the mdast spec
    <https://github.com/syntax-tree/mdast#association>: collapse runs of
    markdown whitespace (``[\\t\\n\\r ]+``) to a single space, trim the
    optional leading/trailing space, and case-fold the result.
    """
    collapsed = re.sub(r'[\t\n\r ]+', ' ', label)
    return collapsed.strip(" ").casefold()
def reference_type(label: str) -> ReferenceType:
    """
    Classify an image reference by its label, per the mdast spec for a
    ReferenceType <https://github.com/syntax-tree/mdast#referencetype>::

        ![alt][label]   a "full" reference
        ![alt][]        a "collapsed" reference
        ![alt]          a "shortcut" reference
    """
    if label is None:
        return ReferenceType.shortcut
    if not label:
        return ReferenceType.collapsed
    return ReferenceType.full
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.engine.models.base import AutoMarshallingModel
class Absolute(AutoMarshallingModel):
    """Absolute limits model: quota maxima and current usage counters."""

    # Response keys in the same order as the __init__ parameters.
    _KEYS = ('maxServerMeta', 'maxPersonality', 'maxImageMeta',
             'maxPersonalitySize', 'maxSecurityGroupRules',
             'maxTotalKeypairs', 'totalRAMUsed', 'totalInstancesUsed',
             'maxSecurityGroups', 'totalFloatingIpsUsed', 'maxTotalCores',
             'totalSecurityGroupsUsed', 'maxTotalFloatingIps',
             'maxTotalInstances', 'totalCoresUsed', 'maxTotalRAMSize')

    def __init__(self, max_server_meta=None, max_personality=None,
                 max_image_meta=None, max_personality_size=None,
                 max_security_group_rules=None, max_total_keypairs=None,
                 total_ram_used=None, total_instances_used=None,
                 max_security_groups=None, total_floating_ips_used=None,
                 max_total_cores=None, total_security_groups_used=None,
                 max_total_floating_ips=None, max_total_instances=None,
                 total_cores_used=None, max_total_ram_size=None):
        super(Absolute, self).__init__()
        self.max_server_meta = max_server_meta
        self.max_personality = max_personality
        self.max_image_meta = max_image_meta
        self.max_personality_size = max_personality_size
        self.max_security_group_rules = max_security_group_rules
        # NOTE: attribute name differs from the parameter spelling
        # (key_pairs vs keypairs); preserved for compatibility.
        self.max_total_key_pairs = max_total_keypairs
        self.total_ram_used = total_ram_used
        self.total_instances_used = total_instances_used
        self.max_security_groups = max_security_groups
        self.total_floating_ips_used = total_floating_ips_used
        self.max_total_cores = max_total_cores
        self.total_security_groups_used = total_security_groups_used
        self.max_total_floating_ips = max_total_floating_ips
        self.max_total_instances = max_total_instances
        self.total_cores_used = total_cores_used
        self.max_total_ram_size = max_total_ram_size

    @classmethod
    def _dict_to_obj(cls, absolute_dict):
        """Deserialize a JSON ``absolute`` dict into an Absolute instance."""
        return Absolute(*(absolute_dict.get(key) for key in cls._KEYS))

    @classmethod
    def _xml_ele_to_obj(cls, absolute_xml):
        """Deserialize ``<limit name=... value=.../>`` children into an
        Absolute instance via the dict path."""
        values = {}
        for limit in absolute_xml.findall('limit'):
            values[limit.attrib['name']] = limit.attrib['value']
        return cls._dict_to_obj(values)
|
import logging
from hashlib import sha1
from pylru import lrucache
from py2neo import Node
from aleph.graph.util import BASE_NODE, GraphType
log = logging.getLogger(__name__)
class NodeType(GraphType):
    """A node type in the graph (label, fingerprint property, indexes).

    Every instance registers itself in ``_instances`` by name so types can
    be looked up globally.
    """

    # Registry of all NodeType instances ever created, keyed by name.
    _instances = {}

    def __init__(self, name, fingerprint='fingerprint', indices=None,
                 hidden=False, node=True):
        """Create and register a node type.

        :param name: label applied to nodes of this type.
        :param fingerprint: property used to deduplicate nodes on merge.
        :param indices: extra properties to index; the fingerprint property
            is always indexed. (``None`` sentinel instead of a mutable ``[]``
            default — the classic shared-default pitfall.)
        :param hidden: flag stored on the type; not interpreted here.
        :param node: when True, nodes also carry the shared BASE_NODE label.
        """
        self.name = name
        self.fingerprint = fingerprint
        self.indices = (indices or []) + [fingerprint]
        self.hidden = hidden
        self.labels = [name]
        # Collections don't have IDs and fingerprints:
        if node:
            self.labels.append(BASE_NODE)
        self._instances[name] = self

    def ensure_indices(self, graph):
        """Create any missing schema indexes for this type's properties."""
        existing = graph.schema.get_indexes(self.name)
        log.info("Creating indexes on: %s", self.name)
        for prop in self.indices:
            if prop not in existing:
                graph.schema.create_index(self.name, prop)

    def _get_tx_cache(self, tx):
        # Lazily attach an LRU node cache to the transaction object itself so
        # the cache lives exactly as long as the transaction.
        if not hasattr(tx, '_node_lru_cache'):
            tx._node_lru_cache = lrucache(5000)
        return tx._node_lru_cache

    def get_cache(self, tx, fingerprint):
        """Return the node cached under *fingerprint*, or None if absent."""
        cache = self._get_tx_cache(tx)
        if (self.name, fingerprint) in cache:
            return cache[(self.name, fingerprint)]

    def set_cache(self, tx, fingerprint, node):
        """Remember *node* under *fingerprint* for this transaction."""
        cache = self._get_tx_cache(tx)
        cache[(self.name, fingerprint)] = node

    def merge(self, tx, **props):
        """Merge a node with *props* into the graph.

        Returns the merged node, or None when the fingerprint property is
        missing from *props* (nothing to deduplicate on).
        """
        fp = props.get(self.fingerprint)
        if fp is None:
            return
        props['id'] = self.gen_id(fp)
        node = Node(*self.labels, **props)
        tx.merge(node, self.name, self.fingerprint)
        self.set_cache(tx, fp, node)
        return node

    def to_dict(self):
        """Serializable representation of this type."""
        return {'name': self.name}

    def gen_id(self, fp):
        """Derive a stable SHA1-based ID from the type name and fingerprint.

        NOTE: Python 2 code (``unicode``); under Python 3, ``sha1(self.name)``
        would also need an encoded byte string.
        """
        idkey = sha1(self.name)
        idkey.update(unicode(fp).encode('utf-8'))
        return idkey.hexdigest()

    @classmethod
    def dict(cls, node):
        """Convert a py2neo node to a plain dict, storing its (non-base)
        label under the ``$label`` key."""
        data = dict(node)
        for label in node.labels():
            if label != BASE_NODE:
                data['$label'] = label
        return data
|
import numpy as np
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# np.set_printoptions(precision=3)
ROWS = 1
COLS = 5
LABELS = ['LOCATION', 'X', 'Y', 'Z', 'MAGNITUDE']
DECIMALS = 3
class Table(QVBoxLayout):
    """Layout wrapping a QTableWidget that lists sample locations and their
    field components (LOCATION, X, Y, Z, MAGNITUDE)."""

    def __init__(self, parent):
        super(Table, self).__init__()
        self.parent = parent
        self.calc = parent.calc
        self.table = QTableWidget()
        self.table.setColumnCount(COLS)
        self.table.setHorizontalHeaderLabels(LABELS)
        # self.make_table(np.random.rand(3,3), mask_radius=8)
        self.init_table(np.random.rand(3,3))
        header = self.table.horizontalHeader()
        for col in range(COLS):
            header.setSectionResizeMode(col, QHeaderView.Stretch)
        self.addWidget(self.table)

    def init_table(self,
                   locations=None):
        """Fill the table with location rows and blank field columns."""
        # A 1-D input is a single location; promote it to shape (1, 3).
        if locations.ndim == 1:
            row_count = 1
            locations = np.expand_dims(locations, axis=0)
        else:
            row_count = len(locations)
        self.table.setRowCount(row_count)
        for row, location in enumerate(locations):
            self.table.setItem(row, 0, QTableWidgetItem(str(tuple(location.round(DECIMALS)))))
            # Field columns start empty until make_table() computes them.
            for col in range(1, COLS):
                self.table.setItem(row, col, QTableWidgetItem(''))

    def make_table(self,
                   locations,
                   mask_radius):
        """Fill the table with locations and their computed field vectors."""
        if locations.ndim == 1:
            row_count = 1
            locations = np.expand_dims(locations, axis=0)
        else:
            row_count = len(locations)
        fields = self.calc.calculate_field(locations=locations,
                                           return_vector=True,
                                           mask_radius=mask_radius)
        self.table.setRowCount(row_count)
        for row, (location, field) in enumerate(zip(locations, fields)):
            self.table.setItem(row, 0, QTableWidgetItem(str(tuple(location.round(DECIMALS)))))
            self.table.setItem(row, 1, QTableWidgetItem(str(field[0])))
            self.table.setItem(row, 2, QTableWidgetItem(str(field[1])))
            self.table.setItem(row, 3, QTableWidgetItem(str(field[2])))
            self.table.setItem(row, 4, QTableWidgetItem(str(np.linalg.norm(field))))
|
"""
Messi's Goal Total
Use variables to find the sum of the goals Messi scored
in 3 competitions
Information
Messi goal scoring statistics:
Competition Goals
La Liga 43
Champions League 10
Copa del Rey 5
Task
Create these three variables and store the appropriate
values using the table above:
la_liga_goals
champions_league_goals
copa_del_rey_goals
Create a fourth variable named total_goals that stores
the sum of all of Messi's goals for this year.
"""
la_liga_goals = 43
champions_league_goals = 10
copa_del_rey_goals = 5
total_goals = la_liga_goals + champions_league_goals + copa_del_rey_goals
print(total_goals) |
"""Code to interact with the login server from a python shell.
Usage:
$ python
...
>>> from client import *
>>> admin.addUser('fred', 's3cr3t')
>>> fred = login.login('fred', 's3cr3t')
The 'fred' value is a proxy to a persistent UserCaps object to which
we can add 'capabilities', other persistent objects that are available
to the 'fred' user for getting things done.
"""
from serf.rpc_handler import RPCHandler
from serf.transport import Transport
from serf.eventlet_thread import EventletThread
from serf.proxy import Proxy
from serf.repl_proxy import REPLProxy
SERVER = '127.0.0.1:6508'
# Wire up an RPC handler over the transport. No local objects are exported
# (empty dict); the eventlet thread model drives callbacks.
net = Transport()
thread = EventletThread()
rpc = RPCHandler(net, {}, t_model=thread)
rpc.safe.append('serf.tables')
def remote(x):
    # Wrap a proxy for remote object *x* on SERVER so it is convenient to
    # call from the interactive REPL.
    return REPLProxy(rpc.makeProxy(x, SERVER), thread)
# Start the worker thread and the network transport (side effects at import).
thread.start(True)
thread.callFromThread(net.start)
# Well-known remote capabilities exposed by the login server.
admin = remote('admin')
login = remote('login')
users = remote('users')
from __future__ import print_function
from install_requirements import is_dependencies_satisfied
import sys
# Abort early (exit code 1) with a helpful message when required third-party
# packages are missing.
if not is_dependencies_satisfied():
    print("some packages are missing, please type: \"python install_requirements.py\"", file=sys.stderr)
    exit(1)
from utils import err_print
import requests
import re
import os
import bz2file
import time
import subprocess
def map_downloader(url):
    """Download *url* to ``map.osm.bz2`` via the external ``wget`` tool.

    Prints a friendly message when wget is not installed; any other OSError
    from launching the process is re-raised.
    """
    # Local import: errno is the proper home of ENOENT. The original used
    # `os.errno`, an accidental CPython alias that was removed in Python 3.10.
    import errno
    try:
        subprocess.call(["wget", "-O", "map.osm.bz2", url, "--no-check-certificate"])
    except OSError as e:
        if e.errno == errno.ENOENT:
            err_print("wget not found!\nplease, install it, it's available both Linux and Windows") # handle executable-not-found
        else:
            raise # Something else went wrong while trying to run `wget`
def substring_after(s, delim):
    """Return the part of *s* after the first *delim* ('' when absent)."""
    _head, _sep, tail = s.partition(delim)
    return tail
URL_BASE = 'https://mapzen.com'
url = URL_BASE + '/data/metro-extracts/'
# Fetch the metro-extracts index page and scan it line by line.
# NOTE(review): requests' iter_lines() yields bytes on Python 3, while the
# string comparisons below assume str — this script looks Python 2 era;
# confirm the intended interpreter.
responce = requests.get(url)
list_of_content = [line for line in responce.iter_lines()]
# City name from the command line; defaults to Prague.
if len(sys.argv) > 1:
    my_city = sys.argv[1]
else:
    my_city = 'Prague'
all_cities = []
for line in list_of_content:
    if "class=\"city\"" in line:
        # Split the anchor tag on angle brackets; the city name is the third
        # field from the end, the href lives in the second field.
        line = re.split("<|>", line)
        all_cities.append(line[-3])
        if line[-3] == my_city:
            # Follow the city page link and look for the OSM XML download.
            city_url = substring_after(line[1], "href=")
            url = URL_BASE + city_url.replace("\"", "")
            responce = requests.get(url)
            list_of_content = [line for line in responce.iter_lines()]
            for line in list_of_content:
                if "OSM XML" in line:
                    line = re.split(" |<|>", line) # cut string into list
                    print("size:", line[-5]) # size in MB
                    downloading_page = substring_after(line[11], "=") # http download
                    map_downloader(downloading_page.replace("\"", ""))
                    start_time = time.time()
                    # Decompress the downloaded archive into map.osm.
                    bz_file = bz2file.open("map.osm.bz2")
                    with open("map.osm", "w") as out: # decompress bz2 file
                        out.write(bz_file.read())
                        out.close()
                    print("time:", (time.time() - start_time))
                    exit()
# Reached only when the requested city was not found in the index.
err_print("spell your city correctly, or choose another one from this list: {}".format(sorted(all_cities)))
|
import socket
from ip2geotools.databases.noncommercial import DpIpCity
# Ask the user for a hostname (prompt text is Turkish), resolve it to an IP
# address, and geolocate it using the free DB-IP API tier.
url = input("can abi, lütfen linki girermisin? : ")
IP = socket.gethostbyname(url)
response = DpIpCity.get(IP, api_key='free')
# Report the resolved IP and its city / region / country (labels in Turkish).
print("IP adresi:", IP)
print("bulunduğu şehir:", response.city)
print("bulunduğu bölge:", response.region)
print("bulunduğu ülke:", response.country)
|
from collections import defaultdict
# Group values by key into sets — duplicate (key, value) pairs collapse.
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
d = defaultdict(set)
for k, v in s:
    d[k].add(v)
print(d)
# Count character frequencies using int's default value of 0.
s = 'mississippi'
d = defaultdict(int)
for k in s:
    d[k] += 1
print(d)
from collections import defaultdict
# Same grouping as above but into lists, which keep duplicates and order.
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
d = defaultdict(list)
for k, v in s:
    d[k].append(v)
print(d)
data = [1, 2, 1, 3, 3, 1, 4, 2]
# %matplotlib inline
import matplotlib.pyplot as plt
# Show a histogram of the sample data (blocks until the window is closed).
plt.hist(data)
plt.show()
from django.db import models
# Third-party apps
from PIL import Image
from . managers import CursoManager
# Create your models here.
class Curso(models.Model):
    """Model for the curso (course) table."""
    # Verbose names are in Spanish; note 'direccion' is labeled 'Destino'.
    nombre = models.CharField('Nombre', max_length=60)
    direccion = models.CharField('Destino', max_length=60)
    fecha = models.DateField('Fecha',blank=False, null=False,)
    horario = models.TimeField('Horario',auto_now=False, auto_now_add=False)
    mapa = models.URLField('Mapa', max_length=500)
    # Custom manager supplied by the app's managers module.
    objects= CursoManager()
    def __str__(self):
        # Display the primary key as the string form of a Curso.
        return str(self.id)
|
# Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Record table holding
    # patient questionnaire answers, with a nullable FK to user.user.
    # Generated files like this should not be edited by hand.
    initial = True
    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('smoker', models.BooleanField(blank=True, default=False)),
                ('packsSmoked', models.IntegerField(blank=True, default=0)),
                ('yearsSmoked', models.IntegerField(blank=True, default=0)),
                ('packyears', models.IntegerField(blank=True, default=0)),
                ('pregnancy', models.BooleanField(blank=True, default=False)),
                ('asthama', models.BooleanField(blank=True, default=False)),
                ('asthama_age', models.IntegerField(blank=True, default=0)),
                ('lung_disease', models.BooleanField(blank=True, default=False)),
                ('lung_disease_desc', models.CharField(max_length=50)),
                ('diabetes', models.BooleanField(blank=True, default=False)),
                ('lung_cancer', models.BooleanField(blank=True, default=False)),
                ('additional_probs', models.BooleanField(blank=True, default=False)),
                ('clinical_trials', models.BooleanField(blank=True, default=False)),
                ('atrial', models.BooleanField(blank=True, default=False)),
                ('bph', models.BooleanField(blank=True, default=False)),
                ('bph_stable', models.BooleanField(blank=True, default=False)),
                ('comments', models.CharField(max_length=500)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='user.user')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 16 21:07:40 2017
@author: katsuhisa
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# for visualization
import matplotlib.pyplot as plt
import seaborn as sns
# import data
# Load the Kaggle house-price data; paths assume the Kaggle kernel layout.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(train.dtypes)
# string label to categorical values
from sklearn.preprocessing import LabelEncoder
for i in range(train.shape[1]):
    if train.iloc[:,i].dtypes == object:
        # Fit on train+test values together so both splits share one encoding.
        lbl = LabelEncoder()
        lbl.fit(list(train.iloc[:,i].values) + list(test.iloc[:,i].values))
        train.iloc[:,i] = lbl.transform(list(train.iloc[:,i].values))
        test.iloc[:,i] = lbl.transform(list(test.iloc[:,i].values))
# search for missing data
import missingno as msno
msno.matrix(df=train, figsize=(20,14), color=(0.5,0,0))
# keep ID for submission
train_ID = train['Id']
test_ID = test['Id']
# split data for training
y_train = train['SalePrice']
X_train = train.drop(['Id','SalePrice'], axis=1)
X_test = test.drop('Id', axis=1)
# dealing with missing data
Xmat = pd.concat([X_train, X_test])
Xmat = Xmat.drop(['LotFrontage','MasVnrArea','GarageYrBlt'], axis=1)
Xmat = Xmat.fillna(Xmat.median())
# feature importance using random forest
# NOTE(review): the imputed, column-dropped Xmat is never split back into
# X_train/X_test, so this fit uses the pre-imputation X_train (which may
# still contain NaNs) — confirm this is intended.
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=80, max_features='auto')
rf.fit(X_train, y_train)
print('Training done using Random Forest')
# Rank features by importance (descending) and plot them.
ranking = np.argsort(-rf.feature_importances_)
f, ax = plt.subplots(figsize=(11, 9))
sns.barplot(x=rf.feature_importances_[ranking], y=X_train.columns.values[ranking], orient='h')
ax.set_xlabel("feature importance")
plt.tight_layout()
plt.show()
# use the top 30 features only
X_train = X_train.iloc[:,ranking[:30]]
X_test = X_test.iloc[:,ranking[:30]]
# interaction between the top 2
# NOTE(review): 'TotalSF' is not created anywhere in this script; it must
# already exist among the selected input columns — verify.
X_train["Interaction"] = X_train["TotalSF"]*X_train["OverallQual"]
X_test["Interaction"] = X_test["TotalSF"]*X_test["OverallQual"]
# relation to the target
fig = plt.figure(figsize=(12,7))
for i in np.arange(30):
    ax = fig.add_subplot(5,6,i+1)
    sns.regplot(x=X_train.iloc[:,i], y=y_train)
plt.tight_layout()
plt.show()
# outlier deletion
Xmat = X_train
Xmat['SalePrice'] = y_train
Xmat = Xmat.drop(Xmat[(Xmat['TotalSF']>5) & (Xmat['SalePrice']<12.5)].index)
Xmat = Xmat.drop(Xmat[(Xmat['GrLivArea']>5) & (Xmat['SalePrice']<13)].index)
# recover
y_train = Xmat['SalePrice']
X_train = Xmat.drop(['SalePrice'], axis=1)
# XGBoost
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
print("Parameter optimization")
xgb_model = xgb.XGBRegressor()
reg_xgb = GridSearchCV(xgb_model,
                       {'max_depth': [2,4,6],
                        'n_estimators': [50,100,200]}, verbose=1)
reg_xgb.fit(X_train, y_train)
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
def create_model(optimizer='adam'):
    """Build a small fully-connected Keras regression network.

    Used via KerasRegressor so `optimizer` can be grid-searched.
    """
    layers = [
        Dense(X_train.shape[1], input_dim=X_train.shape[1],
              kernel_initializer='normal', activation='relu'),
        Dense(16, kernel_initializer='normal', activation='relu'),
        Dense(1, kernel_initializer='normal'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    return model
model = KerasRegressor(build_fn=create_model, verbose=0)
# define the grid search parameters
optimizer = ['SGD','Adam']
batch_size = [10, 30, 50]
epochs = [10, 50, 100]
param_grid = dict(optimizer=optimizer, batch_size=batch_size, epochs=epochs)
reg_dl = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
reg_dl.fit(X_train, y_train)
# SVR
from sklearn.svm import SVR
reg_svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
                       param_grid={"C": [1e0, 1e1, 1e2, 1e3],
                                   "gamma": np.logspace(-2, 2, 5)})
reg_svr.fit(X_train, y_train)
# second feature matrix
# Stack the three base models' in-sample predictions as meta-features.
X_train2 = pd.DataFrame( {'XGB': reg_xgb.predict(X_train),
     'DL': reg_dl.predict(X_train).ravel(),
     'SVR': reg_svr.predict(X_train),
    })
# second-feature modeling using linear regression
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(X_train2, y_train)
# prediction using the test set
X_test2 = pd.DataFrame( {'XGB': reg_xgb.predict(X_test),
     'DL': reg_dl.predict(X_test).ravel(),
     'SVR': reg_svr.predict(X_test),
    })
# Don't forget to convert the prediction back to non-log scale
# NOTE(review): no log-transform of SalePrice is visible in this script, yet
# np.exp() is applied here — confirm the target was meant to be logged.
y_pred = np.exp(reg.predict(X_test2))
# submission
submission = pd.DataFrame({
        "Id": test_ID,
        "SalePrice": y_pred
    })
submission.to_csv('houseprice.csv', index=False)
|
from telegram.ext import Updater, CommandHandler,MessageHandler,Filters,InlineQueryHandler,CallbackQueryHandler
from telegram import InlineKeyboardButton, InlineKeyboardMarkup,ReplyKeyboardMarkup,ReplyMarkup,KeyboardButton,InputTextMessageContent,InlineQueryResultArticle,KeyboardButton
import telegram
import os
TOKEN = os.environ['TOKEN']
def start(update,context):
    # /start handler: greet the user by first name (text in Uzbek) and show
    # the main product-category reply keyboard.
    first_name = update.message.from_user.first_name
    text = f'Hush kelibsiz {first_name} !\n biznig online do`kondan Noutbook 💻 va kompyuterlar 🖥, \nkompyuter extiyot qismlari ⌨️\nKompyuter qo`shimcha qurilmalarini topishingiz mumkin 🖨.\nMarhamt !'
    interfaoldoska = KeyboardButton(text='Interfaol doska')
    interaktivdispley = KeyboardButton(text='Interaktiv displey')
    kompyuter = KeyboardButton(text='Kompyuter(pc) 🖥')
    monoblok = KeyboardButton(text='Monoblok')
    notebook = KeyboardButton(text='Notebook 💻')
    extiyotqismlar = KeyboardButton(text='Extiyot qismlar ⚙️')
    monitor = KeyboardButton(text='Monitor 🖥')
    printer = KeyboardButton(text='Printer 🖨')
    aksesuar = KeyboardButton(text='Aksesuar 💾')
    # Two-column grid; the last row has a single button.
    keyboard = ReplyKeyboardMarkup(
        [
            [kompyuter,notebook],
            [interaktivdispley,interfaoldoska],
            [monoblok,monitor],
            [extiyotqismlar,printer],
            [aksesuar]
        ],
        resize_keyboard=True
    )
    update.message.reply_text(text=text,reply_markup=keyboard)
def kompyuter(update,context):
    # Show desktop-PC brand choices; each button opens an inline query in the
    # current chat, answered by the matching handler below.
    hp = InlineKeyboardButton(text='HP',switch_inline_query_current_chat='hp')
    legion = InlineKeyboardButton(text='Legion',switch_inline_query_current_chat='legion')
    zotac = InlineKeyboardButton(text='Zotac',switch_inline_query_current_chat='zotac')
    reply_markup = InlineKeyboardMarkup(
        [
            [hp,legion],
            [zotac]
        ]
    )
    update.message.reply_text(text='Kompyuterlar (PC)🖥:',reply_markup=reply_markup)
def hp(update,context):
    # NOTE(review): dead code — this function is shadowed by the second
    # `def hp` defined immediately below, which answers with the full
    # seven-item catalogue. Kept byte-identical; consider deleting.
    text1='HP 460-a210ur MicroTower (SUS) \nIntel Pentium-J3710\nDDR4 4GB/ HDD 1000GB\nNo DVD/ kbd/ USBmouse \nFreeDos\nRUS (4XJ29EA) (без монитора)'
    m1 = InputTextMessageContent(
        message_text=text1
    )
    result1 = InlineQueryResultArticle(
        title='HP 460-a210ur MicroTower',
        input_message_content=m1,
        description='120$',
        id=1
    )
    results = [result1]
    update.inline_query.answer(results)
def hp(update,context):
    # Inline-query handler for HP desktops: answer with seven catalogue
    # entries, each carrying a full spec sheet, a short title and a price.
    catalogue = [
        ('HP 460-a210ur MicroTower (SUS) \nIntel Pentium-J3710\nDDR4 4GB/ HDD 1000GB\nNo DVD/ kbd/ USBmouse \nFreeDos\nRUS (4XJ29EA) (без монитора)',
         'HP 460-a210ur MicroTower (SUS)\nIntel Pentium-J3710', '120$'),
        ('HP 290 G2 MicroTower (I5M)\nIntel Pentium-G5400\nDDR4 4GB/ HDD 1000GB\nNo DVD/ kbd/ USBmouse \nLCD 21,5" HP (3WP71AA) \nFreeDos\nRUS (5JP16ES)',
         'HP 290 G2 MicroTower (I5M)\nIntel Pentium-G5400', '130$'),
        ('HP Desktop Pro G2 MicroTower (111/112)\nIntel i3 - 8100\nDDR4 4GB\nHDD 500GB/ DVD\nkbd/ USBmouse \nбез монитора \nFreeDos\nRUS (5QL16EA)',
         'HP Desktop Pro G2 MicroTower (111/112)\nIntel i3 - 8100', '140$'),
        ('HP 290 G2 MicroTower (SP2) \nCore i3-8100/ DDR4 4GB\nHDD 1TB/ DVD-RW\nLCD HP N246v 23.8"\nDOS /RUS (4YV34ES)',
         'HP 290 G2 MicroTower (SP2)\nCore i3-8100', '165$'),
        ('HP 290 G2 MicroTower (8RY) \nIntel i5-8500\nDDR4 4GB/ HDD 500GB\nDVD\nkbd/ USBmouse \nLCD HP 24" N246v \nFreeDos\nRUS (4YV42ES)',
         'HP 290 G2 MicroTower (8RY)\nIntel i5-8500', '180$'),
        ('HP Omen Obelisk 875-0011ur (3ii)\nIntel i5-8500\nDDR4 8GB/HDD 1TB\nGeForce 1060 3GB\nWi-Fi/keyboard+mouse\nBT\nWin10\nJet Black (4UE94EA) No Monitor',
         'HP Omen Obelisk 875-0011ur (3ii)\nIntel i5-8500', '200$'),
        ('HP Omen 880-192ur (X5M) \nCore i7-9700K\nDDR4 16GB/ HDD 1TB\n256GB SSD\nNVIDIA GeForce GTX 2070 8GB/noDVD\nWin 10\nBlack (3QZ80EA) No Monitor',
         'HP Omen 880-192ur (X5M)\nCore i7-9700K', '220$'),
    ]
    results = []
    for number, (spec, title, price) in enumerate(catalogue, start=1):
        results.append(InlineQueryResultArticle(
            title=title,
            input_message_content=InputTextMessageContent(message_text=spec),
            description=price,
            id=number
        ))
    update.inline_query.answer(results)
def legion(update,context):
    # Inline-query handler: single Legion desktop offer.
    spec = 'Legion C530-19ICB Personal Computer\nIntel i5-8400\nDDR4 16GB\nHDD 1000GB\nVGA 4GB Nvidia GTX 1050TI\nNo DVD\nNo kbd\nNo Monitor'
    article = InlineQueryResultArticle(
        title='Legion C530-19ICB Personal Computer\nIntel i5-8400',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='190$',
        id=1
    )
    update.inline_query.answer([article])
def zotac(update,context):
    # Inline-query handler: single Zotac gaming-PC offer.
    spec = 'Zotac G1107TK700B-U MEK1 Gaming\nIntel Core i7-7700\n16GB DDR4 RAM\n240GB NVMe SSD\n1TB HDD/ Video GeForce GTX 1070 Ti\nkeyboard\nmmouse\nWindows 10'
    article = InlineQueryResultArticle(
        title='Zotac G1107TK700B-U MEK1 Gaming\nIntel Core i7-7700',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='250$',
        id=1
    )
    update.inline_query.answer([article])
def notebook(update,context):
    # Show laptop brand choices as inline-query buttons in a 2x2 grid.
    rows = [
        [InlineKeyboardButton(text='Asus',switch_inline_query_current_chat='asus'),
         InlineKeyboardButton(text='Lenovo',switch_inline_query_current_chat='lenovo')],
        [InlineKeyboardButton(text='Acer',switch_inline_query_current_chat='acer'),
         InlineKeyboardButton(text='HP',switch_inline_query_current_chat='HP')],
    ]
    update.message.reply_text(text='Notebooks 💻:',reply_markup=InlineKeyboardMarkup(rows))
def asus(update,context):
    # Inline-query handler: single ASUS laptop offer.
    spec = 'ASUS X540M\nCeleron 4000\nDDR4 4GB\n500GB HDD\n15.6" HD LED\nUMA\nNo DVD\nRUS (без ОС)'
    article = InlineQueryResultArticle(
        title='ASUS X540M\nCeleron 4000',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='258$',
        id=1
    )
    update.inline_query.answer([article])
def lenovo(update,context):
    # Inline-query handler: single Lenovo laptop offer.
    spec = 'Lenovo Ideapad L340 \nIntel i3-8145U\nDDR4 4 GB\nHDD 1000GB \n15.6" HD TN\n2GB NVIDIA GeForce MX110\nNo DVD\nRUS (81LG007URK)'
    article = InlineQueryResultArticle(
        title='Lenovo Ideapad L340 \nIntel i3-8145U',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='445$',
        id=1
    )
    update.inline_query.answer([article])
def acer(update,context):
    # Inline-query handler: single Acer laptop offer.
    spec = 'Acer Extensa 2519\nCeleron 3060\nDDR3 4 GB\n500GB HDD\n15.6" HD LED\nUMA/ DVD \nRUS (NX.EFAER.122)'
    article = InlineQueryResultArticle(
        title='Acer Extensa 2519\nCeleron 3060',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='250$',
        id=1
    )
    update.inline_query.answer([article])
def HP(update,context):
    # Inline-query handler: single HP laptop offer (distinct from the
    # lowercase `hp` desktop handler).
    spec = 'HP 15-rb028ur (385/136) \nAMD Dual-Core A4-9120\nDDR3 4 GB\nHDD 500GB\n15.6" HD LED\nAMD Radeon R3 integrated\nNo DVD \nRUS (4US49EA)'
    article = InlineQueryResultArticle(
        title='HP 15-rb028ur (385/136) \nAMD Dual-Core A4-9120',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='242$',
        id=1
    )
    update.inline_query.answer([article])
def Interfaoldoska(update,context):
    # Interactive whiteboard menu: one "choose" button that opens the
    # tanlash1 inline query.
    pick = InlineKeyboardButton(text='Tanlash',switch_inline_query_current_chat='tanlash1')
    update.message.reply_text(text='Interfaol doska tanlash:',
                              reply_markup=InlineKeyboardMarkup([[pick]]))
def tanlash1(update,context):
    # Inline-query handler: interactive whiteboard spec (text in Russian).
    spec = '''"Интерактивная доска FPB 10 points 82"" interactive whiteboard PH82\nФормат: 4:3, Активная поверхность доски: 1648x1176мм, Сенсорная технология: оптическая, Разрешение: 32768*32768, Время отклика: 10мс, Управление: 10 touch, \nстилус,пальцы или другие непрозрачные предметы, Материал доски: стальная поверхность, Интерфейс: USB 2.0, USB 3.0"'''
    article = InlineQueryResultArticle(
        title='Интерактивная доска FPB 10 points 82',
        input_message_content=InputTextMessageContent(message_text=spec),
        description='$',
        id=1
    )
    update.inline_query.answer([article])
def Interaktivdispley(update,context):
    # Interactive display menu: one "choose" button opening tanlash2.
    # NOTE(review): the prompt still says 'Interfaol doska tanlash:' — it
    # looks copied from Interfaoldoska; left byte-identical on purpose.
    pick = InlineKeyboardButton(text='Tanlash',switch_inline_query_current_chat='tanlash2')
    update.message.reply_text(text='Interfaol doska tanlash:',
                              reply_markup=InlineKeyboardMarkup([[pick]]))
def tanlash2(update, context):
    """Answer the inline query with the FPB 65" interactive panel card."""
    spec = '''"Интерактивная сенсорная панель FPB 65”\nДиагональ – 65”, Количество касаний – 10 (палец, перо или любые другие непрозрачные объекты), Активный размер - 1428x803мм, Соотношение сторон – 16:9, \nЯркость – 450cd/m2, Контраст – 5000:1, Подсветка – LED, Угол обзора – 178x178, Разрешение – 3840 х 2160 UHD 4K, Стерео система – 2x10W, Процессор – Intel Core i5-6400 2.7Ghz, \nОперативная память – 8GB, Жёсткий диск – SSD 240GB, Сеть – Gigabyte lan, Wi-Fi, Порты - HDMI, USB, VGA, audio, RJ-45"'''
    article = InlineQueryResultArticle(
        id=1,
        title='Интерактивная сенсорная панель FPB 65',
        description='-',
        input_message_content=InputTextMessageContent(message_text=spec),
    )
    update.inline_query.answer([article])
def monoblok(update, context):
    """Show the all-in-one (monoblok) brand chooser keyboard."""
    keyboard = [[
        InlineKeyboardButton(text='HP', switch_inline_query_current_chat='monoblokhp'),
        InlineKeyboardButton(text='Lenovo', switch_inline_query_current_chat='lenovomonoblok'),
    ]]
    update.message.reply_text(
        text='Monoblok 💻:',
        reply_markup=InlineKeyboardMarkup(keyboard),
    )
def hpmonoblok(update, context):
    """Answer the inline query with the HP ProOne 440 G4 all-in-one card."""
    spec = 'HP ProOne 440 G4 (2QW) (Intel Pentium G5400T\nDDR4 8GB\nHDD 1000GB + SSD 128GB\nIntel UHD Graphics 630\n DVD/FHD 23,8" (1920 x 1080)\nkey + mouse) (5JP19ES)'
    article = InlineQueryResultArticle(
        id=1,
        title='HP ProOne 440 G4 (2QW) (Intel Pentium G5400T',
        description='$',
        input_message_content=InputTextMessageContent(message_text=spec),
    )
    update.inline_query.answer([article])
def lenovomonoblok(update, context):
    """Answer the inline query with the Lenovo IdeaCentre AIO V530 card."""
    spec = 'Lenovo IdeaCentre AIO V530-24ICB (Intel i5-8400T\nDDR4 8GB/ HDD 1000GB \n23,8 FHD (1920x1080)\nIntel HD Graphics 630\nDVD-RW/ Wi-Fi \nkeyboard + mouse) Black (10UW000ARU)'
    article = InlineQueryResultArticle(
        id=1,
        title='Lenovo IdeaCentre AIO V530-24ICB (Intel i5-8400T',
        description='$',
        input_message_content=InputTextMessageContent(message_text=spec),
    )
    update.inline_query.answer([article])
def qismlar(update, context):
    """Show the spare-parts (extiyot qismlar) category keyboard.

    Fixed: the second parameter was misspelled `context0`; python-telegram-bot
    passes it positionally, so the rename is backward-compatible.
    """
    # NOTE(review): Qattiqdisk is the only button using callback_data (the
    # others use switch_inline_query_current_chat) and its handler is
    # commented out below — confirm it is meant to be inert.
    protsessor = InlineKeyboardButton(text='Protsessor', switch_inline_query_current_chat='Protsessor')
    Materinskayaplata = InlineKeyboardButton(text='Materinskaya plata', switch_inline_query_current_chat='Materinskaya plata')
    Videokarta = InlineKeyboardButton(text='Video karta', switch_inline_query_current_chat='Video karta')
    Qattiqdisk = InlineKeyboardButton(text='Qattiq disk', callback_data='Qattiqdisk')
    DVD = InlineKeyboardButton(text='DVD', switch_inline_query_current_chat='DVD')
    OZU = InlineKeyboardButton(text='OZU', switch_inline_query_current_chat='OZU')
    Case = InlineKeyboardButton(text='Case', switch_inline_query_current_chat='Case')
    Blokpitanie = InlineKeyboardButton(text='Blok pitanie', switch_inline_query_current_chat='Blok pitanie')
    Coolers = InlineKeyboardButton(text='Coolers', switch_inline_query_current_chat='Coolers')
    reply_markup = InlineKeyboardMarkup(
        [
            [Qattiqdisk],
            [protsessor, Materinskayaplata],
            [Videokarta, DVD],
            [OZU, Case],
            [Blokpitanie, Coolers],
        ]
    )
    update.message.reply_text(text='Extiyot qismlari:\n', reply_markup=reply_markup)
#def Qattiqdisk(update,context):
# Wire up the bot: menu message handlers plus one InlineQueryHandler per
# inline-query pattern emitted by the keyboards above.
updater = Updater(TOKEN)
updater.dispatcher.add_handler(CommandHandler('start', start))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Extiyot qismlar ⚙️'),qismlar))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Kompyuter(pc) 🖥'),kompyuter))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Notebook 💻'),notebook))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Interfaol doska'),Interfaoldoska))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Interaktiv displey'),Interaktivdispley))
updater.dispatcher.add_handler(MessageHandler(Filters.text('Monoblok'),monoblok))
updater.dispatcher.add_handler(InlineQueryHandler(hp,pattern='hp'))
updater.dispatcher.add_handler(InlineQueryHandler(legion,pattern='legion'))
updater.dispatcher.add_handler(InlineQueryHandler(zotac,pattern='zotac'))
updater.dispatcher.add_handler(InlineQueryHandler(asus,pattern='asus'))
# Anchored with '$': InlineQueryHandler applies re.match, so the bare
# 'lenovo' pattern also matched the 'lenovomonoblok' query and shadowed
# its dedicated handler registered below.
updater.dispatcher.add_handler(InlineQueryHandler(lenovo,pattern='lenovo$'))
updater.dispatcher.add_handler(InlineQueryHandler(acer,pattern='acer'))
updater.dispatcher.add_handler(InlineQueryHandler(HP,pattern='HP'))
updater.dispatcher.add_handler(InlineQueryHandler(tanlash1,pattern='tanlash1'))
# Fixed copy-paste bugs: pattern 'tanlash2' was wired to tanlash1, and
# 'monoblokhp' (sent by the monoblok keyboard) was also wired to tanlash1.
updater.dispatcher.add_handler(InlineQueryHandler(tanlash2,pattern='tanlash2'))
updater.dispatcher.add_handler(InlineQueryHandler(hpmonoblok,pattern='monoblokhp'))
updater.dispatcher.add_handler(InlineQueryHandler(lenovomonoblok,pattern='lenovomonoblok'))
#updater.dispatcher.add_handler(InlineQueryHandler(qattiqdisk,pattern='Qattiqdisk'))
updater.start_polling()
updater.idle()
# TODO pages not supported
# TODO tables not supported
# TODO multi dimensional array in binary are not supported
# struct format
# >: big
# <: little
# |: machine
# x: pad byte (no data);
# c:char;
# b:signed byte;
# B:unsigned byte;
# h:short;
# H:unsigned short;
# i:int;
# I:unsigned int;
# l:long;
# L:unsigned long;
# f:float;
# d:double.
# s:string (array of char);
# p:pascal string (with count byte).
# P:an integer type that is wide enough to hold a pointer.
# q:long long;
# Q:unsigned long long
import io
import gzip
import numpy as n
import struct
from functools import reduce
def iterheader(o):
    """Yield SDDS header characters from file object *o* one at a time.

    '!' comment lines are consumed through end-of-line, and the layout
    characters newline, tab and CR are filtered out of the stream.
    """
    ch = o.read(1)
    while ch:
        if ch == "!":
            # swallow the remainder of the comment line
            while ch not in "\n\r":
                ch = o.read(1)
        elif ch not in "\n\t\r":
            yield ch
        ch = o.read(1)
def iterbinarydata(o):
    """Yield data characters from *o*, skipping '!' comment lines.

    NOTE(review): after a comment line the first character following the
    line terminator is consumed without being yielded — this reproduces
    the original implementation's (possibly buggy) behaviour exactly.
    """
    ch = o.read(1)
    while ch:
        if ch == "!":
            # swallow the remainder of the comment line...
            while ch not in "\n\r":
                ch = o.read(1)
            if ch in "\n\r":
                ch = o.read(1)  # ...and one extra character (never yielded)
        else:
            yield ch
        ch = o.read(1)
def readtoken(o):
    """Group the character stream *o* into chunks terminated by '&end'."""
    pending = []
    for ch in o:
        pending.append(ch)
        if "".join(pending[-4:]) == "&end":
            yield "".join(pending)
            pending = []
def myreadline(o):
    """Return the next non-comment line from *o*.

    Lines beginning with '!' are skipped. Uses str.startswith so that EOF
    (readline() returning '') is returned as-is instead of raising
    IndexError on buf[0] as the original did.
    """
    buf = o.readline()
    while buf.startswith("!"):
        buf = o.readline()
    return buf
def parseheader(l):
    """Parse one '&tag key=value, ..., &end' header token into a dict.

    The tag itself is stored under the 'header' key; the trailing '&end'
    marker is discarded.
    """
    tag, body = l.split(" ", 1)
    pairs = [item.split("=") for item in body.replace(" ", "").split(",")]
    pairs.pop()  # drop the trailing '&end'
    parsed = dict(pairs)
    parsed["header"] = tag
    return parsed
# Map SDDS type names to numpy dtype codes; an endianness prefix ('<'/'>')
# is prepended by the callers (myarray) before use.
sddstypes = {
    "short": "i2",
    "long": "i4",
    "llong": "u8",  # NOTE(review): unsigned 8-byte for 'llong' looks odd — confirm
    "string": "S",
    "float": "f4",
    "double": "f8",
}
def myarray(fh, typ, count, endian):
    """Read *count* items of SDDS type *typ* from the binary stream *fh*.

    Args:
        fh: binary file-like object.
        typ: SDDS type name (looked up in sddstypes) or a raw dtype code.
        count: number of items to read.
        endian: '<' or '>' dtype prefix.
    Returns:
        A numpy array of *count* items, or None on a short read (EOF).
    """
    typ = n.dtype(endian + sddstypes.get(typ, typ))
    size = typ.itemsize * count
    ss = fh.read(size)
    if len(ss) != size:
        return None
    # frombuffer replaces numpy.fromstring (deprecated, removed in NumPy 2);
    # copy() restores the writable, independent array fromstring returned.
    return n.frombuffer(ss, dtype=typ, count=count).copy()
def mystruct(fh, typ, count, endian):
    """Unpack one struct of *count* repeated *typ* codes from *fh*.

    Returns the unpacked tuple, or None on a short read (EOF).
    """
    fmt = "%s%d%s" % (endian, count, typ)
    nbytes = struct.calcsize(fmt)
    raw = fh.read(nbytes)
    if len(raw) != nbytes:
        return None
    return struct.unpack(fmt, raw)
def mysplit(fh, count):
    """Collect whitespace-separated tokens from *fh*, one line at a time,
    until at least *count* have been read (the final line may overshoot)."""
    tokens = []
    while len(tokens) < count:
        tokens.extend(fh.readline().split())
    return tokens
class sddsdata(object):
    """Minimal SDDS (Self Describing Data Set) file reader.

    Parses the '&...' header section into self.header and, when full=True,
    the ascii or binary data pages into self.data (a list of dicts keyed
    by array name).
    """

    def __init__(self, filename, endian="little", full=True):
        """Read an SDDS file.

        Args:
            filename: path string (optionally .gz) or an open file object.
            endian: fallback byte order used when the file's own
                declaration cannot be parsed ('little' or 'big').
            full: when True the data section is parsed as well.
        """
        if hasattr(filename, "endswith"):
            self.filename = filename
            if filename.endswith(".gz"):
                fh = gzip.open(filename)
            else:
                # fixed: was the Python 2-only builtin `file(...)`
                fh = open(filename)
        else:
            self.filename = "fileobj"
            fh = filename
        try:
            self.version = fh.readline()
            fendian = fh.readline().split(" ")[1].split("-")[0]
            assert fendian in ["big", "little"]
            endian = fendian
        except AssertionError:
            print("Warning sddsdata: forcing endianess to %s" % endian)
            fh.seek(0)
            self.version = fh.readline()
        self.endian = {"little": "<", "big": ">"}[endian]
        # read header: collect '&...' tokens until the &data section starts
        it = readtoken(iterheader(fh))
        header = []
        for i in it:
            header.append(parseheader(i))
            if header[-1]["header"] == "&data":
                break
        # group consecutive &column entries under one synthetic &table entry
        header2 = []
        istable = True
        for i in header:
            if i["header"] == "&column":
                if istable == True:
                    header2.append({"header": "&table"})
                    header2[-1]["columns"] = [i]
                    istable = False
                else:
                    header2[-1]["columns"].append(i)
            else:
                header2.append(i)
        self.header = header2
        # read data pages
        if full:
            fh.read(1)
            self.data = []
            if self.header[-1]["mode"] == "ascii":
                data = {}
                self.data.append(data)
                for i in self.header:
                    if "type" in i:
                        typ = i["type"]
                        typ = n.dtype(sddstypes.get(typ, typ))
                        # fixed: "&parameter" was mojibake'd to "¶meter"
                        # (HTML entity &para decoded by an earlier tool)
                        if i["header"] == "&parameter":
                            ss = myreadline(fh)
                            d = n.array(ss, typ)
                            i["value"] = d
                        elif i["header"] == "&array":
                            dims = list(map(int, myreadline(fh).split()))
                            i["shape"] = dims
                            cnt = reduce(lambda a, b: a * b, dims)
                            d = n.array(mysplit(fh, cnt), typ).reshape(dims)
                            data[i["name"]] = d
            elif self.header[-1]["mode"] == "binary":
                while 1:
                    # each page starts with a row count; short read => EOF
                    row = myarray(fh, "long", 1, self.endian)
                    if row is None:
                        break
                    data = {}
                    self.data.append(data)
                    for i in self.header:
                        if "type" in i:
                            typ = i["type"]
                            if i["header"] == "&parameter":
                                if typ == "string":
                                    smax = 0
                                    subcount = myarray(fh, "long", 1, self.endian)[0]
                                    smax = subcount < smax and smax or subcount
                                    d = mystruct(fh, "s", subcount, self.endian)[0]
                                else:
                                    d = myarray(fh, typ, 1, self.endian)
                            elif i["header"] == "&array":
                                count = myarray(fh, "long", 1, self.endian)[0]
                                if typ == "string":
                                    # strings are length-prefixed; smax tracks
                                    # the widest one for the final dtype
                                    d = []
                                    smax = 0
                                    for r in range(count):
                                        subcount = myarray(fh, "long", 1, self.endian)[0]
                                        smax = subcount < smax and smax or subcount
                                        d.append(mystruct(fh, "s", subcount, self.endian)[0])
                                    d = n.array(d, n.dtype("S%s" % smax))
                                else:
                                    d = myarray(fh, typ, count, self.endian)
                            data[i["name"]] = d
        fh.close()

    def __str__(self):
        out = ["%s: %s" % (self.filename, self.version)]
        for i in self.header:
            oo = []
            # fixed: loop variable no longer shadows the numpy alias `n`,
            # and the key test uses != instead of identity (`is not "header"`)
            for key, val in list(i.items()):
                if key != "header":
                    oo.append("%s=%s" % (key, val))
            out.append(i["header"][1:] + " " + ", ".join(oo))
        return "\n".join(out)

    __repr__ = __str__
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
# Module-level Flask app with SQLite storage and a session signing key.
db = SQLAlchemy()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///market.db'
# NOTE(review): hard-coded secret key checked into source — move it to an
# environment variable for anything beyond local development.
app.config['SECRET_KEY'] = '3c81070f775a7e7ac6a67c22'
db.init_app(app)
# https://flask-sqlalchemy.palletsprojects.com/en/2.x/contexts/
app.app_context().push()
bcrypt = Bcrypt()
# Imported last, after `app` exists, to break the circular import with routes.
from market import routes
|
from ValueTreeViewItem import ValueTreeViewItem
class HistoryDataDisplayTreeViewEnumTypeItem(ValueTreeViewItem):
    """Tree-view item for an enum-typed value, shown over three columns:
    name, enum type name, and 'value | label' (or the raw value)."""

    def __init__(self, name, value, attribute, parent):
        super(HistoryDataDisplayTreeViewEnumTypeItem, self).__init__(
            item_data=[name, value],
            hide_data=attribute,
            parent=parent)

    def column_count(self):
        """Three display columns: name / type / value."""
        return 3

    def data(self, column):
        """Return the display text for *column*; None when out of range."""
        if column == 0:
            return self.item_data[0]
        if column == 1:
            return self.hide_data.type_name
        if column == 2:
            # prefer 'value | label' when the value maps to a known enum entry
            for label, entry in self.hide_data.special_data.items():
                if entry.value == self.item_data[1]:
                    return '{value} | {name}'.format(value=entry.value, name=label)
            return str(self.item_data[1])
        return None

    def set_data(self, column, value):
        """Enum items are read-only; editing is always rejected."""
        return False
|
def get_floats():
    """Prompt for space-separated scores and return them as floats.

    Exits the program (via quit) when parsing fails or fewer than three
    values are supplied.
    """
    try:
        raw = input("Enter elements of a list separated by space: ")
        scores = [float(token) for token in raw.split(' ')]
        if len(scores) <= 2:
            raise ValueError
        return scores
    except ValueError:
        print("At least two scores needed!")
        quit()
def summerize(a_list):
    """Print the sum of *a_list* with its two smallest values excluded.

    Works on a copy, so the caller's list is no longer mutated (the
    original removed the two minima from the argument in place).
    """
    trimmed = list(a_list)
    trimmed.remove(min(trimmed))
    trimmed.remove(min(trimmed))
    print("Sum of scores (two lowest removed):", sum(trimmed))
def main():
    """Entry point: read the scores, then report the trimmed sum."""
    summerize(get_floats())
# Guarded so importing this module (e.g. from tests) does not trigger the
# interactive prompt.
if __name__ == "__main__":
    main()
|
# -*- coding:utf-8 -*-
import os
import json
import datetime
import sys
import threading
from time import sleep
from threading import Thread
import time
# Python 2 compatibility shim: force the process-wide default encoding to
# UTF-8. reload(sys) re-exposes sys.setdefaultencoding, which site.py hides.
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)
def runFuncWithTimeLimit(func,arg,timeout):
    ''' Run func(*arg) on a worker thread with a time limit.

    Args:
        func: the function to run under the time limit
        arg: tuple of positional arguments passed to func
        timeout: time limit in seconds
    Return:
        result of func, or None on timeout or exception

    NOTE: Python 2 only — uses the except-comma syntax and the private
    CPython 2 Thread._Thread__stop API, which only marks the thread as
    stopped; the worker keeps running in the background after a timeout.
    '''
    class TimeoutException(Exception):
        pass
    # private CPython 2 API: marks a thread object as stopped
    ThreadStop = Thread._Thread__stop
    class TimeLimited(Thread):
        def __init__(self,_error= None,):
            Thread.__init__(self)
            self._error = _error
            self.result=None
        def run(self):
            try:
                self.result = func(*arg)
            except Exception,e:
                # remember the exception so the caller can inspect it
                self._error =e
        def _stop(self):
            if self.isAlive():
                ThreadStop(self)
    t = TimeLimited()
    t.start()
    t.join(timeout)
    # NOTE(review): _error is only ever assigned an exception caught from
    # func, never a TimeoutException, so this branch looks unreachable —
    # the isAlive() check below is what actually detects timeouts. Verify.
    if isinstance(t._error,TimeoutException):
        t._stop()
        print ('timeout')
        return None
    # join() returned but the worker is still alive => it exceeded timeout
    if t.isAlive():
        t._stop()
        print ('timeout')
        return None
    if t._error is None:
        return t.result
    return None
class multiThread(object):
    """Simple bounded thread pool.

    Each dispatched job runs work_func (no shared state) with a time limit,
    then runs lock_func (the shared-resource part) serialised behind one lock.
    """

    def __init__(self, max_thread_count, work_func, lock_func):
        '''
        Args:
            -max_thread_count: the max thread count allowed
            -work_func: the thread function without involving shared resources
            -lock_func: shared resources involved part of function codes, the first param of the func must be the result of work_func
        '''
        self.__max_thread_count = max_thread_count
        self.__present_thread_count = 0
        self.__work_func= work_func
        self.__lock_func = lock_func
        self.__lock = threading.Lock()
        # bookkeeping of in-flight jobs, inspected by the snap* methods
        self.__payloadList = []
        self.__paramsList = []

    def dispatch(self, args_work,args_lock,payload,timeout):
        ''' use this function to create a thread with the params needed to pass to the thread functions
        Args:
            -args_work: args for the work_func
            -args_lock: args for the lock_func
            -payload: other params need to be processed by thread
            -timeout:the max time for running work_func

        NOTE(review): __present_thread_count is incremented/decremented and
        the payload/params lists are appended without holding __lock, so the
        counters and snapshots can race under contention — verify intended.
        '''
        def threadFunc(args_work,args_lock,payload):
            self.__payloadList.append(payload)#record the payload
            self.__paramsList.append(args_work)#record the args
            r = runFuncWithTimeLimit(self.__work_func,args_work,timeout) #running the work_func with time limitation
            self.__lock.acquire()
            self.__lock_func(r,*args_lock)# running lock_func, the first param must be the result of work_func
            self.__present_thread_count=self.__present_thread_count- 1 # minus 1 for the thread count when this thread is over
            self.__payloadList.remove(payload)#remove the payload
            self.__paramsList.remove(args_work)#remove the args
            self.__lock.release()
        # busy-wait until the pool has a free slot
        while True:
            if self.__present_thread_count>=self.__max_thread_count:
                sleep (1)
            else:
                break
        # add 1 to thread count
        self.__present_thread_count=self.__present_thread_count+1
        t = threading.Thread(
            target=threadFunc, args=(args_work,args_lock,payload))
        t.start()

    def snapThreadPayloads(self):
        '''
        get the snap of payloads taken by the threads (copied under the lock)
        '''
        self.__lock.acquire()
        present_payload=[]
        for item in self.__payloadList:
            present_payload.append(item)
        self.__lock.release()
        return present_payload

    def snapThreadParams(self):
        '''
        get the snap of params given to the threads (copied under the lock)
        '''
        self.__lock.acquire()
        present_params=[]
        for item in self.__paramsList:
            present_params.append(item)
        self.__lock.release()
        return present_params

    def setMaxThreadCount(self,max_thread_count):
        # takes effect for subsequent dispatch() calls only
        self.__max_thread_count=max_thread_count
if __name__ == '__main__':
    #example (Python 2: print statements below)
    def scan(x, y):
        # simulated work; the y==3 case deliberately overruns the 60s limit
        if y==3:
            sleep(70)
        else:
            sleep(3)
        return x + y
    def record(r,f,i,y):
        # lock_func: append "i,y:result" to the shared output file
        line=str(i)+','+str(y)+":"+str(r)+'\n'
        f.writelines(line)
        f.flush()
    dp = multiThread(25, scan, record)
    f = open('aaaa.txt', 'a')
    index=0
    for i in range(0, 10):
        for y in range(0, 10):
            index=index+1
            # work args (i, y); lock args (f, i, y); payload = job index
            dp.dispatch((i, y),(f,i,y),index,60)
    print dp.snapThreadPayloads()
    print dp.snapThreadParams()
    # crude wait for in-flight jobs before closing the shared file
    sleep(13)
    f.close()
    print "ok"
|
def solution(n, arr1, arr2):
answer = []
for i in range(n):
plus = ''
ans = bin(arr1[i] | arr2[i])[2:]
if len(ans) != n:
plus = '0' * (n-len(ans))
ans = plus + ans
ans = ans.replace('1','#')
ans = ans.replace('0',' ')
answer.append(ans)
return answer
# Demo: render the secret map for the sample input.
n = 6
arr1 = [46, 33, 33, 22, 31, 50]
arr2 = [27, 56, 19, 14, 14, 10]
print(solution(n, arr1, arr2))
# Generated by Django 3.1 on 2020-09-15 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Moves course_description from Course to CourseOutline and makes
    Course.course_datestamp an auto-set, nullable timestamp."""

    dependencies = [
        ('courses', '0001_initial'),
    ]

    operations = [
        # drop the old location of the description
        migrations.RemoveField(
            model_name='course',
            name='course_description',
        ),
        # re-add it on CourseOutline; nullable so existing rows migrate
        migrations.AddField(
            model_name='courseoutline',
            name='course_description',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='course',
            name='course_datestamp',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
|
#!/usr/bin/python3
"""Strip every \\begin{proof}...\\end{proof} block from a LaTeX file and
print the remainder to stdout."""
import sys


def strip_proofs(text):
    """Return *text* with all proof environments removed.

    Exits with an error when the environments are malformed (unmatched,
    reversed, or nested \\begin{proof} markers).
    """
    while True:
        a = text.find("\\begin{proof}")
        b = text.find("\\end{proof}")
        if a == -1 and b == -1:
            break
        if a == -1 or b == -1 or a > b or text.find("\\begin{proof}", a + 1, b) != -1:
            sys.exit("[ERROR] Bad structure")
        # drop the block including the 11-character "\end{proof}" marker
        text = text[:a] + text[b + 11:]
    return text


def main():
    """CLI entry point: one argument, the path of the LaTeX file to filter."""
    if len(sys.argv) != 2:
        sys.exit("[ERROR] Bad parameter(s)")
    fname = sys.argv[1]
    try:
        # `with` replaces the old try/else close and cannot leak the handle
        with open(fname, encoding='utf-8') as file:
            text = file.read()
    except IOError:
        sys.exit("[ERROR] Could not read file \"" + fname + "\"")
    print(strip_proofs(text), end="")


# Guarded so the module can be imported (and strip_proofs tested) without
# touching sys.argv.
if __name__ == "__main__":
    main()
|
# coding: utf-8
# flake8: noqa
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from oanda.api.default_api import DefaultApi
# import ApiClient
from oanda.api_client import ApiClient
from oanda.configuration import Configuration
# import models into sdk package
from oanda.models.accept_datetime_format import AcceptDatetimeFormat
from oanda.models.account import Account
from oanda.models.account_changes import AccountChanges
from oanda.models.account_changes_state import AccountChangesState
from oanda.models.account_financing_mode import AccountFinancingMode
from oanda.models.account_id import AccountID
from oanda.models.account_properties import AccountProperties
from oanda.models.account_summary import AccountSummary
from oanda.models.account_units import AccountUnits
from oanda.models.calculated_account_state import CalculatedAccountState
from oanda.models.calculated_position_state import CalculatedPositionState
from oanda.models.calculated_trade_state import CalculatedTradeState
from oanda.models.cancellable_order_type import CancellableOrderType
from oanda.models.candlestick import Candlestick
from oanda.models.candlestick_data import CandlestickData
from oanda.models.candlestick_granularity import CandlestickGranularity
from oanda.models.client_comment import ClientComment
from oanda.models.client_configure_reject_transaction import ClientConfigureRejectTransaction
from oanda.models.client_configure_transaction import ClientConfigureTransaction
from oanda.models.client_extensions import ClientExtensions
from oanda.models.client_id import ClientID
from oanda.models.client_price import ClientPrice
from oanda.models.client_tag import ClientTag
from oanda.models.close_position_body import ClosePositionBody
from oanda.models.close_trade_body import CloseTradeBody
from oanda.models.close_transaction import CloseTransaction
from oanda.models.configure_account_body import ConfigureAccountBody
from oanda.models.create_order_body import CreateOrderBody
from oanda.models.create_transaction import CreateTransaction
from oanda.models.currency import Currency
from oanda.models.daily_financing_transaction import DailyFinancingTransaction
from oanda.models.date_time import DateTime
from oanda.models.decimal_number import DecimalNumber
from oanda.models.delayed_trade_closure_transaction import DelayedTradeClosureTransaction
from oanda.models.direction import Direction
from oanda.models.dynamic_order_state import DynamicOrderState
from oanda.models.fixed_price_order import FixedPriceOrder
from oanda.models.fixed_price_order_reason import FixedPriceOrderReason
from oanda.models.fixed_price_order_transaction import FixedPriceOrderTransaction
from oanda.models.funding_reason import FundingReason
from oanda.models.guaranteed_stop_loss_order_entry_data import GuaranteedStopLossOrderEntryData
from oanda.models.guaranteed_stop_loss_order_level_restriction import GuaranteedStopLossOrderLevelRestriction
from oanda.models.guaranteed_stop_loss_order_mode import GuaranteedStopLossOrderMode
from oanda.models.home_conversions import HomeConversions
from oanda.models.inline_response_200 import InlineResponse200
from oanda.models.inline_response_200_1 import InlineResponse2001
from oanda.models.inline_response_200_10 import InlineResponse20010
from oanda.models.inline_response_200_11 import InlineResponse20011
from oanda.models.inline_response_200_12 import InlineResponse20012
from oanda.models.inline_response_200_13 import InlineResponse20013
from oanda.models.inline_response_200_14 import InlineResponse20014
from oanda.models.inline_response_200_15 import InlineResponse20015
from oanda.models.inline_response_200_16 import InlineResponse20016
from oanda.models.inline_response_200_17 import InlineResponse20017
from oanda.models.inline_response_200_18 import InlineResponse20018
from oanda.models.inline_response_200_19 import InlineResponse20019
from oanda.models.inline_response_200_2 import InlineResponse2002
from oanda.models.inline_response_200_20 import InlineResponse20020
from oanda.models.inline_response_200_21 import InlineResponse20021
from oanda.models.inline_response_200_22 import InlineResponse20022
from oanda.models.inline_response_200_23 import InlineResponse20023
from oanda.models.inline_response_200_24 import InlineResponse20024
from oanda.models.inline_response_200_25 import InlineResponse20025
from oanda.models.inline_response_200_26 import InlineResponse20026
from oanda.models.inline_response_200_27 import InlineResponse20027
from oanda.models.inline_response_200_28 import InlineResponse20028
from oanda.models.inline_response_200_29 import InlineResponse20029
from oanda.models.inline_response_200_3 import InlineResponse2003
from oanda.models.inline_response_200_30 import InlineResponse20030
from oanda.models.inline_response_200_31 import InlineResponse20031
from oanda.models.inline_response_200_4 import InlineResponse2004
from oanda.models.inline_response_200_5 import InlineResponse2005
from oanda.models.inline_response_200_6 import InlineResponse2006
from oanda.models.inline_response_200_7 import InlineResponse2007
from oanda.models.inline_response_200_8 import InlineResponse2008
from oanda.models.inline_response_200_9 import InlineResponse2009
from oanda.models.inline_response_201 import InlineResponse201
from oanda.models.inline_response_201_1 import InlineResponse2011
from oanda.models.inline_response_400 import InlineResponse400
from oanda.models.inline_response_400_1 import InlineResponse4001
from oanda.models.inline_response_400_2 import InlineResponse4002
from oanda.models.inline_response_400_3 import InlineResponse4003
from oanda.models.inline_response_400_4 import InlineResponse4004
from oanda.models.inline_response_400_5 import InlineResponse4005
from oanda.models.inline_response_400_6 import InlineResponse4006
from oanda.models.inline_response_400_7 import InlineResponse4007
from oanda.models.inline_response_401 import InlineResponse401
from oanda.models.inline_response_404 import InlineResponse404
from oanda.models.inline_response_404_1 import InlineResponse4041
from oanda.models.inline_response_404_2 import InlineResponse4042
from oanda.models.inline_response_404_3 import InlineResponse4043
from oanda.models.inline_response_404_4 import InlineResponse4044
from oanda.models.inline_response_404_5 import InlineResponse4045
from oanda.models.inline_response_404_6 import InlineResponse4046
from oanda.models.instrument import Instrument
from oanda.models.instrument_commission import InstrumentCommission
from oanda.models.instrument_name import InstrumentName
from oanda.models.instrument_type import InstrumentType
from oanda.models.limit_order import LimitOrder
from oanda.models.limit_order_reason import LimitOrderReason
from oanda.models.limit_order_reject_transaction import LimitOrderRejectTransaction
from oanda.models.limit_order_request import LimitOrderRequest
from oanda.models.limit_order_transaction import LimitOrderTransaction
from oanda.models.liquidity_regeneration_schedule import LiquidityRegenerationSchedule
from oanda.models.liquidity_regeneration_schedule_step import LiquidityRegenerationScheduleStep
from oanda.models.mt4_transaction_heartbeat import MT4TransactionHeartbeat
from oanda.models.margin_call_enter_transaction import MarginCallEnterTransaction
from oanda.models.margin_call_exit_transaction import MarginCallExitTransaction
from oanda.models.margin_call_extend_transaction import MarginCallExtendTransaction
from oanda.models.market_if_touched_order import MarketIfTouchedOrder
from oanda.models.market_if_touched_order_reason import MarketIfTouchedOrderReason
from oanda.models.market_if_touched_order_reject_transaction import MarketIfTouchedOrderRejectTransaction
from oanda.models.market_if_touched_order_request import MarketIfTouchedOrderRequest
from oanda.models.market_if_touched_order_transaction import MarketIfTouchedOrderTransaction
from oanda.models.market_order import MarketOrder
from oanda.models.market_order_delayed_trade_close import MarketOrderDelayedTradeClose
from oanda.models.market_order_margin_closeout import MarketOrderMarginCloseout
from oanda.models.market_order_margin_closeout_reason import MarketOrderMarginCloseoutReason
from oanda.models.market_order_position_closeout import MarketOrderPositionCloseout
from oanda.models.market_order_reason import MarketOrderReason
from oanda.models.market_order_reject_transaction import MarketOrderRejectTransaction
from oanda.models.market_order_request import MarketOrderRequest
from oanda.models.market_order_trade_close import MarketOrderTradeClose
from oanda.models.market_order_transaction import MarketOrderTransaction
from oanda.models.open_trade_financing import OpenTradeFinancing
from oanda.models.order import Order
from oanda.models.order_book import OrderBook
from oanda.models.order_book_bucket import OrderBookBucket
from oanda.models.order_cancel_reason import OrderCancelReason
from oanda.models.order_cancel_reject_transaction import OrderCancelRejectTransaction
from oanda.models.order_cancel_transaction import OrderCancelTransaction
from oanda.models.order_client_extensions_modify_reject_transaction import OrderClientExtensionsModifyRejectTransaction
from oanda.models.order_client_extensions_modify_transaction import OrderClientExtensionsModifyTransaction
from oanda.models.order_fill_reason import OrderFillReason
from oanda.models.order_fill_transaction import OrderFillTransaction
from oanda.models.order_id import OrderID
from oanda.models.order_identifier import OrderIdentifier
from oanda.models.order_position_fill import OrderPositionFill
from oanda.models.order_request import OrderRequest
from oanda.models.order_specifier import OrderSpecifier
from oanda.models.order_state import OrderState
from oanda.models.order_state_filter import OrderStateFilter
from oanda.models.order_trigger_condition import OrderTriggerCondition
from oanda.models.order_type import OrderType
from oanda.models.position import Position
from oanda.models.position_aggregation_mode import PositionAggregationMode
from oanda.models.position_book import PositionBook
from oanda.models.position_book_bucket import PositionBookBucket
from oanda.models.position_financing import PositionFinancing
from oanda.models.position_side import PositionSide
from oanda.models.price import Price
from oanda.models.price_bucket import PriceBucket
from oanda.models.price_status import PriceStatus
from oanda.models.price_value import PriceValue
from oanda.models.pricing_heartbeat import PricingHeartbeat
from oanda.models.quote_home_conversion_factors import QuoteHomeConversionFactors
from oanda.models.reopen_transaction import ReopenTransaction
from oanda.models.replace_order_body import ReplaceOrderBody
from oanda.models.request_id import RequestID
from oanda.models.reset_resettable_pl_transaction import ResetResettablePLTransaction
from oanda.models.set_order_client_extensions_body import SetOrderClientExtensionsBody
from oanda.models.set_trade_client_extensions_body import SetTradeClientExtensionsBody
from oanda.models.set_trade_dependent_orders_body import SetTradeDependentOrdersBody
from oanda.models.statement_year import StatementYear
from oanda.models.stop_loss_details import StopLossDetails
from oanda.models.stop_loss_order import StopLossOrder
from oanda.models.stop_loss_order_reason import StopLossOrderReason
from oanda.models.stop_loss_order_reject_transaction import StopLossOrderRejectTransaction
from oanda.models.stop_loss_order_request import StopLossOrderRequest
from oanda.models.stop_loss_order_transaction import StopLossOrderTransaction
from oanda.models.stop_order import StopOrder
from oanda.models.stop_order_reason import StopOrderReason
from oanda.models.stop_order_reject_transaction import StopOrderRejectTransaction
from oanda.models.stop_order_request import StopOrderRequest
from oanda.models.stop_order_transaction import StopOrderTransaction
from oanda.models.take_profit_details import TakeProfitDetails
from oanda.models.take_profit_order import TakeProfitOrder
from oanda.models.take_profit_order_reason import TakeProfitOrderReason
from oanda.models.take_profit_order_reject_transaction import TakeProfitOrderRejectTransaction
from oanda.models.take_profit_order_request import TakeProfitOrderRequest
from oanda.models.take_profit_order_transaction import TakeProfitOrderTransaction
from oanda.models.time_in_force import TimeInForce
from oanda.models.trade import Trade
from oanda.models.trade_client_extensions_modify_reject_transaction import TradeClientExtensionsModifyRejectTransaction
from oanda.models.trade_client_extensions_modify_transaction import TradeClientExtensionsModifyTransaction
from oanda.models.trade_id import TradeID
from oanda.models.trade_open import TradeOpen
from oanda.models.trade_pl import TradePL
from oanda.models.trade_reduce import TradeReduce
from oanda.models.trade_specifier import TradeSpecifier
from oanda.models.trade_state import TradeState
from oanda.models.trade_state_filter import TradeStateFilter
from oanda.models.trade_summary import TradeSummary
from oanda.models.trailing_stop_loss_details import TrailingStopLossDetails
from oanda.models.trailing_stop_loss_order import TrailingStopLossOrder
from oanda.models.trailing_stop_loss_order_reason import TrailingStopLossOrderReason
from oanda.models.trailing_stop_loss_order_reject_transaction import TrailingStopLossOrderRejectTransaction
from oanda.models.trailing_stop_loss_order_request import TrailingStopLossOrderRequest
from oanda.models.trailing_stop_loss_order_transaction import TrailingStopLossOrderTransaction
from oanda.models.transaction import Transaction
from oanda.models.transaction_filter import TransactionFilter
from oanda.models.transaction_heartbeat import TransactionHeartbeat
from oanda.models.transaction_id import TransactionID
from oanda.models.transaction_reject_reason import TransactionRejectReason
from oanda.models.transaction_type import TransactionType
from oanda.models.transfer_funds_reject_transaction import TransferFundsRejectTransaction
from oanda.models.transfer_funds_transaction import TransferFundsTransaction
from oanda.models.units_available import UnitsAvailable
from oanda.models.units_available_details import UnitsAvailableDetails
from oanda.models.user_info import UserInfo
from oanda.models.user_info_external import UserInfoExternal
from oanda.models.user_specifier import UserSpecifier
from oanda.models.weekly_alignment import WeeklyAlignment
|
from lively_tk_ros.configuration.config_manager import ConfigManager
import os
from pprint import PrettyPrinter
pprinter = PrettyPrinter()


def pprint(content):
    """Pretty-print *content* through the module-wide PrettyPrinter."""
    pprinter.pprint(content)


# Load the UR3e URDF with newlines stripped, then push it into a fresh
# ConfigManager.
script_dir = os.path.dirname(__file__)
urdf_file = os.path.join(script_dir, './launch/ur3e.xml')
with open(urdf_file) as file:
    urdf = file.read().replace('\n', '')
print(urdf)

cm = ConfigManager()
print('created manager')
cm.update({'urdf': urdf})
print('updated urdf')
|
import os
import cv2 as cv

# Show the source photo, run Haar-cascade face detection on its grayscale
# version, and draw a green box around every detected face.
image = cv.imread("./Resources/Photos/group 2.jpg")
cv.imshow("Original", image)

gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
cv.imshow("Gray", gray)

# Pretrained frontal-face classifier shipped with OpenCV.
detector = cv.CascadeClassifier(
    cv.data.haarcascades + "haarcascade_frontalface_default.xml"
)
detections = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)

for x, y, w, h in detections:
    cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv.imshow("Faces", image)

cv.waitKey(0)
cv.destroyAllWindows()
|
import numpy as np
import scipy
if tuple(map(int, scipy.__version__.split('.'))) < (1, 0, 0):
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
import time
from tqdm.autonotebook import tqdm
def normalize_features(features):
    """Scale every row of *features* (an n-by-d matrix) to unit L2 norm.

    A small epsilon (1e-6) is added to each row norm so all-zero rows do
    not cause division by zero.
    """
    assert(len(features.shape)==2)
    row_norms = np.sqrt((features ** 2).sum(axis=1)).reshape(-1, 1) + 1e-6
    return features / row_norms
class vMFMM:
    """Mixture of von Mises-Fisher (vMF) distributions fitted with EM.

    All components share one fixed concentration `kappa`; EM learns the
    mixture weights `pi` and the unit mean directions `mu`.
    """

    def __init__(self, cls_num, init_method='random'):
        # cls_num: number of mixture components.
        # init_method: 'random' or 'k++' (k-means++-style seeding).
        self.cls_num = cls_num
        self.init_method = init_method

    def fit(self, features, kappa, max_it=300, tol=5e-5, normalized=False, verbose=True):
        """Run EM from a fresh initialization.

        features   -- n-by-d matrix; rows are normalized to unit length
                      unless `normalized` is True.
        kappa      -- fixed vMF concentration.
        max_it     -- maximum EM iterations.
        tol        -- early-stop threshold on the mean log-likelihood gain.
        verbose    -- show a tqdm progress bar.
        """
        self.features = features
        if not normalized:
            self.features = normalize_features(features)
        self.n, self.d = self.features.shape
        self.kappa = kappa

        # Mixture weights: random, normalized to sum to 1.
        self.pi = np.random.random(self.cls_num)
        self.pi /= np.sum(self.pi)

        if self.init_method == 'random':
            self.mu = np.random.random((self.cls_num, self.d))
            self.mu = normalize_features(self.mu)
        elif self.init_method == 'k++':
            centers = []
            centers_i = []
            # Subsample so the pairwise distance matrix stays tractable.
            if self.n > 50000:
                rdn_index = np.random.choice(self.n, size=(50000,), replace=False)
            else:
                rdn_index = np.array(range(self.n), dtype=int)
            # Rows are unit vectors, so feature . feature.T is the cosine
            # similarity; 1 - dot gives the cosine distance.
            cos_dis = 1 - np.dot(self.features[rdn_index], self.features[rdn_index].T)
            centers_i.append(np.random.choice(rdn_index))
            centers.append(self.features[centers_i[0]])
            for i in range(self.cls_num - 1):
                # k-means++ seeding: choose the next center with probability
                # proportional to the squared distance to the nearest
                # already-chosen center.
                cdisidx = [np.where(rdn_index == cci)[0][0] for cci in centers_i]
                prob = np.min(cos_dis[:, cdisidx], axis=1) ** 2
                prob /= np.sum(prob)
                centers_i.append(np.random.choice(rdn_index, p=prob))
                centers.append(self.features[centers_i[-1]])
            self.mu = np.array(centers)
            del cos_dis

        self.mllk_rec = []
        pbar = tqdm(range(max_it), disable=not verbose,
                    desc="Fitting vMF clusters", leave=False)
        for itt in pbar:
            self.e_step()
            self.m_step()
            self.mllk_rec.append(self.mllk)
            # Stop once the mean log-likelihood gain drops below tol.
            if len(self.mllk_rec) > 1 and self.mllk - self.mllk_rec[-2] < tol:
                pbar.close()
                break

    def fit_soft(self, features, p, mu, pi, kappa, max_it=300, tol=1e-6, normalized=False, verbose=True):
        """Run EM warm-started from existing responsibilities/parameters.

        p, mu, pi mirror the attributes set by `fit`; see `fit` for the
        remaining parameters.
        """
        self.features = features
        if not normalized:
            self.features = normalize_features(features)
        self.p = p
        self.mu = mu
        self.pi = pi
        self.kappa = kappa
        self.n, self.d = self.features.shape
        # Bug fix: mllk_rec was only created inside fit(), so calling
        # fit_soft() directly raised AttributeError on the first append.
        self.mllk_rec = []
        for itt in range(max_it):
            self.e_step()
            self.m_step()
            self.mllk_rec.append(self.mllk)
            if len(self.mllk_rec) > 1 and self.mllk - self.mllk_rec[-2] < tol:
                break

    def e_step(self):
        """Update responsibilities p (Equation (3)) and the mean log-likelihood."""
        # log p(x, z=k) up to a constant: kappa * <x, mu_k> + log pi_k.
        logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k
        logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)
        self.p = np.exp(logP_norm)
        self.mllk = np.mean(logsumexp(logP, axis=1))

    def m_step(self):
        """Update mixture weights pi and unit mean directions mu."""
        self.pi = np.sum(self.p, axis=0)/self.n
        # Responsibility-weighted mean of the features, renormalized to the
        # unit sphere (fast vectorized form; needs an n-by-k matmul).
        self.mu = np.dot(self.p.T, self.features)/np.sum(self.p, axis=0).reshape(-1,1)
        self.mu = normalize_features(self.mu)
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction creer_prototype_objet."""
from corps.fonctions import valider_cle
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
    """Create an object prototype (scripting function)."""
    @classmethod
    def init_types(cls):
        # Register the single overload: creer_prototype_objet(str, str).
        cls.ajouter_types(cls.creer_prototype_objet, "str", "str")
    @staticmethod
    def creer_prototype_objet(prototype, type):
        """Create an object prototype of the specified type.

        This function creates an object prototype of the given type. The
        prototype is returned and can be manipulated for further
        configuration (see the examples below). The given key is used
        as-is; if an object prototype with that key already exists,
        however, the system will try the keys ${cle}_2, ${cle}_3 and so
        on until a free one is found.

        If the given prototype key ends with "_X" (the underscore
        followed by the letter X), the system looks for the matching
        prototype by replacing X with a number (often the more natural
        behaviour when creating prototypes in series).

        Parameters to specify:

          * prototype: the key of the prototype to create (a string);
          * type: the name of the type (a string).

        Usage examples:

          prototype = creer_prototype_objet("pomme_rouge", "fruit")
          # If an object prototype with key 'pomme_rouge' already exists,
          # the system will create the prototype 'pomme_rouge_2'
          journal = creer_prototype_objet("journal_X", "livre")
          # The system will try to create the prototype 'journal_1'.
          # If that key exists, then 'journal_2', 'journal_3' and so on.
          changer_nom journal "un journal" "journaux"
          changer_etat journal "est posé là" "sont posés là"
          changer_description journal "C'est un journal."
          changer_poids journal 0.2
          changer_prix journal 50
          ajouter_chapitre journal "Chapitre 1" "C'est le chapitre 1."
          ...
        """
        # Canonicalize the requested type name (note: `type` intentionally
        # shadows the builtin here, matching the scripting signature).
        type = importeur.objet.get_type(type).nom_type
        cles = tuple(importeur.objet.prototypes.keys())
        prototype = prototype.lower()
        nb = 1
        cle = prototype
        if prototype.endswith("_x"):
            prototype = prototype[:-2]
            cle = prototype + "_1"
        valider_cle(prototype)
        # Probe cle, cle_2, cle_3, ... until a free key is found.
        while cle in cles:
            nb += 1
            cle = "{}_{}".format(prototype, nb)
        return importeur.objet.creer_prototype(cle, type)
|
# Generated by Django 3.1.2 on 2020-11-13 08:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-declare the FKs ProductOption.product and
    ProductOptionChoice.product_option with CASCADE deletes and reverse
    accessors 'options' / 'choices'."""
    dependencies = [
        ('api', '0002_auto_20201113_1032'),
    ]
    operations = [
        migrations.AlterField(
            model_name='productoption',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='options', to='api.product'),
        ),
        migrations.AlterField(
            model_name='productoptionchoice',
            name='product_option',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='api.productoption'),
        ),
    ]
|
from django.views.generic import FormView
from .mixins import AjaxableResponseMixin
from .forms import EmailSendForm
from .utils import email_send
# Create your views here.
class EmailSendView(AjaxableResponseMixin, FormView):
    """POST-only contact endpoint: relays validated form data by e-mail."""
    form_class = EmailSendForm
    http_method_names = [u'post']

    def form_valid(self, form):
        # Forward the validated fields to the site inbox.
        cleaned = form.cleaned_data
        email_send(
            email=cleaned['email'],
            to_email='inquire@spectrumone.co',
            subject=cleaned['subject'],
            content=cleaned['message'],
        )
        return super(EmailSendView, self).form_valid(form)
|
def notas(*n, sit=False):
    """Return summary statistics for a set of grades.

    Parameters:
    n   -- one or more numeric grades.
    sit -- when True, also include a 'situação' (status) entry derived
           from the average: >= 7 'Boa', >= 5 'Razoável', else 'Péssimo'.

    Returns a dict with 'total', 'maior', 'menor', 'média' and,
    optionally, 'situação'.
    """
    resumo = dict()  # renamed: the original dict shadowed the function name
    resumo['total'] = len(n)
    resumo['maior'] = max(n)
    resumo['menor'] = min(n)
    resumo['média'] = sum(n) / len(n)
    if sit:
        # Bug fix: the original used two independent `if`s, so an average
        # >= 7 was first set to 'Boa' and then overwritten by 'Razoável';
        # the else branch also misspelled the key as 'situção'.
        if resumo['média'] >= 7:
            resumo['situação'] = 'Boa'
        elif resumo['média'] >= 5:
            resumo['situação'] = 'Razoável'
        else:
            resumo['situação'] = 'Péssimo'
    return resumo
# Main program: summarize three grades, including the status entry.
resp = notas(4, 5, 5, sit = True )
print(resp)
# Library Imports
import os
# Disable Fortran's console Ctrl-C handler so the script handles interrupts.
os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'
import sys
# Make the parent directory importable (MultiTD3, custom_pendulum live there).
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import gym
from MultiTD3 import Agent
import random
from gym.wrappers.time_limit import TimeLimit
from custom_pendulum import CustomPendulum
from gym.envs.classic_control.pendulum import PendulumEnv
# Base path for all data/model output below.
path = os.getcwd()
# Load the environment: seeded custom pendulum, 200 steps per episode.
env = TimeLimit(env=CustomPendulum(PendulumEnv(), _seed=0), max_episode_steps=200)
# Training settings and running statistics.
n_games = 300
score_history = []  # per-episode returns
avg_history = []  # 100-episode moving average of returns
best_score = env.reward_range[0]  # start from the worst representable score
avg_score = 0
# Global replay buffer definition follows.
class ReplayBuffer:
    """Fixed-size ring buffer of transitions the agent learns from.

    Transitions live in pre-allocated numpy arrays; once `max_size`
    entries have been written, new transitions overwrite the oldest.
    """
    def __init__(self, max_size, input_shape, dim_actions):
        self.mem_size = max_size  # capacity of the ring buffer
        self.mem_cntr = 0         # total number of writes so far
        self.state_memory = np.zeros((self.mem_size, input_shape), dtype=np.float32)
        self.new_state_memory = np.zeros((self.mem_size, input_shape), dtype=np.float32)
        self.action_memory = np.zeros((self.mem_size, dim_actions), dtype=np.float32)
        self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)
        # Bug fix: the deprecated alias `np.bool` was removed in NumPy 1.24;
        # the builtin bool is the documented replacement.
        self.terminal_memory = np.zeros(self.mem_size, dtype=bool)
    def store_transition(self, state, action, reward, new_state, done):
        """Write one transition, overwriting the oldest slot when full."""
        index = self.mem_cntr % self.mem_size
        self.state_memory[index] = state
        self.new_state_memory[index] = new_state
        self.action_memory[index] = action
        self.reward_memory[index] = reward
        self.terminal_memory[index] = done
        self.mem_cntr += 1
    def sample_buffer(self, batch_size):
        """Sample `batch_size` distinct stored transitions uniformly.

        Returns (states, actions, rewards, next_states, dones).
        """
        max_mem = min(self.mem_cntr, self.mem_size)
        batch = np.random.choice(max_mem, batch_size, replace=False)
        states = self.state_memory[batch]
        _states = self.new_state_memory[batch]
        actions = self.action_memory[batch]
        rewards = self.reward_memory[batch]
        dones = self.terminal_memory[batch]
        return states, actions, rewards, _states, dones
# Instantiate the TD3 agent and a 1M-transition replay buffer sized to the env.
agent = Agent(env, 'agent')
Buffer = ReplayBuffer(1000000, env.observation_space.shape[0], env.action_space.shape[0])
for i in range(n_games):
    score = 0
    done = False
    # Initial reset of the environment for this episode.
    obs = env.reset()
    while not done:
        action = agent.choose_action(obs)
        _obs, reward, done, info = env.step(action)
        Buffer.store_transition(obs, action, reward, _obs, done)
        obs = _obs
        score += reward
        # Optimize the agent on a 64-sample minibatch every step.
        agent.learn(Buffer, 64)
    score_history.append(score)
    avg_score = np.mean(score_history[-100:])
    avg_history.append(avg_score)
    # Checkpoint whenever the 100-episode moving average improves.
    if avg_score > best_score:
        best_score = avg_score
        print(f"Saving 'agent' with best score:{best_score}")
        agent.actor.save_weights(path + '/SingleAgentProfiling/data/agent.h5')
        print(f'Episode:{i} \t ACC. Rewards: {score:3.2f} \t AVG. Rewards: {avg_score:3.2f} \t *** MODEL SAVED! ***')
    else:
        print(f'Episode:{i} \t ACC. Rewards: {score:3.2f} \t AVG. Rewards: {avg_score:3.2f}')
# Persist the per-episode and moving-average reward curves.
np.save(path + '/SingleAgentProfiling/data/score_history', score_history, allow_pickle=False)
np.save(path + '/SingleAgentProfiling/data/avg_history', avg_history, allow_pickle=False)
|
# Read how many numbers to collect, then read them one by one.
# NOTE(review): the name `list` shadows the builtin list type for the rest
# of this module — consider renaming (left unchanged here; used below).
list = []
getNum = int(input('How many numbers: '))
for n in range(getNum):
    numbers = int(input('Enter number: '))
    list.append(numbers)
def addList(numbers):
    """Return the sum of the numbers in the given iterable (0 if empty)."""
    # The builtin handles the accumulation loop — and the local accumulator
    # no longer shadows the builtin `sum`.
    return sum(numbers)
# Sum the collected numbers and print the total.
result = addList(list)
print(result)
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QPushButton, QCheckBox, QGridLayout, QLineEdit, QLabel, QSizePolicy
class AboutUsWindow(QDialog):
    """Frameless, dark-themed "About" dialog for InstantGIS.

    Shows the application version (read from `controller.version`),
    credits, an icons8 attribution, and a Close button.
    """
    def __init__(self, controller, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.controller = controller
        # Borderless window; the stylesheet below draws its own border.
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setStyleSheet('''
            QDialog {
                background: #323232;
                border: 2px solid black;
                border-radius: 4px;
            }
            QLabel {
                color: #808080;
                font-size: 14px;
            }
            QPushButton {
                height: 25px;
                width: 70px;
                color: #808080;
                background: #202020;
                font-size: 14px;
                font-weight: bold;
                border: 2px solid #808080;
                border-radius: 3px;
            }
            QPushButton:hover {
                background: #343434;
            }
            QPushButton:pressed {
                background: #484848;
            }
        ''')
        # Static labels: title (with version), credits, purpose, attribution.
        self.title_label = QLabel('InstantGIS - version {}'.format(self.controller.version))
        self.title_label.setAlignment(Qt.AlignHCenter)
        self.title_label.setStyleSheet('font-size: 20px; font-weight: bold;')
        self.creators_label = QLabel('InstantGIS was created by Alex Kasapis with the help of Filippos Zacharopoulos.')
        self.creators_label.setAlignment(Qt.AlignHCenter)
        self.creators_label.setWordWrap(True)
        self.goal_label = QLabel('InstantGIS is a free tool that tries to cut down the time required to transfer world coordinate points stored in analog media (publications, books, etc) into digital files.')
        self.goal_label.setAlignment(Qt.AlignHCenter)
        self.goal_label.setWordWrap(True)
        self.icons8_label = QLabel('This application was created using image assets from icons8.com.')
        self.icons8_label.setAlignment(Qt.AlignHCenter)
        self.icons8_label.setWordWrap(True)
        self.close_button = QPushButton('Close')
        self.close_button.clicked.connect(self.close_button_clicked)
        # Single-column grid; row 3 stretches so the button stays at the bottom.
        layout = QGridLayout(self)
        layout.setSpacing(40)
        layout.setContentsMargins(15, 10, 10, 10)
        layout.setRowStretch(3, 1)
        self.setLayout(layout)
        layout.addWidget(self.title_label, 0, 0)
        layout.addWidget(self.creators_label, 1, 0)
        layout.addWidget(self.goal_label, 2, 0)
        layout.addWidget(self.icons8_label, 3, 0)
        layout.addWidget(self.close_button, 4, 0)
    def close_button_clicked(self):
        # Dismiss the dialog.
        self.close()
|
# Interactive driver: read a paragraph from the user and print the emotion
# category predicted by the Diary2 classifier.
import Diary2
print("문단을 입력해보세요")
para=input()
print("슬픔, 중립, 행복, 불안, 분노, 예외 리스트입니다.")
response = Diary2.predict(para)
print(response)
# Example: Diary2.predict("너무 슬퍼요")
|
#!/usr/bin/env python
# encoding: utf-8
# @author: liusir
# @file: demo_04.py
# @time: 2020/11/29 9:47 AM
# Demo: check whether expected key/value pairs are present in a JSON object.
import json
json_obj = {"access_token":"39_qHfCmB0GdutZ2MXC0G5IbzrM3WY7ES3JQF_bY04G-ceI-umT7_9E7-m0e3lVx-YFJRcTMnmKga-ijt45IFCrBPeIbbq0PsFphgzjAyaAeYhk8Po13Ix7oQQAi-a85xplVyuERp_rIci3wiP1CRKiAFAIXQ","expires_in":7200}
except_str = '{"expires_in":7200,"key":18}'
except_dict = json.loads(except_str)
print( except_dict.items() )
print( json_obj.items() )
# Approach 1: check only the first expected item.
if list(except_dict.items())[0] in list(json_obj.items()):
    print( 'true' )
# Approach 2: check every expected item.
yes_no = []
for except_item in except_dict.items():
    if except_item in json_obj.items():
        yes_no.append( True )
    else:
        yes_no.append( False )
if False in yes_no:
    print( 'False' )
else:
    print( 'true' )
|
from django.contrib import admin
# Register your models here.
# Register your models here.
from .models import *
#model admin options
class TreeAdmin(admin.ModelAdmin):
    """Changelist options for the `tree` model.

    Bug fix: renamed from `PostModelAdmin`, which was defined twice in
    this module (the second definition shadowed the first); the inner
    `class Meta` was dropped because ModelAdmin does not read a Meta
    inner class — the model is bound via admin.site.register().
    """
    list_display = ["id", "mainlocation", "othername"]
    #list_display_links = ["updated"]
admin.site.register(tree, TreeAdmin)
class InformationAdmin(admin.ModelAdmin):
    """Changelist options for the `information` model.

    Bug fix: renamed from `PostModelAdmin` (duplicate class name in this
    module); the unused inner `class Meta` was removed — ModelAdmin does
    not consume it, the model is bound via admin.site.register().
    """
    list_display = ["id", "location", "death"]
    #list_display_links = ["updated"]
admin.site.register(information, InformationAdmin)
|
# Here we include the weather-api so we can use it in our Python application.
# NOTE(review): the `weather` package wraps the Yahoo Weather API, which has
# since been discontinued — confirm this lookup still works.
from weather import Weather, Unit
# Look up current conditions for a fixed location and print the textual
# summary (e.g. "Cloudy").
weather = Weather(unit=Unit.CELSIUS)
location = weather.lookup_by_location('Anchorage, AK')
condition = location.condition
print(condition.text)
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Bug fix: removed the stray line "åååæææøøø" — a bare expression of
# undefined identifiers that raised NameError before any input was read.
# Read length and width in metres and print the rounded area.
lengde_meter_streng = input("Skriv inn lengde i meter: ")
bredde_meter_streng = input("Skriv inn bredde i meter: ")
lengde_meter = float(lengde_meter_streng)
bredde_meter = float(bredde_meter_streng)
areal = lengde_meter*bredde_meter
print(f"Arealet er: {round(areal, 2)}")
|
from LoginApp import login_app
import unittest
# Credentials shared by the test cases below.
uname = "prakash"
pwd = "Prakash123"
class Test_Login_App(unittest.TestCase):
    """Tests for LoginApp registration and login.

    NOTE(review): each test calls login_app.__init__(self) to initialize
    app state onto the TestCase instance — unusual; presumably login_app
    is designed to be mixed in this way (confirm against LoginApp).
    Several "failure" scenarios below assert True on results that look
    like they should be rejections — flagged inline.
    """
    def test_user_creation_success(self):
        login_app.__init__(self)
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
    def test_user_creation_failure(self):
        login_app.__init__(self)
        # Registers a different (unused) user name with the same password.
        uname="prakash123"
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
    def test_success_login(self):
        login_app.__init__(self)
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
        result=login_app.check(self,uname,pwd)
        self.assertTrue(result)
    def test_failure_login(self):
        login_app.__init__(self)
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
        # NOTE(review): logging in as unregistered user "pk" should fail;
        # this looks like it should be assertFalse — confirm.
        result=login_app.check(self,"pk",pwd)
        self.assertTrue(result)
    def test_minimum_password_length(self):
        login_app.__init__(self)
        # NOTE(review): "rrrrr" is 5 chars — if a minimum length is
        # enforced, this should assert a rejection, not success.
        pwd="rrrrr"
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
    def test_password_consists_of_one_alphabet(self):
        login_app.__init__(self)
        # NOTE(review): all-digit password — verify the expected outcome.
        pwd="1234567"
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
    def test_password_consists_of_atleast_one_integer(self):
        login_app.__init__(self)
        # NOTE(review): "prakash" has no digit — a rejection seems intended.
        pwd="prakash"
        result=login_app.register(self,uname,pwd)
        self.assertTrue(result)
if __name__ == '__main__':
    unittest.main()
class ErrorCodes:
    """Symbolic names for the error codes returned to API clients.

    Each value equals its attribute name so codes serialize readably.
    """
    COMBINATION_DOES_NOT_EXIST = 'COMBINATION_DOES_NOT_EXIST'
    GAME_DOES_NOT_EXIST = 'GAME_DOES_NOT_EXIST'
    GAME_IS_FINISHED = 'GAME_IS_FINISHED'
    WORD_HAS_BEEN_ADDED_ALREADY = 'WORD_HAS_BEEN_ADDED_ALREADY'
    INCORRECT_LENGTH = 'INCORRECT_LENGTH'
    INCORRECT_SEQUENCE = 'INCORRECT_SEQUENCE'
    WORD_DOES_NOT_EXIST = 'WORD_DOES_NOT_EXIST'
|
from item import Item
from filemanager import setup
def fractional_knapsack(capacity, items):
    '''
    Greedy fractional-knapsack solver.

    Items are taken whole in decreasing profit/weight order; if the best
    remaining candidate does not fit, a fractional piece of it is added
    so the knapsack is filled exactly to `capacity`.

    Assumption: each item can only be added to the knapsack once.
    Mutates `items` (taken items are removed). Returns the list of
    (possibly fractional) packed items, or None for an empty problem.
    '''
    if capacity <= 0 or len(items) <= 0:
        return None
    knapsack = []

    def sort_by_ratio():
        '''Sort items by profit/weight, best ratio first.'''
        items.sort(key=lambda x: x.profit / x.weight, reverse=True)

    def take_whole(weight):
        '''Greedily move every item that still fits into the knapsack.'''
        index = 0
        while items and index < len(items):
            if weight + items[index].weight <= capacity:
                knapsack.append(items[index])
                weight += items[index].weight
                items.pop(index)
            else:
                index += 1
        return weight

    def take_fraction(weight):
        '''Add a fractional piece of the best remaining item to fill up.'''
        remainder = capacity - weight
        item = items[0]
        fraction = remainder / item.weight
        knapsack.append(Item(fraction * item.profit, remainder))

    sort_by_ratio()
    weight = take_whole(0)
    # Bug fix: only take a fraction when capacity remains AND items remain;
    # the original indexed items[0] unconditionally and raised IndexError
    # whenever every item fit in the knapsack.
    if items and weight < capacity:
        take_fraction(weight)
    return knapsack
def binary_knapsack(capacity, items):
    '''
    0-1 knapsack solver, top-down recursion.

    Walks the item list from the back; at each index the result is the
    max of including and excluding that item.

    Returns (knapsack, best_profit). NOTE: the returned knapsack list is
    always empty — reconstruction of the chosen items is not implemented
    (the original's half-finished attempt was commented out and has been
    removed as dead code).
    '''
    knapsack = []

    def best(remaining, index):
        # Base case: no capacity left or no items left to consider.
        if remaining <= 0 or index == 0:
            return 0
        item = items[index - 1]
        # Skip items heavier than the remaining capacity.
        if item.weight > remaining:
            return best(remaining, index - 1)
        return max(item.profit + best(remaining - item.weight, index - 1),
                   best(remaining, index - 1))

    return knapsack, best(capacity, len(items))
def calc_tests(items):
    '''Return (total_profit, total_weight) summed over `items`.'''
    total_profit = sum(item.profit for item in items)
    total_weight = sum(item.weight for item in items)
    return total_profit, total_weight
def main():
    """Load a knapsack instance via filemanager.setup() and compare the
    fractional (greedy) and 0-1 (recursive) solutions."""
    # capacity = 30
    # items = [Item(50, 5), Item(60, 10), Item(120, 20)]
    capacity, solution, items, weights, profits = setup()
    # list(items) copies: fractional_knapsack mutates its item list.
    item = fractional_knapsack(capacity, list(items))
    profit, weight = calc_tests(item)
    print(f"Profit: {profit}, Weight: {weight}")
    print(f"Remaining capacity: {capacity - weight}")
    knapsack, profit = binary_knapsack(capacity, list(items))
    # print(*knapsack, sep='\n')
    print(f"Binary profit: {profit}")
if __name__ == "__main__":
    main()
|
import string
from helpers import alphabet_position, rotate_character
def encrypt(text, word):
    """Vigenère-style encryption of `text` using `word` as a repeating key.

    Non-letter characters do not consume a key position: `counter` is
    decremented for them so the following letter reuses the same key
    index. Every character (letter or not) is still passed through
    rotate_character with the current key letter's alphabet position.
    """
    vigenere = ""
    counter = 0
    for index, character in enumerate(text):
        if character not in string.ascii_letters:
            # Keep the key stream aligned with letters only.
            counter -= 1
        key_letter = word[(index + counter) % len(word)].lower()
        shift = string.ascii_lowercase.index(key_letter)
        vigenere = vigenere + rotate_character(character, shift)
    return vigenere
def main():
    """Prompt for a message and a key word, then print the encrypted text."""
    text = input("Please enter the text you would like encrypted ")
    word = input("Please enter the word that will make your encryption ")
    print(encrypt(text, word))
if __name__ == '__main__':
    main()
|
# Sample (key, value) pairs for the word-count style map/reduce exercises below.
a = (1, 2), (3, 4), (1, 3), (2, 3), (3, 1), (3, 2)
def my_map(key, values):
    """Map each pair in `values` to (first_element, 1); `key` is unused."""
    return [(pair[0], 1) for pair in values]
# print(my_map('Q1', a))
def my_reducer(intermediates):
    """Count occurrences of each distinct key among (key, value) pairs.

    dict(intermediates) yields one entry per distinct first element in
    first-seen order; every matching pair counts as 1 regardless of its
    second element.
    """
    counts = []
    for distinct_key in dict(intermediates):
        occurrences = sum(1 for pair in intermediates if pair[0] == distinct_key)
        counts.append((distinct_key, occurrences))
    return counts
# print(my_reducer(my_map('Q1', a)))
# Three sample "documents" for the inverted-index exercises below.
b = ('Doc-1', 'The map function that transforms, filters, or selects input data'), (
    'Doc-2', 'The reduce function that aggregates, combines, or collections results'), (
    'Doc-3', 'The map function and reduce function are invoked in sequence')
def map_index(doc):
    """Build word -> [(doc_key, 1), ...] postings from (key, text) pairs.

    Commas are stripped from the text before whitespace tokenisation.
    """
    postings = {}
    for doc_key, text in doc:
        for token in text.replace(',', '').split():
            postings.setdefault(token, []).append((doc_key, 1))
    return postings
def reduce_index(intermediate):
    """Collapse postings into (word, [(doc, count), ...]) pairs.

    `dict(postings)` keeps one entry per distinct document in first-seen
    order; the count is the number of postings for that document.
    """
    reduced = []
    for word, postings in intermediate.items():
        per_doc = []
        for doc_key in dict(postings):
            total = sum(1 for posting in postings if posting[0] == doc_key)
            per_doc.append((doc_key, total))
        reduced.append((word, per_doc))
    return reduced
# Single-document sample for the pair-emitting map/reduce variants below.
c = ('Doc-1', 'The map function that transforms, filters, or selects input data')
def map_index2(key, values):
    """Emit a (word, (doc_key, 1)) pair for every token in `values`.

    Commas are stripped before whitespace tokenisation. (The local that
    shadowed the builtin `list` has been renamed.)
    """
    pairs = []
    for word in values.replace(',', '').split():
        pairs.append((word, (key, 1)))
    return pairs
def reduce_index2(key, values):
    """Reduce (doc, 1) postings for one word into (key, [(doc, count), ...]).

    Bug fix: the original iterated a `set` of document names, so the
    output order varied between runs (hash randomisation). Deduplicate
    with dict.fromkeys instead: it keeps first-seen order, making the
    result deterministic while containing the same (doc, count) pairs.
    """
    per_doc = []
    distinct_docs = dict.fromkeys(value[0] for value in values)
    for doc in distinct_docs:
        count = sum(1 for value in values if value[0] == doc)
        per_doc.append((doc, count))
    return (key, per_doc)
# Demo: aggregate three postings for the word 'The' across two documents.
print(reduce_index2('The', [('Doc-1', 1), ('Doc-2', 1), ('Doc-1', 1),]))
# print(map_index2(c[0], c[1]))
|
import functools
from nltk.stem.snowball import SnowballStemmer
import numpy as np
import pandas as pd
import scipy.sparse
import sklearn.feature_extraction.text
import sklearn.metrics
# Shared Snowball stemmer used by the "stem" preprocessing option below.
stemmer = SnowballStemmer('english')
def stemming_preprocessor(data):
    """Stem `data` with the module-level English Snowball stemmer."""
    return stemmer.stem(data)
@functools.lru_cache(maxsize=3)
def load_dataset(sampling_method, vectorization, preprocessing):
    """Load (or build and disk-cache) vectorized train/test splits.

    Parameters
    ----------
    sampling_method : 'random_downsampling' | 'full' | 'oversampled'
    vectorization : 'count' | 'binary' | 'tf_idf'
    preprocessing : None | 'stop_words' | 'stem'
        ('binary' has no 'stem' variant.)

    Returns (train_X, train_y, test_X, test_y); the sparse matrices are
    cached under vectorized/ so repeated calls skip refitting.
    """
    filename = f'vectorized/{sampling_method}-{vectorization}-{preprocessing or "none"}'
    # Bug fix: the cache filenames ignored `filename`, so every parameter
    # combination read and wrote the same pair of files on disk.
    filename_train = f'{filename}-TRAIN.npz'
    filename_test = f'{filename}-TEST.npz'
    loaded_from_disk = False
    try:
        train_as_vector = scipy.sparse.load_npz(filename_train)
        test_as_vector = scipy.sparse.load_npz(filename_test)
        loaded_from_disk = True
    except (OSError, ValueError):
        # Narrowed from a bare `except:` (missing/corrupt cache file) so
        # real bugs and KeyboardInterrupt are no longer swallowed.
        print('have to generate new vectorizations')
    vectorizers = {
        'count': {
            None: sklearn.feature_extraction.text.CountVectorizer(),
            'stop_words': sklearn.feature_extraction.text.CountVectorizer(stop_words='english'),
            'stem': sklearn.feature_extraction.text.CountVectorizer(preprocessor=stemming_preprocessor)
        },
        'binary': {
            None: sklearn.feature_extraction.text.CountVectorizer(binary=True),
            'stop_words': sklearn.feature_extraction.text.CountVectorizer(binary=True, stop_words='english'),
        },
        'tf_idf': {
            None: sklearn.feature_extraction.text.TfidfVectorizer(),
            'stop_words': sklearn.feature_extraction.text.TfidfVectorizer(stop_words='english'),
            'stem': sklearn.feature_extraction.text.TfidfVectorizer(preprocessor=stemming_preprocessor)
        }
    }
    vectorizer = vectorizers[vectorization][preprocessing]
    filenames = {
        'random_downsampling': ('downsampled_train.csv', 'test.csv'),
        'full': ('full_train.csv', 'test.csv'),
        'oversampled': ('oversampled_train.csv', 'test.csv'),
    }
    train_name, test_name = filenames[sampling_method]
    train = pd.read_csv(train_name, header=0, index_col=0)
    test = pd.read_csv(test_name, header=0, index_col=0)
    if not loaded_from_disk:
        # Fit on the training text only; the test split is transform-only.
        train_as_vector = vectorizer.fit_transform(train['reviewText'].values)
        test_as_vector = vectorizer.transform(test['reviewText'].values)
        print('saving matrices to disk')
        scipy.sparse.save_npz(filename_train, train_as_vector)
        scipy.sparse.save_npz(filename_test, test_as_vector)
    return train_as_vector, train['overall'].values, test_as_vector, test['overall'].values
# load_dataset('random_downsampling', 'count', None)
def get_score(classifier, test, test_targets):
    """Balanced accuracy of `classifier` on the given test split."""
    predictions = classifier.predict(test)
    return sklearn.metrics.balanced_accuracy_score(test_targets, predictions)
def display_confusion_matrices(classifier, test, test_targets):
    """Print and plot the row-normalized confusion matrix for `classifier`.

    NOTE(review): `plot_confusion_matrix` was deprecated and removed in
    scikit-learn 1.2 in favour of ConfusionMatrixDisplay.from_estimator —
    confirm the pinned scikit-learn version.
    """
    print(sklearn.metrics.confusion_matrix(
        test_targets, classifier.predict(test), normalize='true'))
    sklearn.metrics.plot_confusion_matrix(
        classifier, test, test_targets, normalize='true', cmap='Purples')
def display_score(classifier, test, test_targets):
    """Print the balanced-accuracy score of `classifier` on the test split."""
    print(f'SCORE: {get_score(classifier, test, test_targets)}')
def display_classifier_performance(classifier, test, test_targets):
    """Print the balanced-accuracy score and show the confusion matrices."""
    display_score(classifier, test, test_targets)
    display_confusion_matrices(classifier, test, test_targets)
def order_aware_error(estimator, test_X, test_Y):
    """Per-class mean rating distance among *misclassified* samples.

    For each true class 1..5, computes sum(|pred - true|) over that
    class's samples divided by the number of misclassified samples —
    i.e. how far, on average, an error lands from the truth. Returns a
    dict with one 'order_aware_error_<k>' entry per class plus their
    unweighted average under 'order_aware_error_avg'.
    """
    klasses = {}
    for klass in range(1, 5+1):
        klass_indices = (test_Y == klass)
        klass_predictions = estimator.predict(test_X[klass_indices])
        klass_error_count = np.sum(klass_predictions != test_Y[klass_indices])
        total_distance = np.sum(np.abs(klass_predictions - test_Y[klass_indices]))
        # Bug fix: a perfectly-predicted class used to raise
        # ZeroDivisionError; report 0.0 error for it instead.
        klasses[f'order_aware_error_{klass}'] = (
            total_distance / klass_error_count if klass_error_count else 0.0
        )
    klasses['order_aware_error_avg'] = sum(klasses.values()) / 5
    return klasses
def perf_row(
    classifier, test_as_vec, test_targets, classifier_type, sampling,
    representation, preprocessing, **classifier_specific):
    """Assemble one results-table row for a fitted classifier.

    Records the experimental configuration, plain accuracy
    ('real_world_acc'), balanced accuracy ('score') and the per-class
    order-aware error metrics. Extra keyword arguments are merged in as
    classifier-specific hyperparameter columns.
    """
    return {
        'classifier_type': classifier_type,
        'sampling': sampling,
        'representation': representation,
        'preprocessing': preprocessing,
        **classifier_specific,
        'real_world_acc': classifier.score(test_as_vec, test_targets),
        'score': get_score(classifier, test_as_vec, test_targets),
        **order_aware_error(classifier, test_as_vec, test_targets),
    }
import random
import json
# Output sink, opened in append mode. NOTE(review): `json` is imported but
# never used, and a `with` block would be safer than the manual close below.
f = open('json.json', 'a')
def generate(x, y):
    """Prompt for a line of text and derive a pseudo-random integer.

    The result mixes the input's length with x, y and a random draw:
    ((x + y + len(text)) * x) + y + randint(1, 100).
    """
    # Renamed from `str`: the local no longer shadows the builtin.
    text = input("Enter a text (or number): ")
    h = (x + y + len(text)) * x + y
    h = h + random.randint(1, 100)
    return h
# Generate ten values from random seeds and append them to the file.
# NOTE(review): values are written with no separator or newline, and the
# file is not JSON despite its name — confirm the intended format.
for i in range(0,10):
    a = generate(random.randint(1,100),random.randint(1,100))
    f.write(str(a))
f.close()
|
# Read an integer and report whether it is a multiple of 3.
score1 = int(input('숫자를 입력하세요 : '))
message = '3의 배수입니다.' if score1 % 3 == 0 else '3의 배수가 아닙니다.'
print(message)
|
from setuptools import setup
#from Cython.Build import cythonize
# Packaging metadata for the cgmspec library (pure Python, no extensions).
setup(
    name="cgmspec",
    version="0.1",
    description="Python software for modeling and synthetic spectra from an idealized CGM model",
    author="M. Hamel",
    author_email="magdalena.hamel@gmail.com",
    url="https://github.com/ntejos/cgmspec.git",
    packages=['cgmspec'],
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'astropy',
        'scipy',
        'matplotlib',
        'numpy'
    ])
|
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DefaultUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import gettext_lazy as _
from .forms import AdminUserChangeForm
from .models import Room, User
class AdminRoomChangeForm(forms.ModelForm):
    """Admin change form for Room showing the password as a read-only hash."""
    password = ReadOnlyPasswordHashField(
        help_text=_("Raw passwords are not stored, so there is no way to see this password.")
    )
    class Meta:
        model = Room
        fields = '__all__'
    def clean_password(self):
        # Always return the initial hash so saving the admin form can
        # never overwrite the stored password.
        return self.initial.get("password", "")
class RoomAdmin(admin.ModelAdmin):
    """Room admin bound to AdminRoomChangeForm (read-only password field)."""
    list_display = ('name', 'pinned', 'created')
    form = AdminRoomChangeForm
def activate_users(modeladmin, request, queryset):
    """Admin action: mark every selected user as active."""
    queryset.update(is_active=True)
activate_users.short_description = "Make selected users active"
class UserAdmin(DefaultUserAdmin):
    """Custom user admin with extra profile fields and a bulk-activate action."""
    form = AdminUserChangeForm
    list_display = ('username', 'email', 'is_staff')
    search_fields = ('username', 'email')
    # Fields shown on the "add user" admin page.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2'),
        }),
    )
    # Fields shown when editing an existing user.
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('email', 'kredyti', 'level_override', 'motto', 'avatar')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important info'), {'fields': ('last_login', 'last_ip', 'date_joined')}),
    )
    actions = [activate_users]
admin.site.register(Room, RoomAdmin)
admin.site.register(User, UserAdmin)
|
# coding = utf-8
# UI smoke test: log in to the ITSM site with Selenium and navigate to the
# "operating time management" screen inside its iframe.
import time
import selenium
from selenium import webdriver
browser = webdriver.Chrome()
browser.get("http://118.178.253.144:8080/itsm") # open the ITSM login page
browser.maximize_window()
# fill in the user name
browser.find_element_by_id('accountNameId').send_keys('admin#jingyu')
# fill in the password
browser.find_element_by_id('passwordId').send_keys('1')
# time.sleep(10)
# captcha value is hard-coded (originally a manual step)
browser.find_element_by_id('verifyCodeId').send_keys('8888')
# click the "log in" button
browser.find_element_by_id('loginBtn').click()
time.sleep(5)
# click "basic functions" in the sidebar
browser.find_element_by_xpath(
    "//div[@id='main-container']/div[1]/div[@id='sidebar']/div[@id='sidebar-shortcuts']/div[@id='sidebar-shortcuts-large']/button[2]").click()
time.sleep(2)
# click "service quality management"
browser.find_element_by_id('menu_li_id').find_element_by_xpath('li[2]/a[1]').click()
time.sleep(1)
# remember how many iframes exist now, so the freshly created one can be
# located by index after opening "operating time management"
oldFrameCut = len(
    browser.find_element_by_id('maincontent').find_element_by_xpath('div[1]/div[1]').find_element_by_class_name(
        'main').find_elements_by_tag_name('iframe'))
# click "operating time management"
browser.find_element_by_id('menu_li_id').find_element_by_xpath('li[2]/ul[1]/li[5]/a[1]').click()
time.sleep(2)
# switch into the newly added iframe
# NOTE(review): switch_to_frame is deprecated in Selenium; switch_to.frame
# is the replacement — confirm the pinned selenium version.
iframe = browser.find_element_by_id('maincontent').find_element_by_xpath('div[1]/div[1]').find_element_by_class_name(
    'main').find_elements_by_tag_name('iframe')[oldFrameCut]
browser.switch_to_frame(iframe)
time.sleep(1)
# Create a new operating time entry and verify it appears in the table.
def addOperatingTime(operating_time_name):
    """Fill in the "new operating time" form, submit it, and check that the
    new row (name, working time, holiday) shows up in the result table.

    Prints a Chinese success/failure message (kept verbatim: the strings
    are the script's observable output).
    """
    browser.find_element_by_class_name('iconsh-plus').click()
    time.sleep(1)
    # operating-time name field
    browser.find_element_by_id('optime_div').find_element_by_xpath('table[1]/tbody[1]/tr[1]/td[2]/div[1]/input[2]').send_keys(operating_time_name)
    time.sleep(1)
    # open the "working time" dropdown and pick its second option,
    # remembering the option text for the verification step below
    browser.find_element_by_id('optime_div').find_element_by_xpath('table[1]/tbody[1]/tr[2]/td[2]/div[1]/div[1]/div[1]/i[1]').click()
    workingTime_o=browser.find_element_by_id('optime_div').find_element_by_xpath('table[1]/tbody[1]/tr[2]/td[2]/div[1]/div[1]/div[2]/ul[1]/li[2]')
    workingTime=workingTime_o.text
    workingTime_o.click()
    # same for the "holiday" dropdown
    browser.find_element_by_id('optime_div').find_element_by_xpath('table[1]/tbody[1]/tr[3]/td[2]/div[1]/div[1]/div[1]/i[1]').click()
    holiday_o=browser.find_element_by_id('optime_div').find_element_by_xpath('table[1]/tbody[1]/tr[3]/td[2]/div[1]/div[1]/div[2]/ul[1]/li[2]')
    holiday=holiday_o.text
    holiday_o.click()
    browser.find_element_by_id('btn_submit').click()
    time.sleep(2)
    # read back every row of the result table and collect the relevant columns
    operatingTimeResult=browser.find_element_by_id('baseTable').find_elements_by_xpath('tbody[1]/tr')
    operatingTimeNameResult=[]
    workingTimeNameResult=[]
    holidayTimeNameResult=[]
    for i in operatingTimeResult:
        operatingTimeNameResult.append(i.find_element_by_xpath('td[3]').text)
        workingTimeNameResult.append(i.find_element_by_xpath('td[5]/a[1]').text)
        holidayTimeNameResult.append(i.find_element_by_xpath('td[6]/a[1]').text)
    # success iff name, working time and holiday all appear in the table
    if operating_time_name in operatingTimeNameResult and workingTime in workingTimeNameResult and holiday in holidayTimeNameResult:
        print('运营时间新增成功')
    else:
        print('运营时间新增失败')
# Create a test entry; its name is reused by the search/edit/delete steps below.
operatingTimeName='测试运营时间'
addOperatingTime(operatingTimeName)
# Fuzzy-search the operating-time table by keyword.
def selectOperatingTime(operating_time_name):
    """Filter the table by *operating_time_name* via the keyword search box."""
    time.sleep(1)
    search_box = browser.find_element_by_id('keyWord')
    search_box.clear()
    time.sleep(1)
    search_box.send_keys(operating_time_name)
    browser.find_element_by_class_name('iconsh-search1').click()
    time.sleep(2)
# Edit an existing operating time, then verify the rename took effect.
def editOperatingTime(edit_operating_time_name):
    """Rename the entry found via the global operatingTimeName to
    *edit_operating_time_name* and print whether the new name appears
    in the result table."""
    selectOperatingTime(operatingTimeName)
    # tick the first result row and open the edit dialog
    browser.find_element_by_id('baseTable').find_element_by_xpath('tbody[1]/tr[1]/td[1]/label[1]/input[1]').click()
    browser.find_element_by_class_name('iconsh-edit').click()
    time.sleep(1)
    # the name input is the second input, reached via its sibling node
    name_xpath = 'table[1]/tbody[1]/tr[1]/td[2]/div[1]/input[2]'
    browser.find_element_by_id('optime_div').find_element_by_xpath(name_xpath).clear()
    time.sleep(1)
    browser.find_element_by_id('optime_div').find_element_by_xpath(name_xpath).send_keys(edit_operating_time_name)
    browser.find_element_by_id('btn_submit').click()
    time.sleep(2)
    shown_names = [cell.text for cell in
                   browser.find_element_by_id('baseTable').find_elements_by_xpath('tbody[1]/tr/td[3]')]
    if edit_operating_time_name in shown_names:
        print('运营时间修改成功')
    else:
        print('运营时间修改失败')
# Rename the entry created above, then verify the edit.
editOperatingTimeName='编辑测试运营时间'
editOperatingTime(editOperatingTimeName)
# Set the first matching entry as the default operating time, then verify.
def defaultTimeSetting():
    """Mark the entry found via the global operatingTimeName as the default
    and print whether the table then flags it with '是' (yes)."""
    selectOperatingTime(operatingTimeName)
    # click the "set default" link in the first row, then confirm the dialog
    browser.find_element_by_id('baseTable').find_element_by_xpath('tbody[1]/tr[1]/td[7]/div[1]/a[1]').click()
    browser.find_element_by_class_name('layui-layer-dialog').find_element_by_xpath('div[3]/a[1]').click()
    time.sleep(2)
    default_flag = browser.find_element_by_id('baseTable').find_element_by_xpath('tbody[1]/tr[1]/td[4]/span[1]').text
    print('默认运营时间设置成功' if default_flag == '是' else '默认运营时间设置失败')
defaultTimeSetting()
# Delete an operating time entry, then verify the delete succeeded.
def deleteOperatingTime(edit_operating_time_name):
    """Delete the entry located via the global operatingTimeName and print
    whether *edit_operating_time_name* is gone from the result table.

    Success means the name is no longer present after the delete.
    """
    selectOperatingTime(operatingTimeName)
    # tick the first result row and click the delete icon
    browser.find_element_by_id('baseTable').find_element_by_xpath('tbody[1]/tr[1]/td[1]/label[1]/input[1]').click()
    browser.find_element_by_class_name('iconsh-delete').click()
    time.sleep(2)
    remaining_names = [cell.text for cell in
                       browser.find_element_by_id('baseTable').find_elements_by_xpath('tbody[1]/tr/td[3]')]
    # BUG FIX: the original printed edit-result messages ("运营时间修改…",
    # copy-pasted from editOperatingTime); report delete results instead.
    if edit_operating_time_name in remaining_names:
        print('运营时间删除失败')
    else:
        print('运营时间删除成功')
deleteOperatingTime(editOperatingTimeName)
from KerasWrapper.Wrappers.LayerWrapper import LayerWrapper
from typing import List
from abc import abstractmethod
from abc import ABC
from keras.models import Sequential
class NeuralNetWrapper(ABC):
    """Abstract base for Keras model wrappers whose hyperparameters are
    tuned by an evolutionary algorithm.

    Subclasses implement :meth:`compile` to build the underlying model.
    Configuration uses a fluent interface — ``with_epochs``,
    ``with_batch_size`` and ``with_layers`` each return ``self`` so calls
    can be chained.
    """

    def __init__(self, input_size, output_size, problem_type):
        # Hyperparameters configured by the Evolutive algorithm (genes);
        # None until the corresponding with_* setter is called.
        self._epochs = None
        self._batch_size = None
        self._layers = None
        # Hyperparameters fixed by the Evolutive algorithm's user.
        self._input_size = input_size
        self._output_size = output_size
        self._problem_type = problem_type

    @abstractmethod
    def compile(self):
        """Build and return the underlying model (subclass-specific)."""

    def with_epochs(self, epochs: int) -> 'NeuralNetWrapper':
        """Set the number of training epochs; returns self for chaining."""
        self._epochs = epochs
        return self

    def with_batch_size(self, batch_size: int) -> 'NeuralNetWrapper':
        """Set the training batch size; returns self for chaining."""
        self._batch_size = batch_size
        return self

    def with_layers(self, layers: List[LayerWrapper]) -> 'NeuralNetWrapper':
        """Set the layer configuration; returns self for chaining.

        Requires at least one layer. (The original wrapped this assert in a
        redundant ``if __debug__:`` guard — ``assert`` is already a no-op
        when ``__debug__`` is false, so the guard is dropped.)
        """
        assert len(layers) >= 1, "at least one layer is required"
        self._layers = layers
        return self

    @property
    def layers(self) -> List[LayerWrapper]:
        """The configured layer wrappers (None until with_layers is called)."""
        return self._layers
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.