index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,800 | 38bd9e5b2147838b6061925d72b989c83343f1c2 | from csv import reader, writer
from collections import OrderedDict as OrdDic
import sqlite3
from jsmin import jsmin
from glob import glob
from csscompressor import compress
from threading import Timer
from glob import glob
import os
import shutil
import logging
import json
class MinifyFilesPre:
    """Minifies the project's JavaScript files, optionally merging them
    into a single ``full_version.js`` bundle."""

    def __init__(self, merge=False):
        """Collect every source .js file, excluding the generated bundle.

        :param merge: when True, ``js_merge`` accumulates all files into
            ``self.js`` for a later ``save``; otherwise each file is
            minified in place.
        """
        file_names = glob("resources/static/js_files/*.js")
        # The generated bundle must not be fed back into itself.
        try:
            file_names.remove("resources/static/js_files/full_version.js")
        except ValueError:
            pass  # bundle not generated yet
        self.file_names = file_names
        self.merge = merge
        self.js = ""

    def save(self):
        """combines several js files together, with optional minification"""
        with open("resources/static/js_files/full_version.js", 'w', newline="\n") as w:
            w.write(self.js)

    def js_merge(self):
        """saves minified version to a single one"""
        if self.merge:
            js = ""
            for file_name in self.file_names:
                try:
                    with open(file_name, newline="\n") as r:
                        js += jsmin(r.read())
                except FileNotFoundError:
                    print(f"The file {file_name} could not be found")
            self.js = jsmin(js)
        else:
            for file_name in self.file_names:
                self.min_js_file(file_name)

    @staticmethod
    def min_js_file(file_name):
        """Minify a single JavaScript file in place."""
        with open(file_name, newline="\n") as r:
            js = jsmin(r.read())
        with open(file_name, 'w', newline="\n") as w:
            w.write(js)

    @staticmethod
    def min_css_file(file_name):
        """Write a minified ``.min.css`` companion next to *file_name*."""
        with open(file_name, newline="\n") as r:
            css = compress(r.read())
        with open(file_name[:-4] + '.min.css', 'w', newline="\n") as w:
            w.write(css)

    @staticmethod
    def get_js_files():
        """Return the list of source .js files.

        Bug fix: the original computed the list but never returned it,
        so callers always received None.
        """
        file_names = glob("resources/static/js_files/*.js")
        try:
            file_names.remove("resources/static/js_files/full_version.js")
        except ValueError:
            pass
        return file_names
class DbManager:
    """Thin sqlite3 wrapper for the log-returns database.

    NOTE: ``with sqlite3.connect(...)`` commits/rolls back but does not
    close the connection; several methods return live cursors that the
    caller iterates afterwards, which relies on that behaviour.
    """

    def __init__(self, fname=None, tname=None):
        """Store the database path and table name, with log defaults."""
        self.FILE_NAME = fname if fname else 'resources/static/LOG_Temp.db'
        self.TABLE_NAME = tname if tname else "'LOG_RETURNS'"

    @staticmethod
    def _append_like_clause(cond, val, cond_string_list, cond_list):
        """Append the LIKE fragment(s) and bound parameters for one column.

        Mutates *cond_string_list* and *cond_list* in place.
        ``"a|b"`` becomes ``(lower(col) LIKE ? OR lower(col) LIKE ?)``;
        ``"a, b"`` becomes two AND-ed ``lower(col) LIKE ?`` terms.
        NOTE(review): OR alternatives are not lowercased, unlike AND
        terms — behaviour preserved from the original.
        """
        if "|" in val:
            alternatives = val.split("|")
            for sub_val in alternatives:
                cond_list.append(f"%{sub_val}%")
            cond_string_list.append(
                "(" + f"lower({cond}) LIKE ?" +
                f" OR lower({cond}) LIKE ?" * (len(alternatives) - 1) + ")")
        else:
            for sub_val in val.split(", "):
                cond_string_list.append(f"lower({cond}) LIKE ?")
                cond_list.append(f"%{sub_val.lower()}%")

    def query_data(self, conditions, entries):
        """Run a filtered SELECT over the log table.

        :param conditions: dict mapping column names to search text; a
            value may contain "|" (OR-ed alternatives) or ", " (AND-ed
            terms).  The keys 'other' (extra serials search),
            'logTimeFrom' and 'logTimeTo' (inclusive range) must be
            present and are handled separately.
        :param entries: maximum number of rows returned.
        :return: live cursor of matching rows (newest first), or None if
            the SQL failed (error is printed).  Debug prints of the query
            removed from the original.
        """
        try:
            with sqlite3.connect(self.FILE_NAME) as conn:
                c = conn.cursor()
                condition_order = ['logID',
                                   'returnType',
                                   'sender',
                                   'reciever',
                                   'logTime',
                                   'dutyOfficer',
                                   'net',
                                   'serials']
                cond_list = []          # bound LIKE parameters
                cond_string_list = []   # SQL fragments, AND-joined below
                for cond in condition_order:
                    val = conditions.get(cond, "")
                    self._append_like_clause(cond, val,
                                             cond_string_list, cond_list)
                if conditions['other']:
                    # Free-text search applies to the serials JSON column.
                    self._append_like_clause("serials", conditions['other'],
                                             cond_string_list, cond_list)
                if conditions['logTimeFrom']:
                    if conditions['logTimeTo']:
                        cond_string_list.append("logTime>= ? AND logTime<= ?")
                        cond_list.append(conditions['logTimeFrom'])
                        cond_list.append(conditions['logTimeTo'])
                    else:
                        cond_string_list.append("logTime>= ?")
                        cond_list.append(conditions['logTimeFrom'])
                elif conditions['logTimeTo']:
                    cond_string_list.append("logTime <= ?")
                    cond_list.append(conditions['logTimeTo'])
                cond_string = ' AND '.join(cond_string_list)
                results = c.execute(f"SELECT * FROM {self.TABLE_NAME} WHERE "
                                    f"{cond_string}"
                                    f" ORDER BY logID DESC LIMIT {entries}", cond_list)
                return results
        except sqlite3.OperationalError as e:
            print(e)

    def create_db(self, ret=False):
        """Create the log table if it does not exist.

        :param ret: when True, return a cursor over the current rows.
        """
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            try:
                c.execute(f'''CREATE TABLE {self.TABLE_NAME} (
            `logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
            `returnType` text,
            `sender` text,
            `reciever` text,
            `logTime` integer,
            `dutyOfficer` text,
            `net` TEXT,
            `serials` text
        );''')
                conn.commit()
            except sqlite3.OperationalError:
                print("The Db already exists")
            if ret:
                return self.read_return()

    def count_records(self):
        """Return a cursor yielding a single (row_count,) tuple.

        COUNT('LogID') counts a literal string, which is never NULL, so
        this is effectively COUNT(*).
        """
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            results = c.execute(f"SELECT COUNT('LogID') FROM {self.TABLE_NAME}")
            return results

    def create_game_table(self, ret=False):
        """Create the game high-score table if it does not exist."""
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            try:
                c.execute(f'''CREATE TABLE `{self.TABLE_NAME}` (
                `GameID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
                `Name` TEXT DEFAULT '?',
                `Rank` TEXT DEFAULT '?',
                `Pl` TEXT DEFAULT '?',
                `Score` INTEGER DEFAULT 0,
                `Time` INTEGER
            );''')
                conn.commit()
            except sqlite3.OperationalError:
                print("The Db already exists")
            if ret:
                return self.read_return()

    def new_return(self, lst):
        """Insert one row; the logID column is auto-assigned (NULL)."""
        try:
            with sqlite3.connect(self.FILE_NAME) as conn:
                c = conn.cursor()
                placeholders = '?, ' * (len(lst) - 1) + '?'
                c.execute(
                    'INSERT INTO ' + self.TABLE_NAME +
                    ' VALUES (NULL,' + placeholders + ')',
                    lst)
        except sqlite3.OperationalError as e:
            print(e)

    def delete_return_byID(self, id):
        """Delete one row by its logID.

        Bug fix: the id is now bound as a parameter instead of being
        interpolated into the SQL string (injection risk).
        """
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            c.execute(f"DELETE FROM {self.TABLE_NAME} WHERE logID = ?", (id,))

    def read_return(self, entries=None):
        """Return a cursor over rows, newest first, limited to *entries*.

        On a missing table the log table is (re)created instead.
        """
        try:
            with sqlite3.connect(self.FILE_NAME) as conn:
                c = conn.cursor()
                if entries:
                    results = c.execute(f"SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}")
                else:
                    # should not be used but just here just in case
                    results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
                return results
        except sqlite3.OperationalError as e:
            if 'no such table' in str(e):
                DbManager.create_db(self)

    def read_game_score(self, entries=None):
        """Return a cursor over game rows ordered by score, best first."""
        try:
            with sqlite3.connect(self.FILE_NAME) as conn:
                c = conn.cursor()
                if entries:
                    results = c.execute(f"SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}")
                else:
                    # should not be used but just here just in case
                    results = c.execute(f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')
                return results
        except sqlite3.OperationalError as e:
            if 'no such table' in str(e):
                DbManager.create_game_table(self)

    def find_index(self, log_id):
        """Return a cursor selecting the single row with the given logID."""
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            sql_str = ("""SELECT * FROM """ +
                       self.TABLE_NAME +
                       """ WHERE logID=?""")
            return c.execute(sql_str, [str(log_id)])

    def get_first_index(self):
        """Return the highest logID, or "" when the table is empty."""
        with sqlite3.connect(self.FILE_NAME) as conn:
            newest = ""
            c = conn.cursor()
            sqlStr = ("""SELECT logID FROM """ +
                      self.TABLE_NAME +
                      """ WHERE logID = (SELECT MAX(logID) FROM """ +
                      self.TABLE_NAME + ")")
            for row in c.execute(sqlStr):
                newest = int(list(row)[0])
            return newest

    def update_record(self, lst, logID):
        """Overwrite every user column of the row with the given logID.

        Bug fix: the logID is now bound as a parameter instead of being
        concatenated into the SQL string (injection risk).
        """
        with sqlite3.connect(self.FILE_NAME) as conn:
            c = conn.cursor()
            rowData = """returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?"""
            c.execute(
                'UPDATE ' + self.TABLE_NAME + ' SET ' + rowData + ' WHERE logID=?',
                list(lst) + [logID])
class File:
    """Static helpers for application persistence: the log database,
    serial/settings JSON files, and simple line-based resource files.

    Improvements over the original: file handles are closed via context
    managers, dead commented-out legacy code is removed, and the
    ``@staticmethod``-with-``self`` oddities are documented.
    """

    @staticmethod
    def db_connect(sets):
        """Build a DbManager from the settings dict.

        Missing 'DB_FILE_NAME' / 'DB_TABLE_NAME' entries fall back to
        the DbManager defaults (get() returns None).
        """
        fname = sets.get('DB_FILE_NAME')
        tname = sets.get('DB_TABLE_NAME')
        return DbManager(fname=fname, tname=tname)

    @staticmethod
    def generate_css_min():
        """Produce the minified companion of the main stylesheet."""
        MinifyFilesPre.min_css_file('resources/static/styles/main.css')

    @staticmethod
    def pre_merge(merge=False):
        """Prepare JavaScript assets; merge into one bundle when asked."""
        if merge:
            tmp_file = MinifyFilesPre()
            tmp_file.js_merge()
            tmp_file.save()
        else:
            # Result intentionally ignored; kept for parity with original.
            MinifyFilesPre.get_js_files()

    @staticmethod
    def get_first(self):
        """Return the newest logID from *self* (a DbManager).

        NOTE(review): declared @staticmethod yet takes ``self``; callers
        must invoke it as ``File.get_first(db)``.
        """
        return self.get_first_index()

    @staticmethod
    def save_dic(dic):
        """ Saves the given dictionary of serials to a file """
        with open("resources/files/serials.csv", "w") as f:
            json.dump(dic, f)

    @staticmethod
    def read_dic():
        """ reads the dictionary of serials """
        # The file is JSON despite the legacy .csv name.
        dic = OrdDic()
        with open("resources/files/serials.csv", "r") as f:
            dic.update(json.load(f))
        return dic

    @staticmethod
    def read_legacy():
        """ Depreciated reads the dictionary and returns it in the legacy format """
        serials = File.read_dic()
        final_dic = OrdDic()
        for name, dic in serials.items():
            inner_dic = OrdDic()
            for serial in dic:
                inner_dic.update({serial: dic[serial]['desc']})
            final_dic.update({name: inner_dic})
        return final_dic

    @staticmethod
    def read_locations():
        """ reads the file containing the locations """
        with open("resources/files/locations.txt", "r", newline="\n") as r:
            return r.read().split("\n")

    @staticmethod
    def save_Locations(lst):
        """Write the locations list, one entry per line."""
        with open("resources/files/locations.txt", "w", newline="\n") as w:
            w.write('\n'.join(lst))

    @staticmethod
    def save_callsigns(lst):
        """Write the callsigns list, one entry per line."""
        with open("resources/files/callsigns.txt", "w", newline="\n") as w:
            w.write('\n'.join(lst))

    @staticmethod
    def read_callsigns():
        """ reads the file containing the callsigns """
        with open("resources/files/callsigns.txt", "r", newline="\n") as r:
            return r.read().split("\n")

    @staticmethod
    def read_settings():
        """ reads the settings from file """
        settings = OrdDic()
        with open("resources/files/settings.txt", "r") as f:
            settings.update(json.load(f))
        return settings

    @staticmethod
    def save_settings(dic):
        """ saves the given settings (dictionary) to file """
        with open("resources/files/settings.txt", "w") as f:
            json.dump(dic, f)

    @staticmethod
    def save_log(self, log, update=False):
        """ Saves the log to file """
        # The six main fields get their own columns; whatever remains in
        # *log* is serialised to JSON into the 'serials' column.
        # NOTE(review): declared @staticmethod yet takes ``self`` (a
        # DbManager); call as ``File.save_log(db, log)``.
        main_keys = [
            'name',
            'sender',
            'receiver',
            'time',
            'duty',
            'net'
        ]
        lst = []
        for key in main_keys:
            lst.append(log[key])
            log.pop(key)  # NOTE: mutates the caller's dict
        lst.append(json.dumps(log))
        if update:
            self.update_record(lst, log['logID'])
        else:
            self.new_return(lst)

    @staticmethod
    def load_log_query(Db, query):
        """Run *query* through Db.query_data and decode rows to OrdDics."""
        x = list(Db.query_data(query, 100))
        local_log = list()
        for row in x:
            row = list(row)
            try:
                ret = OrdDic()
                ret.update({'logID': row[0]})
                ret.update({'name': row[1]})
                ret.update({'sender': row[2]})
                ret.update({'receiver': row[3]})
                ret.update({'time': row[4]})
                ret.update({'duty': row[5]})
                ret.update({'net': row[6]})
                ret.update(json.loads(row[7]))
                local_log.append(ret)
            except TypeError:
                print("none value in db")
        return local_log

    @staticmethod
    def load_log(Db, log_id=None):
        """ loads the log file """
        # With *log_id*: return a single OrdDic (or None-ish on bad data).
        # Without: return a list of OrdDics for the newest 100 rows.
        if log_id:
            row = Db.find_index(log_id).fetchone()
            ret = None
            try:
                ret = OrdDic()
                ret.update({'logID': row[0]})
                ret.update({'name': row[1]})
                ret.update({'sender': row[2]})
                ret.update({'receiver': row[3]})
                ret.update({'time': fix_time(row[4])})
                ret.update({'duty': row[5]})
                ret.update({'net': row[6]})
                ret.update(json.loads(row[7]))
            except TypeError:
                pass  # This is handled upon return (it returns None type)
            return ret
        else:
            try:
                x = list(Db.read_return(entries=100))
            except TypeError:
                x = ""
            local_log = list()
            for row in x:
                row = list(row)
                try:
                    ret = OrdDic()
                    ret.update({'logID': row[0]})
                    ret.update({'name': row[1]})
                    ret.update({'sender': row[2]})
                    ret.update({'receiver': row[3]})
                    ret.update({'time': fix_time(row[4])})
                    ret.update({'duty': row[5]})
                    ret.update({'net': row[6]})
                    ret.update(json.loads(row[7]))
                    local_log.append(ret)
                except TypeError:
                    print("none value in db")
            return local_log

    @staticmethod
    def delete_log_byID(Db, id):
        """Delete one log row by its logID."""
        Db.delete_return_byID(id)
def fix_time(dtg):
    """Zero-pad a date-time-group to the canonical six digits.

    Generalised from the original single-'0' prefix, which only handled
    five-digit values: ``zfill`` pads any shorter value correctly and
    leaves six-digit values untouched.

    :param dtg: int or str time, e.g. 93000 for 09:30:00
    :return: six-character string, e.g. "093000"
    """
    return str(dtg).zfill(6)
class SaveTimer(object):
    """Repeatedly invokes *function* every *interval* seconds until stopped.

    The timer arms itself immediately on construction.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()

    def _run(self):
        # Re-arm before invoking so a slow callback does not delay the
        # next tick's scheduling.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        """Arm the underlying one-shot Timer unless already running."""
        if self.is_running:
            return
        self._timer = Timer(self.interval, self._run)
        self._timer.start()
        self.is_running = True

    def stop(self):
        """Cancel the pending tick and mark the timer idle."""
        self._timer.cancel()
        self.is_running = False
def copytree(src, dst, symlinks=False, ignore=None):
    """Copy every entry of *src* into the (already existing) directory *dst*.

    Subdirectories are copied recursively via shutil.copytree; plain files
    keep their metadata via shutil.copy2.
    """
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
def backup():
    """Copy the app's files and database to a removable 'CP' USB volume.

    Scans /Volumes (macOS), skipping the known system volumes, and saves
    into <usb>/Backup/{files,db}. Prints a notice when no suitable
    volume is present.
    """
    volumes = glob("/Volumes/*")
    ignored = ["/Volumes/student", "/Volumes/com.apple.TimeMachine.localsnapshots", "/Volumes/Macintosh HD", "Blah"]
    for skip in ignored:
        try:
            volumes.remove(skip)
        except ValueError:
            pass
    usb = None
    for volume in volumes:
        if "CP" in volume:
            usb = os.path.join(volume, "Backup")
            break
    else:
        usb = None
        print("No Backup USB found")
    if usb:
        # save to usb
        print("Saving...", end=" ")
        save(os.path.join(usb, "files"), "resources/files")
        save(os.path.join(usb, "db"), "resources/static/db")
        print("Saved")
def save(dest, src):
    """Recursively copy *src* into *dest*, creating *dest* first if needed."""
    if not os.path.exists(dest):
        os.makedirs(dest)
    copytree(src, dest)
if __name__ == '__main__':
    # Manual smoke-test hooks; nothing runs when imported as a module.
    pass
    # File.pre_merge()
    # settings = File.read_settings()
    # File.save_settings(settings)
    # File.read_locations()
    # File.read_callsigns()
    # File.save_dic()
    # File.read_dic()
    # x = file()
    # x.save()
|
9,801 | 7d138a0ad7e4d8f7047dd73ae503bdc7ae5aa065 | print("rap.sweeps.data_management level init") |
9,802 | f268dc4c2ae2c17e7d0d3921d29e6b952fc63c7d | # encoding=utf8
import json
import time
from util import http_util
available = 1  # asset status flag: operations enabled
disable = 0  # asset status flag: operations disabled
# Exchange REST domain
REST_URL = "https://api.bithumb.com"
PAYMENT_CURRENCY_BTC = "BTC"
PAYMENT_CURRENCY_KRW = "KRW"
# Success status code
SUCCESS_CODE = "0000"
# Error codes meaning the trading pair does not exist
NO_SYMBOL_CODE = ["5600", "5500"]
# Minimum withdrawal quantities for bithumb, source: https://apidocs.bithumb.com/docs/withdrawal_coin
MIN_WITHDRAWAL_QUANTITY = """BTC: 0.002 | ETH: 0.01 | LTC: 0.1 | ETC: 0.1 | XRP: 21 | BCH: 0.002 | BTG: 0.002 | EOS: 0.5 | ICX: 4 | TRX: 150 | ELF: 10 | OMG: 2 | GLM: 30 | ZIL: 30 | POWR: 23 | LRC: 42 | EOSDAC: 10 | STEEM: 0.01 | STRAX: 0.2 | ZRX: 6 | REP: 0.08 | XEM: 4 | SNT: 23 | ADA: 1 | BAT: 3 | WTC: 1.4 | LOOM: 22 | WAVES: 2 | TRUE: 10 | LINK: 0.11 | MEETONE: 10 | HORUS: 10 | ADD: 100 | RNT: 300 | ENJ: 2 | VET: 200 | MTL: 0.9 | CHL: 100 | BLACK: 10 | ATD: 100 | IOST: 1000 | TMTG: 360 | QKC: 2000 | HDAC: 200 | WET: 840 | AMO: 7000 | BSV: 0.002 | BXA: 15 | DAC: 670 | ORBS: 24 | TFUEL: 10 | VALOR: 5 | CON: 460 | ANKR: 27 | MIX: 360 | LAMB: 40 | CRO: 17 | FX: 10 | CHR: 12 | MBL: 3500 | MXC: 72 | WIN: 1 | DVP: 56 | FCT: 20 | FNB: 460 | TRV: 100 | PCM: 170 | DAD: 12 | AOA: 560 | XSR: 1300 | WOM: 15 | SOC: 360 | EM: 1000 | QBZ: 340 | BOA: 10 | FLETA: 180 | SXP: 0.9 | COS: 97 | APIX: 36 | EL: 170 | BASIC: 460 | HIVE: 18 | XPR: 800 | FIT: 720 | EGG: 360 | BORA: 17 | ARPA: 35 | APM: 100 | CKB: 170 | AERGO: 13 | ANW: 28 | CENNZ: 60 | EVZ: 44 | MCI: 170 | SRM: 0.7 | QTCON: 56 | UNI: 0.13 | YFI: 0.0001 | UMA: 0.17 | AAVE: 0.01 | COMP: 0.01 | REN: 5 | BAL: 0.08 | RSR: 59 | NMR: 0.07 | RLC: 2 | UOS: 9 | SAND: 7 | CVT: 18 | STPT: 63 | GOM2: 320 | RINGX: 28 | BEL: 0.8 | DVC: 11 | OBSR: 170 | ORC: 2 | POLA: 15 | AWO: 270 | ADP: 59 | DVI: 9 | IBP: 25 | GHX: 5 | MIR: 0.5 | CBK: 0.5 | ONX: 5 | MVC: 25 | BLY: 25 | WOZX: 3 | ANV: 2 | GRT: 3 | MM: 4 | BIOT: 77 | XNO: 12 | SNX: 0.2 | RAI: 2 | COLA: 5 | NU: 8 | OXT: 6 | LINA: 34 | MAP: 34 | AQT: 0.6 | WIKEN: 130 | MANA: 5 | LPT: 0.15 | MKR: 0.0014 | SUSHI: 0.23 | NSBT: 0.3 | DON: 2 | ASM: 9 | PUNDIX: 0.7 | CELR: 50 | ARW: 0.5 | MSB: 10 | RLY: 5 | OCEAN: 4 | BFC: 25 | ALICE: 0.4 | CHZ: 9 | BCD: 2 | GXC: 4 | BTT: 5000 | VSYS: 100 | IPX: 80 | WICC: 32 | ONT: 7 | LUNA: 12 | NEWS: 10 | AION: 35 | META: 300 | ONG: 25 | ALGO: 4 | JST: 250 | XTZ: 1.2 | MLK: 20 | WEMIX: 40 | DOT: 1.5 | SUN: 1 | ATOM: 1 | SSX: 42 | TEMCO: 2000 | LZM: 25 | HIBS: 
250 | BURGER: 0.9"""
all_coin = []  # every known asset symbol
currency_min_withdrawal_quantity = dict()  # symbol -> minimum withdrawal amount
# Parse the "COIN: qty | COIN: qty" table above into the two structures.
for q in str.split(MIN_WITHDRAWAL_QUANTITY, "|"):
    c_list = q.strip().split(":")
    currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].strip())
    all_coin.append(c_list[0].strip())
def get_all_coin_btc_ticker():
    """Fetch the BTC-quoted ticker for every known coin.

    :return: list of ``{"coin": <symbol>, "r": <ticker payload>}``
        entries, skipping coins with no available ticker.  A short sleep
        between requests keeps us under the API rate limit.
    """
    tickers = []
    for symbol in all_coin:
        if symbol == PAYMENT_CURRENCY_BTC:
            continue
        payload = get_ticker(symbol, PAYMENT_CURRENCY_BTC)
        if payload is not None:
            tickers.append({"coin": symbol, "r": payload})
        time.sleep(0.05)
    return tickers
def get_ticker(order_currency, payment_currency):
    """Fetch the ticker for one trading pair.

    API: https://apidocs.bithumb.com/docs/ticker
    e.g. https://api.bithumb.com/public/ticker/BTC_KRW

    :return: the response "data" payload, or None when the pair does not
        exist (statuses in NO_SYMBOL_CODE are tolerated).
    """
    endpoint = "{}/public/ticker/{}_{}".format(REST_URL, order_currency, payment_currency)
    return check_and_get_data(http_util.get(endpoint), NO_SYMBOL_CODE)
def get_min_withdrawal(coin):
    """
    Return the minimum withdrawal quantity for the given asset.
    :param coin: asset symbol to withdraw, e.g. BTC, ETH
    :return:
        the smallest amount of that asset that can be withdrawn
    """
    return currency_min_withdrawal_quantity[coin]
def get_asset_status(coin):
    url = REST_URL + "/public/assetsstatus/{}".format(coin)
    """
    Asset status info: https://apidocs.bithumb.com/docs/assets_status
    https://api.bithumb.com/public/assetsstatus/{order_currency}
    :param coin: asset symbol
    :return:
        (can_deposit, can_withdraw) booleans for the given asset.
    NOTE(review): the sample response below shows "data" as a list, yet
    the code indexes it like a dict — confirm the single-asset endpoint
    returns a dict, otherwise this raises TypeError.  Sample:
    {
        "status" : "0000",
        "data" :
        [
            {
                "deposit_status" : 1,
                "withdrawal_status" : 0
            }
        ]
    }
    """
    data = check_and_get_data(http_util.get(url), None)
    return data["deposit_status"] == available, data["withdrawal_status"] == available
def check_and_get_data(response, ignore_codes):
    """Validate a bithumb API response and return its ``data`` payload.

    :param response: HTTP response whose ``text`` is the JSON body.
    :param ignore_codes: optional collection of status codes that are
        tolerated; a matching status yields ``None`` instead of an error.
    :raises Exception: when the status is neither success nor ignored.
    """
    body = json.loads(response.text)
    status = body["status"]
    if SUCCESS_CODE != status:
        # Bug fix: the original did ``status in ignore_codes`` directly and
        # crashed with TypeError when ignore_codes was None (as passed by
        # get_asset_status).
        if ignore_codes and status in ignore_codes:
            print("返回的status={}编码不是成功,message={}".format(status, body["message"]))
            return None
        # Raise Exception rather than BaseException so callers can catch it
        # without also trapping KeyboardInterrupt / SystemExit.
        raise Exception("返回的status={}编码不是成功,message={}".format(status, body["message"]))
    return body["data"]
|
9,803 | 8be3a3d32da208e2f45aad61813bc6f5ea513f01 | import p01 as p
stu = p.Student()
stu.say()
p.sayHello() |
9,804 | 46696ee9576d74c087ae435bfd304c8346530ab2 | """
CP1404 - Practical
Code that produces a random number between 1 and 100 inclusive
Rhys Simpson
"""
# 1.
# smallest number 5; largest number 20
# 2.
# smallest number 3; largest number 9
# no it can only produce 3, 5, 7, 9
# 3.
# smallest number 2.5000000000000000; largest number 5.5000000000000000
import random
print(random.randint(1, 100))
|
9,805 | bac3cee5e6d129fcf345d92000cb2a257c303dd5 | import random
# Collect four student names, then announce one chosen uniformly at random.
a = input('Nome do primeiro aluno: ')
b = input('Nome do segundo aluno: ')
c = input('Nome do terceiro aluno: ')
d = input('Nome do quarto aluno: ')
names = [a, b, c, d]
print('O aluno escolhido é {}.'.format(random.choice(names)))
|
9,806 | 266add60be2b6c2de5d53504cbabf754aa62d1b0 | import unittest
from unittest.mock import ANY, MagicMock, call
from streamlink import Streamlink
from streamlink.plugins.funimationnow import FunimationNow
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlFunimationNow(PluginCanHandleUrl):
    """URL-matching tests: the plugin should claim funimation.com and
    funimationnow.uk paths."""
    __plugin__ = FunimationNow
    should_match = [
        "http://www.funimation.com/anything",
        "http://www.funimation.com/anything123",
        "http://www.funimationnow.uk/anything",
        "http://www.funimationnow.uk/anything123",
    ]
class TestPluginFunimationNow(unittest.TestCase):
    def test_arguments(self):
        """The plugin must register its email/password/language CLI options,
        in this exact order, on the FunimationNow argument group."""
        from streamlink_cli.main import setup_plugin_args
        session = Streamlink()
        parser = MagicMock()
        group = parser.add_argument_group("Plugin Options").add_argument_group("FunimationNow")
        session.plugins = {
            'funimationnow': FunimationNow
        }
        setup_plugin_args(session, parser)
        self.assertSequenceEqual(
            group.add_argument.mock_calls,
            [
                call('--funimation-email', help=ANY),
                call('--funimation-password', help=ANY),
                call('--funimation-language', choices=["en", "ja", "english", "japanese"], default="english", help=ANY)
            ]
        )
|
9,807 | 987f8ce668f2002b731822fa5f3de143a80aaafe | from src.config import Config
mock = {
"entities": {
"foo": [ "bar", "foobar" ]
},
"synonimous": {
"fizz": [ "fizzfuzz", "fuzz"]
},
"templates": [
{
"text": "{synonimous.fizz} and {entities.foo}",
"intention": "fizzfoo"
}
]
}
def test_should_config_start_correctly():
c = Config(mock)
assert c._entities == mock['entities']
assert c._synonimous == mock['synonimous']
assert c.templates == mock['templates']
assert c.get_value('synonimous', 'fizz') == mock['synonimous']['fizz'] |
9,808 | 2caea9e7bbef99b19ba917995513413385c7abdf | #!/usr/bin/env python
import pygame
import pygame.mixer as mixer
def pre_init():
    # 22050 Hz, signed 16-bit, stereo, 2048-byte buffer; must run before mixer.init().
    mixer.pre_init(22050, -16, 2, 2048)
def init():
    """Initialise the mixer and allow up to 16 simultaneous channels."""
    mixer.init()
    pygame.mixer.set_num_channels(16)
def deinit():
    """Shut the mixer down and release the audio device."""
    mixer.quit()
class Music(object):
    """A looping background-music track backed by a pygame mixer Sound."""

    our_music_volume = 0.8    # volume shared by all music tracks
    our_current_music = None  # the most recently started track's Sound

    def __init__(self, filename=None):
        self.sound = None
        self.channel = None
        if filename is not None:
            self.load(filename)

    def load(self, filename):
        """Load the audio file into a mixer Sound object."""
        self.sound = mixer.Sound(filename)

    def play(self, loop=-1):
        """Start playback at the global music volume (loops forever by default)."""
        self.sound.set_volume(Music.our_music_volume)
        self.channel = self.sound.play(loop)
        Music.our_current_music = self.sound

    def stop(self):
        self.sound.stop()

    def fadeout(self, millisec):
        self.sound.fadeout(millisec)

    def is_playing(self):
        """True while this track still owns the channel it was started on."""
        return self.channel is not None and self.channel.get_sound() is self.sound

    @staticmethod
    def set_global_volume(volume):
        """Set the shared music volume (0.0-1.0), updating any playing track."""
        assert volume >= 0.0
        assert volume <= 1.0
        Music.our_music_volume = volume
        if Music.our_current_music is not None:
            Music.our_current_music.set_volume(volume)

    @staticmethod
    def get_global_volume():
        return Music.our_music_volume
class Sound(object):
    """A one-shot sound effect backed by a pygame mixer Sound."""

    our_sound_volume = 0.8  # volume shared by all sound effects

    def __init__(self, filename=None):
        self.sound = None
        self.channel = None
        if filename is not None:
            self.load(filename)

    def load(self, filename):
        """Load the audio file into a mixer Sound object."""
        self.sound = mixer.Sound(filename)

    def play(self, loop=0):
        """for infiniteloop, set loop to -1"""
        self.sound.set_volume(Sound.our_sound_volume)
        self.channel = self.sound.play(loop)

    def stop(self):
        self.sound.stop()

    def fadeout(self, millisec):
        self.sound.fadeout(millisec)

    def is_playing(self):
        """True while this effect still owns the channel it was started on."""
        return self.channel is not None and self.channel.get_sound() is self.sound

    @staticmethod
    def set_global_volume(volume):
        """Set the shared effect volume (0.0-1.0)."""
        assert volume >= 0.0
        assert volume <= 1.0
        Sound.our_sound_volume = volume

    @staticmethod
    def get_global_volume():
        return Sound.our_sound_volume
|
9,809 | bf7319996043a41b7d0ef4e6098c3609e5db101e | from . import chequeador_camion
from . import chequeador_camion_modelo
from . import chequeador_destino_tipo
from . import chequeador_destino
from . import chequeador_origen
from . import chequeador_minerales |
9,810 | 94d992ef4b9015aa8f42071bb1409703d509c313 | from . import *
from module import *
from transfer import *
from dataset import *
|
9,811 | bc5b368a710b8dfc4492b996c42c46638e1f538c | date = input()
if date == ("DEC 25") or date == ("OCT 31"):
print("yup")
else:
print("nope") |
9,812 | d91dc850c293cf085e1be04b6e13e0a62cb0bcb1 | # e.g. 8-34
from tkinter import *
from PP4E.launchmodes import PortableLauncher
import os, sys
demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']
# Fork one child per demo; each child replaces itself with the demo script.
for demo in demoModules:
    pid = os.fork()
    filepath = './' + demo + '.py'
    if pid == 0:
        # Child process: exec python3.5 (POSIX-only).
        # NOTE(review): the argv here is just (filepath,), so the script path
        # becomes argv[0] and python3.5 receives no script argument — likely
        # should be ('python3.5', filepath); confirm against the book example.
        os.execvp('python3.5', (filepath, ))
# Parent: show a simple progress window while the demos run.
root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
|
9,813 | 22ddae977afd2a1b0a729cf0d56783eaaca3b0a0 | #!/usr/bin/python2
import requests ,optparse
def get_link():
    """Parse -l/--link from the command line.

    NOTE(review): parse_args() returns (options, args); the options object
    is (confusingly) bound to the name ``url`` here, so the caller reads
    the actual link as ``result.url``.
    """
    parser=optparse.OptionParser()
    parser.add_option("-l","--link",dest="url",help="direct link of file to download .pdf")
    (url,argument)=parser.parse_args()
    return url
def download(url):
    """Download *url* and save it under its basename in the current directory.

    Prints the saved filename on success and an error notice on failure.
    """
    try:
        get_request = requests.get(url)
        name_url = url.split("/")[-1]
        print(name_url)
        with open(name_url, "wb") as file:
            file.write(get_request.content)
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt; catch only real errors.
        print("[-]Print Valid Link")
def start():
    """Entry point: try the CLI-supplied link, else prompt interactively."""
    url_link=get_link()
    try:
        download(url_link.url)
    except:
        # Fallback when no/invalid -l option was given.
        url_link=input("[+]Enter link:")
        download(url_link)
start()
|
9,814 | be64c981e7ea70dfcbd840988a633b4a71a43783 | from random import randint
# Build the vocabulary dictionary {english: korean} from "eng: kor" lines.
# NOTE(review): the file handle is never closed; consider a ``with`` block.
in_file = open("vocabulary.txt", "r")
voca_dic = {}
for line in in_file:
    data = line.strip().split(": ")
    eng_word = data[0]
    kor_word = data[1]
    voca_dic[eng_word] = kor_word
while True:
keys = list(voca_dic.keys())
index = randint(1, len(keys) - 1)
input_val = input("%s: " % voca_dic[keys[index]])
if input_val == "q":
break
if input_val == keys[index]:
print("맞았습니다!")
else:
print("틀렸습니다. 정답은 %s입니다." % keys[index]) |
9,815 | 588f6f78908e47e0b3f1bc42fffabad34766eede | import numpy as np
import tensorflow as tf
from arg_parser import args
from model_object import UnetModel
def main(args):
    """Seed the RNGs, then build, train, and evaluate the U-Net pipeline."""
    np.random.seed(args.random_seed)
    tf.random.set_seed(args.random_seed)
    unet_model = UnetModel(args)
    unet_model.prepare_data(args)
    unet_model.create_model(args)
    unet_model.train(args)
    # Reload the best checkpoint saved during training before evaluating.
    unet_model.load_best_model(args, load_dir= args.savedir)
    unet_model.evaluate(args)
if __name__ == "__main__":
main(args) |
9,816 | 1a7363736076620b7704d7264b2f0bb24514165c | from mbc import MBC
import random
import sys
from typing import Dict
from interface import Interface
from reg import Register, HandlerProxy
# I/O Registers (memory-mapped addresses)
IE = 0xFFFF   # Interrupt Enable
DIV = 0xFF04  # Divider
TIMA= 0xFF05  # Timer counter
TMA = 0xFF06  # Timer modulo
TAC = 0xFF07  # Timer control
IF = 0xFF0F   # Interrupt Flag
LY = 0xFF44   # Current scanline
class MMU():
    """Memory management unit: one flat 64KiB bytearray with memoryview
    windows per region, plus pluggable I/O register handlers."""
    #0000 3FFF 16KB ROM bank 00 From cartridge, usually a fixed bank
    #4000 7FFF 16KB ROM Bank 01~NN From cartridge, switchable bank via MBC (if any)
    #8000 9FFF 8KB Video RAM (VRAM) Only bank 0 in Non-CGB mode
    #Switchable bank 0/1 in CGB mode
    #
    #A000 BFFF 8KB External RAM In cartridge, switchable bank if any
    #C000 CFFF 4KB Work RAM (WRAM) bank 0
    #D000 DFFF 4KB Work RAM (WRAM) bank 1~N Only bank 1 in Non-CGB mode
    #Switchable bank 1~7 in CGB mode
    #
    #E000 FDFF Mirror of C000~DDFF (ECHO RAM) Typically not used
    #FE00 FE9F Sprite attribute table (OAM)
    #FEA0 FEFF Not Usable
    #FF00 FF7F I/O Registers
    #FF80 FFFE High RAM (HRAM)
    #FFFF FFFF Interrupts Enable Register (IE)
    def __init__(self, interface:Interface, mbc:MBC) -> None:
        self._ui = interface
        # Power-on RAM contents are unspecified on hardware, so randomise.
        self.mem = bytearray(random.getrandbits(8) for _ in range(65536)) # type: ignore # Randomise RAM
        view = memoryview(self.mem)
        self._rom0 = view[0:0x4000]
        self._rom1 = view[0x4000:0x8000]
        self._vram = view[0x8000:0xA000]
        self._eram = view[0xA000:0xC000]
        self._wram = view[0xC000:0xE000]
        self._wram2 = view[0xE000:0xFE00]
        self.OAM = view[0xFE00:0xFEA0]
        self.IO = view[0xFF00:0xFF80]
        self._HiRAM = view[0xFF80:0xFFFF]
        self.view = view
        self.mbc = mbc
        self.mbc.bank0 = self._rom0
        self.mbc.bank1 = self._rom1
        # OAM/IO/HiRAM (0xFE00-0xFFFE) default to zero, unlike general RAM.
        self.view[0xFE00:0xFFFF] = bytearray(0x1FF)
        self.mem[0xFFFF] = 0xFF # IE
        self.link_buffer = 0
        self.serial_buff = ""
        self._io_handlers:Dict[int, Register] = {}
        self.add_io_handler(0xFF46, HandlerProxy(self.dma))
        # Add bootrom disable handler
        self.add_io_handler(0xFF50, HandlerProxy(self.mbc.disable_bootrom))

    def dma(self, val:int) -> None:
        """OAM DMA: copy 0xA0 bytes from page val*0x100 into sprite memory."""
        dest = 0xFE00
        offset = val * 0x100
        for n in range(0xA0):
            self.mem[dest + n] = self.mem[n + offset]

    def __getitem__(self, val:int) -> int:
        """Read one byte with full memory-map semantics."""
        if val < 0xE000:
            return self.view[val]
        elif val < 0xFE00:
            # Echo RAM, subtract 0x2000
            return self.view[val-0x2000]
        elif val < 0xFEA0:
            # Bug fix: the bound was 0xFE80, so reads of OAM 0xFE80-0xFE9F
            # wrongly fell through to the unusable region and returned 0xFF
            # (the OAM slice and __setitem__ both use 0xFEA0).
            return self.OAM[val-0xFE00]
        elif val < 0xFF00:
            return 0xFF  # unusable region reads as 0xFF
        elif val < 0xFF80:
            if val in self._io_handlers:
                return self._io_handlers[val].value
            elif val == 0xFF00:
                return self._ui.input
            else:
                return self.IO[val-0xFF00]
        elif val < 0xFFFF:
            return self._HiRAM[val-0xFF80]
        elif val == 0xFFFF:
            return self.mem[0xFFFF]
        raise ValueError("Access out of bounds")

    def __setitem__(self, key:int, val:int) -> None:
        """Write one byte, routing ROM writes to the MBC and applying the
        serial-port and I/O-handler side effects."""
        if key < 0x8000:
            # ROM area: writes are MBC bank-switch commands, not stores.
            self.mbc[key] = val
        elif key < 0xA000:
            self._vram[key-0x8000] = val
        elif key < 0xC000:
            if self.mbc.ram_enabled:
                # TODO: Read $0x149 and determine RAM Size
                # TODO: Pass to MBC
                self._eram[key-0xA000] = val
        elif key < 0xE000:
            self._wram[key-0xC000] = val
        elif key < 0xFE00:
            # Echo RAM mirrors 0xC000-0xDDFF.
            self._wram[key-0xE000] = val
        elif key < 0xFEA0:
            self.OAM[key-0xFE00] = val
        elif key < 0xFF00:
            pass  # unusable region: writes are ignored
        elif key < 0xFF80:
            if key in self._io_handlers:
                self._io_handlers[key].value = val
            if key == 0xFF00:
                self._ui.input = val
            elif key == 0xFF01:
                self.link_buffer = val
            elif key == 0xFF02:
                if val == 0x81:
                    # Serial transfer requested: buffer the byte; complete
                    # lines are echoed to stderr (used by test ROMs).
                    self.serial_buff += chr(self.link_buffer)
                    if self.link_buffer == ord("\n"):
                        print(self.serial_buff, end='', file=sys.stderr)
                        # Test ROM Routines
                        if self.serial_buff == "Passed\n":
                            #sys.exit(0)
                            pass
                        elif self.serial_buff == "Failed\n":
                            #sys.exit(1)
                            pass
                        self.serial_buff = ""
            else:
                self.IO[key-0xFF00] = val
        elif key < 0xFFFF:
            self._HiRAM[key-0xFF80] = val
        else:
            self.mem[65535] = val

    def add_io_handler(self, val:int, handler:Register) -> None:
        """Register a handler object backing reads/writes of I/O address *val*."""
        self._io_handlers[val] = handler
|
9,817 | c466c7e05608b1fbba5eea5bec16d301cee3688f | default_app_config = 'teacher.apps.A1Config' |
9,818 | c7e5851a41e1cdb33cd0daa103fbf702da6e5ff7 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 10 17:48:19 2021
@author: LESLY
"""
from PICO_PLACA_class import PICO_PLACA
""" Main program of "Pico y Placa" predictor"""
def main():
    """Prompt for plate, date and time, then report the Pico y Placa verdict."""
    print("Predictor")
    placa = input("Enter the license of your vehicle in the following format AAA-####: ")
    fecha = input("Enter the date in the following format AA/MM/DD: ")
    hora = input("Enter the time in the following format 00:00: ")
    prog =PICO_PLACA(placa,fecha,hora)
    estado = prog.verificar()
    # "continue" means the inputs were well-formed; validar() then applies
    # the driving-restriction rules. Any other value is a format error.
    if estado == "continue":
        estado = prog.validar()
        print("Your vehicle " + estado )
    else:
        print(estado)
if __name__ == '__main__':
main() |
9,819 | a7d7408808f28343a51ff6522c5e14747c8c6e43 | # Generated by Django 3.0.6 on 2020-07-06 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set (Korean) display defaults on the
    ``Gall`` model's ``date``, ``summ`` and ``title`` char fields."""

    dependencies = [
        ('s1app', '0004_auto_20200706_0753'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gall',
            name='date',
            field=models.CharField(default='날짜', max_length=10),
        ),
        migrations.AlterField(
            model_name='gall',
            name='summ',
            field=models.CharField(default='설명', max_length=100),
        ),
        migrations.AlterField(
            model_name='gall',
            name='title',
            field=models.CharField(default='제목', max_length=50),
        ),
    ]
|
9,820 | 9f4cd9ed8aea03f5908aef4a154d964f0810619b | #!/usr/bin/env python
__author__ = "Maxime Beauchamp"
__version__ = "0.1"
__date__ = "2020-12-10"
__email__ = "maxime.beauchamp@imt-atantique.fr"
from graphics_OSSE import *
# function to create recursive paths
def mk_dir_recursive(dir_path):
    """Create *dir_path* and any missing parent directories.

    No-op when the directory already exists, matching the original
    hand-rolled head/tail recursion, but delegating to the standard
    library (also avoids the undefined ``join_paths`` helper and the
    check-then-create race of the original).

    :param dir_path: directory path to create recursively
    """
    os.makedirs(dir_path, exist_ok=True)
type_obs = sys.argv[1]
domain = sys.argv[2]
workpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE/scores_allmethods_nadlag_"+type_obs
scratchpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE"
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
#else:
# shutil.rmtree(workpath)
# mk_dir_recursive(workpath)
## parameters
if domain=="OSMOSIS":
extent = [-19.5,-11.5,45.,55.]
indLat = 200
indLon = 160
elif domain=='GULFSTREAM':
extent = [-65.,-55.,33.,43.]
indLat = 200
indLon = 200
else:
extent=[-65.,-55.,30.,40.]
indLat = 200
indLon = 200
#lon = lon[:indLon]
#lat = lat[:indLat]
## store all data in a list
AnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
# Reload saved AnDA result
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
# Reload saved ConvAE and GE-NN results
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]
## list of dates
lday1 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=60+i),"%Y-%m-%d") for i in range(20) ]
lday2 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=140+i),"%Y-%m-%d") for i in range(20) ]
lday3 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=220+i),"%Y-%m-%d") for i in range(20) ]
lday4 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=300+i),"%Y-%m-%d") for i in range(20) ]
lday = np.concatenate([lday1,lday2,lday3,lday4])
lday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]
# Bug fix: the original referenced undefined ``AnDA_ssh_1_nadir`` -- only the
# lag-suffixed variants (_0/_5) are bound above. The ground truth is the same
# for every experiment, so take it from the lag-0 nadir run.
GT = AnDA_ssh_1_nadir_0.GT[:,:indLat,:indLon]
# list_data (AnDA nadir)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (AnDA nadirswot)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadir)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadirswot)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
|
9,821 | 0ff398775fd13fb5fbd23bf2359bb31dff6bd38c | n, x = map(int, input().split())
m = [int(input()) for _ in range(n)]
m.sort()
x -= sum(m)
print(n + x // m[0])
|
9,822 | 307e7a059f9b0b1131f8a57d0f55cf0ee05173e8 | #!/usr/bin/env python
"""
Use version of DriverSlave that has pixmap and pixheights
"""
import threading
# import base classes and driver
from bibliopixel import LEDStrip, LEDMatrix
# from bibliopixel.drivers.LPD8806 import DriverLPD8806, ChannelOrder
from bibliopixel.drivers.visualizer import DriverVisualizer, ChannelOrder
from bibliopixel.drivers.slave_driver import DriverSlave
# import colors
import bibliopixel.colors
from bibliopixel.animation import BaseStripAnim
from logging import DEBUG, INFO, WARNING, CRITICAL, ERROR
from bibliopixel import log
log.setLogLevel(WARNING)
import re
import time
from operator import or_, ior, ixor
import matplotlib.pyplot as plt
import BiblioPixelAnimations.matrix.bloom as BA
class MasterAnimation(BaseStripAnim):
"""
Takes copies of fake leds, combines using heights and mixing to fill and update
a led
NEED now ledcopies is list of the leds associated with each animation
NEED also mapping of the leds into master led (i.e. path list)
NEED also height of each animations and merging method if same height
"""
def __init__(self, led, animcopies, start=0, end=-1):
super(MasterAnimation, self).__init__(led, start, end)
if not isinstance(animcopies, list):
animcopies = [animcopies]
self._animcopies = animcopies
self._ledcopies = [a._led for a, f in animcopies]
self._idlelist = []
self.timedata = [[] for _ in range(len(self._ledcopies))] # [[]] * 5 NOT define 5 different lists!
self._led.pixheights = [0] * self._led.numLEDs
# def preRun(self, amt=1):
# self._led.all_off()
# for w, f in self._animcopies:
# w.run(fps=f, max_steps=runtime * f, threaded = True)
def preStep(self, amt=1):
#print 'prestep {}'.format(self._step)
# only step the master thread when something from ledcopies
# has been done i.e. its event _wait must be false (I THINK)
# TODO is this good code???? or is there a better way to block
self._idlelist = [True] # to insure goes thru while loop at least once
while all(self._idlelist):
self._idlelist = [not ledcopy.driver[0]._updatenow.isSet() for ledcopy in self._ledcopies]
if self._stopEvent.isSet():
self.animComplete = True
print 'breaking out'
break
#
def postStep(self, amt=1):
# clear the ones found in preStep
activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
[self._ledcopies[i].driver[0]._updatenow.clear() for i in activewormind]
def step(self, amt=1):
"""
combines the buffers from the slave led's
which then gets sent to led via update
"""
# For checking if all the animations have their framse looked at
#activewormind = [i for i, x in enumerate(self._idlelist) if x == False]
#print "Worm {} at {:5g}".format(activewormind, 1000*(time.time() - starttime))
# save times activated for each worm
[self.timedata[i].append(1000*(time.time() - starttime)) for i, x in enumerate(self._idlelist) if x == False]
#self._led.buffer = [0] * 480
self._led.pixheights = [-100] * self._led.numLEDs
#print type(self._led.buffer)
for ledcopy in self._ledcopies:
# self._led.buffer = map(ixor, self._led.buffer, ledcopy.buffer)
# use pixheights but assume all buffers same size
# print ledcopy.driver[0].pixheights
for pix in range(self._led.numLEDs):
#for ledcopy in self._ledcopies:
if self._led.pixheights[pix] == ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] ^= ledcopy.buffer[3*pix + i]
elif self._led.pixheights[pix] < ledcopy.driver[0].pixheights[pix]:
for i in range(3):
self._led.buffer[3*pix + i] = ledcopy.buffer[3*pix + i]
self._led.pixheights[pix] = ledcopy.driver[0].pixheights[pix]
self._step += 1
def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, joinThread = False, callback=None):
#def run(self, amt = 1, fps=None, sleep=None, max_steps = 0, untilComplete = False, max_cycles = 0, threaded = True, joinThread = False, callback=None):
# self.fps = fps
# self.untilComplete = untilComplete
super(MasterAnimation, self).run(amt = 1, fps=fps, sleep=None, max_steps = 0, untilComplete = untilComplete, max_cycles = 0, threaded = True, joinThread = joinThread, callback=callback)
class Worm(BaseStripAnim):
"""
colors a list the worm segment (starting with head) colors
path a list of the LED indices over which the worm will travel
cyclelen controls speed, worm movement only when LED upload
cycles == 0 mod cyclelen
height (of worm segments) is same length as colors: higher
value worms segments go over top of lower value worms
"""
def __init__(self, led, colors, path, cyclelen, direction=1,
height=None, start=0, end=-1):
super(Worm, self).__init__(led, start, end)
if height is None:
height = [0]*len(colors)
elif type(height) == int:
height = [height]*len(colors)
self._colors = colors
self._colors.append((0, 0, 0)) # add blank seqment to end worm
self._path = path
self._cyclelen = cyclelen
self._height = height
self._height.append(-1) # add lowest value for height
self._activecount = 0
self._direction = direction
self._headposition = -self._direction
#print self._colors
#print self._height
def step(self, amt=1):
if self._activecount == 0:
self._headposition += amt*self._direction
self._headposition %= len(self._path)
# Put worm into strip and blank end
segpos = self._headposition
for x in range(len(self._colors)):
if True: #self._height[x] >= LEDsegheights[self._path[segpos]]: # or x == len(self.colors) - 1:
#if self._height[x] >= self._led.driver[0].pixheights[self._path[segpos]]: # or x == len(self.colors) - 1:
self._led.set(self._path[segpos], self._colors[x])
self._led.driver[0].pixheights[self._path[segpos]] = self._height[x]
segpos -= self._direction
segpos %= len(self._path)
self._activecount += amt
self._activecount %= self._cyclelen
self._step += amt
def pathgen(nleft=0, nright=15, nbot=0, ntop=9, shift=0, turns=10, rounds=16):
    """
    A path around a rectangle from strip wound helically
    10 turns high by 16 round.
    rounds * turns must be number of pixels on strip
    nleft and nright is from 0 to rounds-1,
    nbot and ntop from 0 to turns-1

    Returns the list of LED strip indices tracing the rectangle's
    perimeter (left side up, top, right side down, bottom), rotated
    by ``shift`` modulo the strip length.
    """
    def ind(x, y):
        # Strip index of grid position (x, y); each row y is one winding.
        return x + y * rounds
    assert 0 <= nleft <= nright -1 <= rounds and 0 <= nbot <= ntop -1 <= turns
    nled = rounds*turns
    # The four sides of the rectangle, traversed in order:
    sleft = range(ind(nleft, nbot), ind(nleft, ntop), rounds)
    tp = range(ind(nleft, ntop), ind(nright, ntop), 1)
    sright = range(ind(nright, ntop), ind(nright, nbot), -rounds)
    bt = range(ind(nright, nbot), ind(nleft, nbot), -1)
    # NOTE: Python 2 -- range() and map() below return plain lists.
    path = sleft+tp+sright+bt
    if len(path) == 0:
        # Degenerate rectangle: fall back to a single pixel.
        path = [ind(nleft, nbot)]
    path = map(lambda x: (shift+x) % nled, path)
    log.logger.info("pathgen({}, {}, {}, {}, {}) is {}".format(nleft, nright, nbot, ntop, shift, path))
    return path
if True: #__name__ == '__main__':
drivermaster = DriverVisualizer(160, pixelSize=62, stayTop=False, maxWindowWidth=1024)
# using pixelSize 62 and changed code of visualizer.py to have maxWindowWidth=1024
#drivermaster = DriverVisualizer(160, pixelSize=31, stayTop=False)
#ledmaster = LEDStrip(drivermaster, threadedUpdate=True)
ledmaster = LEDStrip(drivermaster)
lnin = [255, 222, 200, 150, 125]
bluedimming = [(0, 0, i) for i in lnin]
bluedimming = [(0, 0, 0) for i in lnin]
reddimming = [(i, 0, 0) for i in lnin]
greendimming = [(0, i, 0) for i in lnin]
cyandimming = [(0, i, i) for i in lnin]
whitedimming = [(i, i, i) for i in lnin]
# Worm arguments
wormblue = (bluedimming, pathgen(5, 10, 0, 9), 1, 1, 6)
wormred = (reddimming, pathgen(1, 14, 1, 8), 1, 1, 2)
wormgreen = (greendimming, pathgen(2, 13, 2, 7), 1, 1, 3)
wormcyan = (cyandimming, pathgen(3, 12, 3, 6), 1, 1, 4)
wormwhite = (whitedimming, pathgen(4, 11, 4, 5), 1, 1, 5)
# List of pair (animation arguments, fps)
wormdatalist = [(wormblue, 24), (wormred, 20), (wormgreen, 16), (wormcyan, 12), (wormwhite, 8)]
#wormdatalist = [(wormwhite, 8)]
#wormdatalist = []
# dummy strips must each have their own slavedriver as thread is attached
# to the driver
ledslaves = [LEDStrip(DriverSlave(160, pixheights=-1), threadedUpdate=True) for _ in range(len(wormdatalist))]
# Make the Worm animations an list pairs (animation, fps)
wormlist = [(Worm(ledslaves[i], *d[0]), d[1]) for i, d in enumerate(wormdatalist)]
ledslaveb = LEDMatrix(DriverSlave(160, None, 0), width=16, height=10, threadedUpdate=True)
bloom = BA.Bloom(ledslaveb)
wormlist.append((bloom, 10))
#masteranimation = MasterAnimation(ledmaster, [w._led for w, f in wormlist])
masteranimation = MasterAnimation(ledmaster, wormlist)
starttime = time.time()
runtime = 1
# Master steps when it gets a go ahdead signal from one of the
# concurrent annimations
masteranimation.run(fps=None) # if give fps for master will skip faster frames
# Run all the slave animations and master threaded
# The slave animations update their buffers at the correct
# time and rather than update, just signal the master they
# are ready to be combined and sent to the actual leds
for w, f in wormlist:
w.run(fps=f, max_steps=runtime * f, threaded = True)
#print threading.enumerate()
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
# idle and threaded animations will run jointly
while not all([w.stopped() for w, f in wormlist]):
pass
# stop the master
masteranimation.stopThread(True) # need True
print "Master Animation Step Count {}".format(masteranimation._step)
ledmaster.waitForUpdate()
ledmaster.stopUpdateThreads()
[w._led.stopUpdateThreads() for w, f in wormlist]
print "THREADS: " + ",".join([re.sub('<class |,|bibliopixel.\w*.|>', '', str(s.__class__)) for s in threading.enumerate()])
plt.clf()
col = 'brgcwk'
[plt.plot(masteranimation.timedata[i], [i] * len(masteranimation.timedata[i]), col[i%6]+'o') for i in range(len(wormlist))]
ax = plt.axis()
delx = .01 * (ax[1] - ax[0])
plt.axis([ax[0]-delx, ax[1]+delx, ax[2]-1, ax[3]+1])
|
9,823 | 38363316cc9a8419a528bb78b9ad03682e24172d | # 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。
#
# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。
#
# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。
#
# 示例 1:
#
# 输入: cost = [10, 15, 20]
# 输出: 15
# 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。
#
#
# 示例 2:
#
# 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
# 输出: 6
# 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。
#
#
# 注意:
#
#
# cost 的长度将会在 [2, 1000]。
# 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。
#
# Related Topics 数组 动态规划
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
    def minCostClimbingStairs(self, cost):
        """Minimum total cost to reach the top of the staircase.

        You may start at step 0 or 1; after paying a step's cost you
        climb one or two steps. The answer is the cheaper way past the
        last step.

        :type cost: List[int]  (len(cost) >= 2)
        :rtype: int
        """
        # Rolling DP: prev2/prev1 hold the minimum cost to *stand on*
        # the previous and current step. O(1) space instead of the
        # full result table; same recurrence as before.
        prev2, prev1 = cost[0], cost[1]
        for step_cost in cost[2:]:
            prev2, prev1 = prev1, min(prev1, prev2) + step_cost
        # Top can be reached from either of the last two steps.
        return min(prev2, prev1)
if __name__ == '__main__':
solution = Solution()
costs = [10, 15, 20]
res = solution.minCostClimbingStairs(costs)
print(res)
|
9,824 | fc273a286a462cb673edaa2de2ecc6b9ca631004 | def dot_product(a,b):
ans = 0
for i in range(len(a)):
ans += a[i]* b[i]
return ans
n = int(input())
a = sorted(list(map(int,input().split())))
b = sorted(list(map(int,input().split())))
print(dot_product(a, b))
|
9,825 | a4d5064decdc9963dae1712c7c6918b3e5902bf2 | #!/usr/bin/env python
import socket
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
from matplotlib import style
import pickle
# Create figure for plotting
time_list = []
gain_list = []
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65432 # Port to listen on (non-privileged ports are > 1023)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
conn, addr = s.accept()
fig, ax1 = plt.subplots()
ax1.set_ylim(-.1, 1.1)
ax1.set_xlim(0, 2)
def recieve_data():
    """Yield ``(time, gain)`` float pairs parsed from pickled messages
    received on the module-level socket ``conn``.

    Echoes each raw message back to the sender; stops (and closes the
    connection) when the peer shuts down its side of the socket.
    """
    while True:
        data = conn.recv(1024)
        if not data:
            # Peer closed the connection -- stop the generator.
            break
        conn.sendall(data)
        # NOTE(review): assumes each recv() delivers exactly one complete
        # pickle and that the payload is a (time, gain) pair -- confirm
        # against the sender; SECURITY: pickle.loads on network data is
        # unsafe with untrusted peers.
        msg = pickle.loads(data)
        time = float(msg[0])
        gain = float(msg[1])
        yield time , gain
    conn.close()
def animate(i):
    """FuncAnimation callback: collect up to 50 samples and redraw.

    :param i: frame counter supplied by FuncAnimation (unused)
    """
    xs = []
    ys = []
    for line in recieve_data():
        if len(xs) < 50:
            x, y = line
            #print(x,y)
            xs.append(float(x))
            ys.append(float(y))
        else:break
    print(xs,ys)
    # Full redraw each frame: clear the axes, then plot the new window.
    ax1.clear()
    ax1.plot(xs, ys)
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
|
9,826 | 8dd864f1313f1e6f131ee11d4db99fbc46519126 | import unittest
from unittest.mock import patch
from redis import Redis
from rq.job import JobStatus
from rq.maintenance import clean_intermediate_queue
from rq.queue import Queue
from rq.utils import get_version
from rq.worker import Worker
from tests import RQTestCase
from tests.fixtures import say_hello
class MaintenanceTestCase(RQTestCase):
    """Tests for rq.maintenance helpers (requires Redis >= 6.2 for LPOS)."""

    @unittest.skipIf(get_version(Redis()) < (6, 2, 0), 'Skip if Redis server < 6.2.0')
    def test_cleanup_intermediate_queue(self):
        """Ensure jobs stuck in the intermediate queue are cleaned up."""
        queue = Queue('foo', connection=self.testconn)
        job = queue.enqueue(say_hello)

        # If job execution fails after it's dequeued, job should be in the intermediate queue
        # # and it's status is still QUEUED
        with patch.object(Worker, 'execute_job'):
            # mocked.execute_job.side_effect = Exception()
            # Patching execute_job makes the worker dequeue the job but never
            # run or acknowledge it -- simulating a crash mid-execution.
            worker = Worker(queue, connection=self.testconn)
            worker.work(burst=True)

            self.assertEqual(job.get_status(), JobStatus.QUEUED)
            self.assertFalse(job.id in queue.get_job_ids())
            self.assertIsNotNone(self.testconn.lpos(queue.intermediate_queue_key, job.id))

            # After cleaning up the intermediate queue, job status should be `FAILED`
            # and job is also removed from the intermediate queue
            clean_intermediate_queue(worker, queue)
            self.assertEqual(job.get_status(), JobStatus.FAILED)
            self.assertIsNone(self.testconn.lpos(queue.intermediate_queue_key, job.id))
|
9,827 | 672add6aa05e21d3605c05a23ff86281ffc3b17c | from typing import List
class LanguageDefinition:
    """Language definition containing general constants and methods."""

    @staticmethod
    def get_translated_file_name(filename: str):
        """
        :returns: Translated file name.
        """
        return filename

    @staticmethod
    def create_project_files(project_path: str, added_file_paths: List[str] = None) -> str:
        """
        Create supporting project files for a translated file.

        :param project_path: Project path.
        :param added_file_paths: List of paths for files to add to the project.
        :returns: Project path.
        """
        pass

    @staticmethod
    def format_file(file_path: str, request_data=None) -> List[str]:
        """
        Format file.

        :param file_path: File path.
        :param request_data: Request body data from "/translate" API endpoint.
        :returns: Formatted file lines.
        """
        # Use a context manager so the file handle is closed promptly
        # (the original `open(...).readlines()` leaked the handle to GC).
        with open(file_path) as source:
            return source.readlines()

    @staticmethod
    def format_project_files(project_path: str) -> List[str]:
        """
        Format project files.

        :param project_path: Project path.
        """
        pass

    @staticmethod
    def to_single_line_comment(text: str) -> str:
        """
        Convert a line of text to a single-line comment.

        :param text: Line of text.
        :returns: Single-line comment.
        """
        pass

    @staticmethod
    def to_multi_line_comment(text: str) -> str:
        """
        Convert a line of text to a multi-line comment.

        :param text: Line of text.
        :returns: Multi-line comment.
        """
        pass
|
9,828 | e296a5bea5465c2b84e37c7d83922adb01feab70 | # Generated by Django 2.2.14 on 2020-08-25 17:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename ``CV`` fields from the fused
    ``additionalskills_*`` / ``workexperience_*`` style to snake_case."""

    dependencies = [
        ('blog', '0004_auto_20200825_1318'),
    ]

    operations = [
        migrations.RenameField(
            model_name='cv',
            old_name='additionalskills_text',
            new_name='additional_skills_text',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='additionalskills_title',
            new_name='additional_skills_title',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_date',
            new_name='work_experience_date',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_header',
            new_name='work_experience_header',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_text',
            new_name='work_experience_text',
        ),
        migrations.RenameField(
            model_name='cv',
            old_name='workexperience_title',
            new_name='work_experience_title',
        ),
    ]
|
9,829 | 1aa49bc9a3ea12dffff907d17bd40b4425f28e13 | #!/usr/bin/python
import time
from daemon import runner
import graphitesend
from pywatts import get_data
class App():
    """Daemon app: poll the CurrentCost meter and push readings to Graphite."""

    def __init__(self):
        # Stream paths and pidfile required by daemon.runner.DaemonRunner.
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/tty'
        self.stderr_path = '/dev/tty'
        self.pidfile_path = '/tmp/currentcost_daemon.pid'
        self.pidfile_timeout = 5

    def run(self):
        """Poll forever, sending temperature/usage samples to Graphite."""
        while True:
            graphitesend.init(graphite_server='localhost', system_name='', group='power', prefix='house')
            try:
                watts, temperature = get_data()
                graphitesend.send_dict({'temperature':temperature, 'usage':watts})
                time.sleep(5)
            except (KeyboardInterrupt, SystemExit):
                # Let shutdown signals propagate so the daemon can exit.
                raise
            except:
                # Deliberate best-effort: swallow transient read/send errors
                # so the daemon keeps running. NOTE(review): consider at
                # least logging the exception here.
                pass
            time.sleep(5)
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
|
9,830 | 226bb323597100b57ef83eb0d5e4a9b894b77fd2 | import sys
character_dict = {}
f = open(sys.argv[1], 'r')
while True:
pinyin = f.readline().strip()
character = f.readline().strip()
if not character: break
character_dict[pinyin] = character
import time
fout = open(sys.argv[1][:-3] + "_guess_char.out", 'w')
fout.write("-----------------------------")
fout.write("\n")
import random
incorrect = []
pinyin_keys = list(character_dict.keys())
random.shuffle(pinyin_keys)
problems_left = pinyin_keys
additional = 0
while len(problems_left) > 0:
key = problems_left[0]
chinese_character = character_dict[key]
pinyin = key
# result = chinese_character.encode('big5').decode('big5')
guess = input("Guess for " + pinyin + "(Press Enter): ")
print(chinese_character, "<-- Answer")
correctResult = True if input("Did you get it right? y/n?") == "y" else False
if correctResult:
print("CORRECT! Nice!")
else:
print("WRONG!", pinyin,"==", chinese_character)
fout.write("WRONG! " + pinyin + " == " + chinese_character)
fout.write("\n")
incorrect.append(pinyin)
problems_left.append(pinyin)
additional += 1
del problems_left[0]
print("Here's the ones you got wrong!")
for key in incorrect:
print(key, "--", character_dict[key])
fout.write(key + "--" + character_dict[key])
correct_num = len(character_dict) + additional - len(incorrect)
print("ACCURACY:", correct_num, "/", len(character_dict) + additional, ":", int(100 * correct_num/(len(character_dict) + additional)), "%")
fout.write("ACCURACY: " + str(correct_num) + "/" + str(len(character_dict) + additional) + " : " + str(100 * correct_num/(len(character_dict) + additional)) + "%")
fout.write("-----------------------------") |
9,831 | c0d8f2542f9cf9a5097011c61c90073c031d2708 | from z3 import *
import re
dna = re.compile("dna_(\d+)")
opt = Optimize()
opt.from_file("../benchmarks/bench.smt2")
set_option("sat.random_seed",23)
def get_soft(soft):
return [f.arg(0) for f in soft.children()]
def free_vars(fs):
seen = set([])
vars = set([])
def fv(seen, vars, f):
if f in seen:
return
seen |= { f }
if f.decl().kind() == Z3_OP_UNINTERPRETED:
vars |= { f }
for c in f.children():
fv(seen, vars, c)
for f in fs:
fv(seen, vars, f)
return vars
def is_dna(f):
return f.decl().name().startswith("dna")
def dna_num(f):
m = dna.search(f.decl().name())
assert m
return int(m.group(1))
def split_fmls(fmls):
A = []
B = []
C = []
varsA = set([])
varsB = set([])
min_dna = 100000
max_dna = 0
for f in fmls:
vars = free_vars([f])
for v in vars:
if is_dna(v):
n = dna_num(v)
if n < min_dna:
min_dna = n
if n > max_dna:
max_dna = n
print(min_dna, max_dna)
mid = (max_dna + min_dna) / 2
print("Mid: ", mid)
for f in fmls:
vars = free_vars([f])
above = False
below = False
for v in vars:
if is_dna(v):
n = dna_num(v)
if n > mid:
above = True
else:
below = True
if not above and not below:
C.append((f, vars))
continue
if below:
A.append(f)
varsA |= vars
if above:
B.append(f)
varsB |= vars
for (f, vars) in C:
for v in vars:
if v in varsA:
A.append(f)
break
for v in vars:
if v in varsB:
B.append(f)
break
# print(A)
# print(B)
return A, B
def split_opt(opt):
soft = opt.objectives()[0]
fmls = opt.assertions()
A, B = split_fmls(opt.assertions())
varsA = free_vars(A)
varsB = free_vars(B)
soft_fmls = get_soft(soft)
shared_vars = { v for v in varsA if v in varsB }
optA = Optimize()
optB = Optimize()
optA.add(A)
optB.add(B)
for s in soft_fmls:
vars = free_vars([s])
for v in vars:
if v in varsA:
optA.add_soft(s)
break
for v in vars:
if v in varsB:
optB.add_soft(s)
break
return optA, optB, shared_vars
set_option(verbose=1)
def split_rec(opt, depth):
if depth == 0:
opt.check()
mdl = opt.model()
lb = mdl.eval(opt.objectives()[0])
return lb, lb, opt.model()
optA, optB, shared_vars = split_opt(opt)
lbA, ubA, mA = split_rec(optA, depth - 1)
lbB, ubB, mB = split_rec(optB, depth - 1)
mCommon = [ v == mA.eval(v) for v in shared_vars if mA.eval(v).eq(mB.eval(v)) ]
print("Fix common values:", len(mCommon), mCommon)
opt.add(mCommon)
opt.check()
mdl = opt.model()
ub = mdl.eval(opt.objectives()[0])
lb = mdl.eval(lbA + lbB)
print(lb, ub, mdl.eval(ubA + ubB))
return lb, ub, opt.model()
lb, ub, mdl = split_rec(opt, 4)
print(mdl)
print("value: ", mdl.eval(opt.objectives()[0]))
#optA1, optA2, shared_vars2 = split_opt(optA)
#optA.set(enable_lns=True)
#optA1.check()
#mA1 = optA1.model()
#optA2.add([v == mA1.eval(v) for v in shared_vars2])
#optA2.check()
#mA2 = optA2.model()
#for v in shared_vars2:
# print(v, mA1.eval(v), mA2.eval(v))
#optA1.add([v == mA2.eval(v) for v in shared_vars2])
#optA1.check()
|
9,832 | 63e28e6a1ea5db1d1c41bbc755b9c33905e066bb | #!/usr/bin/env python3
''' towerdev - Ansible Tower Testing Framework
MIT License
Copyright © 2021 falcon78921
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import towerdev.common
import towerdev.utilities
from colors import *
# Invoke dockerClient()
dockerClient = towerdev.common.dockerClient()
def runContainer(purpose, externalPort, osVersion, containerName, debug=True, **kwargs):
    """Run a supplemental (non-Tower) container from a pre-existing image.

    Only purpose == "ssh" is currently handled, for centos7/centos8 systemd
    images. Returns True when exactly one container named *containerName*
    is running afterwards, False otherwise.
    """
    # Optional debug that prints a dict of options
    if debug == True:
        print(dict(purpose=purpose, externalPort=externalPort,
                   osVersion=osVersion, containerName=containerName))
    # Determines what we do based on purpose
    if purpose == "ssh" and osVersion in ("centos7", "centos8"):
        dockerClient.containers.run(
            '{0}/systemd'.format(osVersion),
            privileged=False,
            volumes={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},
            tmpfs={'/tmp': 'exec', '/run': ''},
            detach=True,
            name=containerName,
            ports={'22/tcp': externalPort})
    return len(dockerClient.containers.list(filters={'name': containerName})) == 1
def runContainerCluster(towerVersion, osVersion, namingConvention, stream=True, **kwargs):
    """Run Tower containers in a clustered setup.

    :param towerVersion: Tower version string, e.g. "3.8.2"
    :param osVersion: base OS image flavour, e.g. "centos7"
    :param namingConvention: container-name prefix ("<prefix>-0", "<prefix>-1", ...)
    :param stream: stream ./setup.sh output line by line when True
    :returns: True when all containers are up and ./setup.sh exited 0
    """
    # runContainerCluster() defaults; can be overridden via **kwargs.
    # Fix: the original comment promised kwargs overrides but never read them.
    externalPort = kwargs.get('externalPort', None)
    containerCount = kwargs.get('containerCount', 3)
    debug = kwargs.get('debug', True)
    loadBalance = kwargs.get('loadBalance', False)

    # Optional debug that prints a dict of options
    if debug:
        runClusterOpts = dict(towerVersion=towerVersion, osVersion=osVersion, loadBalance=loadBalance, namingConvention=namingConvention, externalPort=externalPort, containerCount=containerCount, debug=debug)
        print(runClusterOpts)

    # Check to see if specified towerVersion has an image built
    check = towerdev.utilities.imageCheck(towerVersion)
    if check is False:
        print(color("ERROR: Deployment of container cluster failed. Please make sure the specified version of Tower has an image built.", fg="red"))
        return False

    for c in range(containerCount):
        runTowerContainer(towerVersion=towerVersion, externalPort=externalPort, osVersion=osVersion, containerName="{0}-{1}".format(namingConvention, c))

    clusterContainers = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})

    # Gather container IPs for inventory fillout
    containerIps = [container.attrs['NetworkSettings']['IPAddress'] for container in clusterContainers]
    print(clusterContainers[0])

    # Choose inventory file and messaging-backend host variable based on
    # towerVersion: 3.5/3.6 use rabbitmq_host, 3.7/3.8 use routable_hostname.
    for version, hostVar in (("3.5", "rabbitmq_host"), ("3.6", "rabbitmq_host"),
                             ("3.7", "routable_hostname"), ("3.8", "routable_hostname")):
        if version in towerVersion:
            chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_{1}.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion, version)
            clusterContainers[0].exec_run(cmd=chooseInventoryCmd)
            for ip in containerIps:
                modifyInventoryCmd = 'sed -i "2i{0} {1}={0}" /opt/ansible-tower-setup-{2}-1/inventory'.format(ip, hostVar, towerVersion)
                clusterContainers[0].exec_run(cmd=modifyInventoryCmd)
            break

    # Call ./setup.sh from the last container in the list
    setupCmd = '/bin/bash -c "cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh"'.format(towerVersion)
    setupLbCmd = '/bin/bash -c "cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true"'.format(towerVersion)

    # Fill in the version-specific database placeholder in the inventory
    inventoryDbVersion = towerVersion.replace(".", "")
    modifyInventoryDbCmd = "sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory".format(inventoryDbVersion, towerVersion)
    clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)

    if loadBalance:
        print(color("INFO: Running ./setup.sh with load balance configuration...", fg="yellow"))
        chosenCmd = setupLbCmd
    else:
        print(color("INFO: Running ./setup.sh with no load balance configuration...", fg="yellow"))
        chosenCmd = setupCmd

    if stream:
        # Stream ./setup.sh output line by line via the low-level API client
        lowLevelClient = towerdev.common.apiClient()
        calcRunContainer = len(clusterContainers) - 1
        createExec = lowLevelClient.exec_create(container="{0}-{1}".format(namingConvention, calcRunContainer), cmd=chosenCmd)
        runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False)
        for line in runSetupCmd:
            print(line.decode('utf-8'))
        inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])
        setupExitCode = inspect['ExitCode']
    else:
        # Fix: the original referenced an undefined 'towerContainer' here,
        # which raised NameError whenever stream=False.
        setupExitCode, _ = clusterContainers[-1].exec_run(cmd=chosenCmd)

    containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})
    # Cluster is healthy only when every container is up AND setup exited 0.
    # Fix: replaced the 'is not 0' identity checks with '!= 0' semantics and
    # the crash-prone 'runSetupCmd[0]' indexing of the streaming generator.
    clusterStatus = (len(containersList) == containerCount) and (setupExitCode == 0)
    return clusterStatus
def runTowerContainer(towerVersion, externalPort, osVersion, containerName, debug=False, **kwargs):
    """Runs Tower container from a pre-existing ansibletower/<os>:<version> image.

    Returns True when exactly one container named *containerName* is running
    afterwards, False otherwise.
    """
    allowedMemory = None  # reserved hook for a future memory cap

    # Optional debug that prints a dict of options
    if debug == True:
        print(dict(towerVersion=towerVersion, externalPort=externalPort,
                   osVersion=osVersion, containerName=containerName))

    # Build the run options once; a falsy externalPort means "let Docker pick".
    run_kwargs = dict(
        privileged=False,
        volumes={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},
        tmpfs={'/tmp': 'exec', '/run': ''},
        detach=True,
        name=containerName,
        ports={'443/tcp': externalPort or None})
    if allowedMemory is not None:
        run_kwargs['mem_limit'] = allowedMemory

    dockerClient.containers.run('ansibletower/{0}:{1}'.format(osVersion, towerVersion), **run_kwargs)
    return len(dockerClient.containers.list(filters={'name': containerName})) == 1
|
9,833 | f9b48c1b6489d8981e192838cf1c734e2296ab15 | #사각형의 면적을 구하는 프로그램을 작성하시오,
# Rectangle area program: area = height * base.
h = int(input('높이 입력: '))
b = int(input('밑변 입력: '))
print('높이는', h, ' 밑변은', b, '사각형의 면적은', h * b, '입니다.')
|
9,834 | 29eb1a1642d38160c138733e269bb3ba0c5d4bba |
def primo(num):
    """Print whether *num* is prime (trial division up to num - 1).

    Prints "El numero {num} es primo" or "... no es primo"; always returns None.
    """
    # Fix: the original tested num < 1, so 1 fell through to the trial-division
    # loop (empty for num == 1) and was wrongly reported as prime.
    if num < 2:
        print(f"El numero {num} no es primo")
        return None
    if num == 2:
        print(f"El numero {num} es primo")
        return None
    for i in range(2, num):
        if num % i == 0:
            print(f"El numero {num} no es primo")
            return None
    print(f"El numero {num} es primo")
def leerNumero():
    """Prompt for an integer on stdin and report whether it is prime."""
    valor = int(input("Escribe un numero ==> "))
    primo(valor)
def main():
    # Entry point: read one number from stdin and test it for primality.
    leerNumero()
if __name__ =="__main__":
main() |
9,835 | e018d28cbacb568596eb9a5134581db960111e14 | from django import forms
from . import models
from .validators import validate_metadata
class ServiceProviderForm(forms.ModelForm):
    """ModelForm for a ServiceProvider, edited as raw SAML metadata XML."""

    # Free-text XML, checked by validate_metadata before the form validates.
    xml = forms.CharField(label='SAML Metadata XML',
                          widget=forms.Textarea,
                          validators=[validate_metadata])
    class Meta:
        model = models.ServiceProvider
fields = ('xml',) |
9,836 | b11869076c2c8d6207df861cd1d0b0434b3f9477 | from peewee import BlobField
class BytesField(BlobField):
    """This is a BlobField adapted to our needs

    Default BlobField returns memoryview when getting data from the db. We want bytes.
    """

    def adapt(self, value):
        """Coerce memoryview values coming from the database into bytes.

        Fix: the original guarded with ``if value and isinstance(...)``, so an
        *empty* memoryview slipped through unconverted instead of becoming b''.
        """
        if isinstance(value, memoryview):
            return value.tobytes()
        return value
|
9,837 | 697f4dd640ddba0411eb6eb68e7ce079a6330670 | from typing import Any
from jinja2.environment import Environment
# Type-stub declarations mirroring jinja2's sandbox module (bodies elided).
# Cap enforced by the sandboxed range() replacement (see jinja2 docs).
MAX_RANGE = ... # type: int
# Attribute-name sets that the sandbox refuses to expose on functions,
# bound methods, and generators respectively.
UNSAFE_FUNCTION_ATTRIBUTES = ... # type: Any
UNSAFE_METHOD_ATTRIBUTES = ... # type: Any
UNSAFE_GENERATOR_ATTRIBUTES = ... # type: Any
def safe_range(*args): ...  # range() substitute; refuses ranges beyond MAX_RANGE
def unsafe(f): ...  # decorator marking a callable as forbidden inside the sandbox
def is_internal_attribute(obj, attr): ...  # interpreter-internal attribute check
def modifies_known_mutable(obj, attr): ...  # does attr mutate a known mutable type?
class SandboxedEnvironment(Environment):
    """Stub of jinja2.sandbox.SandboxedEnvironment: an Environment that
    restricts attribute access and can intercept operators when rendering
    untrusted templates (all bodies elided -- type stub only)."""
    sandboxed = ... # type: bool
    # Default operator lookup tables plus the operator sets to intercept.
    default_binop_table = ... # type: Any
    default_unop_table = ... # type: Any
    intercepted_binops = ... # type: Any
    intercepted_unops = ... # type: Any
    def intercept_unop(self, operator): ...
    # Per-instance operator tables (derived from the defaults above).
    binop_table = ... # type: Any
    unop_table = ... # type: Any
    def __init__(self, *args, **kwargs) -> None: ...
    # Policy hooks deciding whether attribute/callable access is permitted.
    def is_safe_attribute(self, obj, attr, value): ...
    def is_safe_callable(self, obj): ...
    def call_binop(self, context, operator, left, right): ...
    def call_unop(self, context, operator, arg): ...
    # Sandboxed subscript/attribute lookups used by compiled templates.
    def getitem(self, obj, argument): ...
    def getattr(self, obj, attribute): ...
    def unsafe_undefined(self, obj, attribute): ...
    # Dunder-prefixed positionals avoid clashing with template **kwargs keys.
    def call(__self, __context, __obj, *args, **kwargs): ...
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Stub: stricter sandbox variant (per jinja2 docs it additionally blocks
    attributes that modify known mutable types)."""
    def is_safe_attribute(self, obj, attr, value): ...
|
9,838 | 5c5cfcd240c8b05970dc8dff57bfbbdc98f1d100 | # Time: O(|V| + |E|)
# Space: O(|V|)
class Solution(object):
    def eventualSafeNodes(self, graph):
        """
        Return the sorted list of "safe" nodes: nodes from which every walk
        eventually reaches a terminal node (i.e. nodes on no cycle path).

        :type graph: List[List[int]]
        :rtype: List[int]
        """
        WHITE, GRAY, BLACK = range(3)

        def dfs(graph, node, lookup):
            # Colored already: safe iff fully processed (BLACK). Hitting a
            # GRAY node means we re-entered the current DFS stack -> cycle.
            if lookup[node] != WHITE:
                return lookup[node] == BLACK
            lookup[node] = GRAY
            if any(not dfs(graph, child, lookup) for child in graph[node]):
                return False
            lookup[node] = BLACK
            return True

        lookup = [WHITE] * len(graph)
        # Fix: use range()/list(filter(...)) so the method works on Python 3
        # (the original's xrange raises NameError and filter returns a lazy
        # object there); on Python 2 this still returns the same list.
        return list(filter(lambda node: dfs(graph, node, lookup), range(len(graph))))
|
9,839 | da5a366d1cc4f192a220dc38c7a74aeb3fba7cdb | #
# @lc app=leetcode.cn id=909 lang=python3
#
# [909] 蛇梯棋
#
# @lc code=start
from typing import List
class Solution:
    def snakesAndLadders(self, board: List[List[int]]) -> int:
        """Fewest dice rolls to reach square n*n on a Boustrophedon board,
        following every snake/ladder landed on; -1 if unreachable."""
        n = len(board)
        target = n * n

        def square_to_cell(sq):
            # Squares are numbered 1..n*n from the bottom-left corner,
            # alternating direction on each row (Boustrophedon order).
            row, col = divmod(sq - 1, n)
            if row % 2 == 1:
                col = n - 1 - col
            return n - 1 - row, col

        def destination(sq):
            # Follow a snake or ladder if one starts on this square.
            row, col = square_to_cell(sq)
            return board[row][col] if board[row][col] != -1 else sq

        from collections import deque
        frontier = deque([1])
        seen = {1}
        rolls = 0
        # Breadth-first search over squares; each BFS level is one dice roll.
        while frontier:
            for _ in range(len(frontier)):
                square = frontier.popleft()
                if square == target:
                    return rolls
                for die in range(1, 7):
                    landed = square + die
                    if landed > target:
                        break
                    landed = destination(landed)
                    if landed not in seen:
                        seen.add(landed)
                        frontier.append(landed)
            rolls += 1
        return -1
""" 21-06-27 每日一题打卡BFS
Accepted
211/211 cases passed (100 ms)
Your runtime beats 99.08 % of python3 submissions
Your memory usage beats 14.68 % of python3 submissions (15.1 MB)
"""
# board = [[-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,35,-1,-1,13,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,15,-1,-1,-1,-1]]
# s = Solution().snakesAndLadders(board)
# print(s)
# @lc code=end
|
9,840 | 7c2a59f698b75d0de89a16310d97a01506c99cb3 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import socket
import select
import time
"""=====================Head Define====================="""
UDP_RECEIVE_TIMEOUT = 1
LOOP_DELAY = 1
"""=====================Class====================="""
class UDP_packet:
    """One decoded status packet: the high nibble of *board_info* carries the
    board type, the low nibble the board number."""

    def __init__(self, board_info, board_add, state):
        bits = "{0:08b}".format(board_info)
        self.board_type = int(bits[:4], 2)
        self.board_num = int(bits[4:], 2)
        self.board_add = board_add
        self.state = state

    def _describe(self):
        # Shared rendering for both __str__ and __repr__.
        return "Type:{}, Num:{}, Addr:{}, State:{}".format(
            self.board_type, self.board_num, self.board_add, self.state)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
"""=====================Support functions====================="""
def init_UDP_connection(DEBUG_MODE=False):
    """Create a non-blocking UDP socket bound to the PC address.

    :param DEBUG_MODE: use loopback addresses instead of the LAN addresses
    :returns: (socket, master_ip, master_port) for talking to the master
    """
    if DEBUG_MODE:
        UDP_MASTER_IP = "127.0.0.2"
        UDP_MASTER_PORT = 5005
        UDP_PC_IP = "127.0.0.1"
        UDP_PC_PORT = 5006
    else:
        UDP_MASTER_IP = "192.168.1.26"
        UDP_MASTER_PORT = 5005
        UDP_PC_IP = "192.168.1.25"
        UDP_PC_PORT = 5005
    # Fix: the original format string reused {0}/{1} on both lines, so the
    # "Target" line printed the PC's address instead of the master's.
    print("My IP is: {0}, PORT: {1}\nTarget IP is: {2}, PORT: {3}".format(
        UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(0)
    sock.bind((UDP_PC_IP, UDP_PC_PORT))
    return sock, UDP_MASTER_IP, UDP_MASTER_PORT
"""===================== MAIN ====================="""
def main(sock, master_ip=None, master_port=None):
    """Echo loop: wait up to UDP_RECEIVE_TIMEOUT for a datagram; echo whatever
    arrived (or keep sending b"HELLO") to the master, forever.

    Backward compatible: when called with just the 3-tuple returned by
    init_UDP_connection() (as the original __main__ did), it is unpacked here.
    Fix: the original referenced UDP_MASTER_IP/UDP_MASTER_PORT, which are
    locals of init_UDP_connection and raised NameError at runtime.
    """
    if master_ip is None:
        sock, master_ip, master_port = sock
    data = b"HELLO"
    while True:
        ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
        if ready[0]:
            data, _ = sock.recvfrom(80)  # buffer size is 80 bytes
            print("PC: I just received message: [{0}]".format(data))
        # Send either the just-received payload or the initial b"HELLO".
        sock.sendto(data, (master_ip, master_port))
        print("PC: I just Sent a [{0}]".format(data))
if __name__ == '__main__':
    sock = init_UDP_connection()
main(sock) |
9,841 | e5bf57e7a171f7e42928b78d09dda7593a231cf9 | """
Every block element test will be automatically
wrapped inside `<p></p>\n`. Thats why every block
test should include this wrapper tag.
"""
from io import BytesIO
from unittest import TestCase
from unittest.mock import patch, Mock
import pytest
from django.core.files import File
from django_dynamic_fixture import G
from magplan.models import Attachment
from magplan.xmd.renderer import XMDRenderer
from magplan.xmd.mappers import plan_internal_mapper
@pytest.mark.django_db
class TestImage(TestCase):
    """Checks XMDRenderer.image() output for a mocked image mapper."""
    # Inputs fed to renderer.image() in the test below.
    MOCK_SRC = 'dummy.jpg'
    MOCK_TITLE = 'title'
    MOCK_ALT_TEXT = 'alt_text'
    def setUp(self):
        # One fixture attachment backed by an in-memory file.
        file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
        attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)
        self.mock_image_mapper = Mock()
        self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])
        # NOTE(review): the expected <img> carries a conf="..." attribute
        # rather than src= -- looks odd; confirm against XMDRenderer.image().
        self.expected_html = (
            '<figure>'
            '<img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption>'
            '</figure>'
        )
    def test_render_image(self):
        # Mapper returns the src unchanged; renderer should call it with the
        # raw src plus its attachment list, then emit the figure markup.
        self.mock_image_mapper.return_value = self.MOCK_SRC
        html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)
        self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)
        assert html == self.expected_html
|
9,842 | f23bfef2daf8fda4249435821dbc2e0b1846e3d6 |
def towers_of_hanoi(n, src, dest, temp, res):
    """Append the move sequence for *n* disks from *src* to *dest* (via *temp*)
    onto *res* and return it.

    Each move is a tuple such as ('disk ', n, ' from ', src, '->', dest).
    Fix: the n == 1 base case now returns *res* as well -- the original
    returned None there, crashing callers that asked for a 1-disk tower.
    """
    if n == 1:
        res.append(('disk 1 from ', src, '->', dest))
        return res
    towers_of_hanoi(n - 1, src, temp, dest, res)
    res.append(('disk ', n, ' from ', src, '->', dest))
    towers_of_hanoi(n - 1, temp, dest, src, res)
    return res
def steps_in_tower_of_hanoi(no_of_disks):
    """Return the full move list for *no_of_disks* disks, using pegs labelled
    'A' (source), 'B' (spare) and 'C' (destination)."""
    return towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])
if __name__ == "__main__":
    # Read the disk count from stdin and print one move per line.
    no_of_disks = int(input())
    res = steps_in_tower_of_hanoi(no_of_disks)
    # Each move is a tuple of mixed strings/ints; join its parts with spaces.
    print('\n'.join([' '.join(map(str, x)) for x in res]))
    print('\n')
|
9,843 | 917241482dc1f234d5fae9c107a5f21b018fe6d4 | def getmin(a, b, c):
if a <= b and a <= c:
print(a)
elif b <= a and b <= c:
print(b)
else:
print(c)
def filtername(name):
    """Normalize *name* to exactly three characters: truncate longer names,
    right-pad shorter ones with spaces."""
    return name[:3].ljust(3)
def filternames(names):
    """Return only the names whose length is not exactly 3, each normalized
    to three characters via filtername()."""
    return [filtername(name) for name in names if len(name) != 3]
def printsort2(x):
    """Sort *x* in place (ascending), then print the elements separated --
    and terminated -- by a single space, without a trailing newline."""
    size = len(x)
    # Exchange sort: after pass `left`, x[left] holds its final value.
    for left in range(size - 1):
        for right in range(left + 1, size):
            if x[left] > x[right]:
                x[left], x[right] = x[right], x[left]
    for value in x:
        print(value, end=" ")
def print_hell(inp):
    """Print "Hello" when the Korean greeting "안녕" occurs anywhere in *inp*."""
    if inp.find("안녕") != -1:
        print("Hello")
|
9,844 | 0f94537fa64066bb29c5e9e97836b0a8ac01ac19 | from django.shortcuts import render
from django.shortcuts import redirect
from block.models import Block
from .models import Article
from .forms import ArticleForm
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
def article_list(request, block_id):
    """Render one page of published articles (article_status == 0) for a block."""
    block = Block.objects.get(id=int(block_id))
    published = Article.objects.filter(block=block, article_status=0).order_by("-id")

    per_page = 2  # articles shown per page
    paginator = Paginator(published, per_page)
    page_no = int(request.GET.get("page_no", "1"))
    page = paginator.page(page_no)

    # Window of up to five page links centred on the current page.
    page_links = [n for n in range(page_no - 2, page_no + 3)
                  if 0 < n <= paginator.num_pages]

    context = {
        "articles": page.object_list,
        "b": block,
        "page_no": page_no,
        "page": page,
        "page_links": page_links,
        "p": paginator,
    }
    return render(request, "article_list.html", context)
@login_required
def article_create(request, block_id):
    """GET: show the creation form. POST: validate and save a new article,
    then redirect to the block's article list."""
    block_id = int(block_id)
    block = Block.objects.get(id=block_id)

    if request.method == "GET":
        return render(request, "article_create.html", {"b": block})

    form = ArticleForm(request.POST)
    if not form.is_valid():
        return render(request, "article_create.html", {"b": block, "form": form})

    article = form.save(commit=False)
    article.owner = request.user
    article.block = block
    article.article_status = 0  # new articles start in the published state
    article.save()
    return redirect("/article/list/%s" % block_id)
def article_detail(request, article_id):
    """Render a single article looked up by primary key."""
    return render(request, "article_detail.html",
                  {"article": Article.objects.get(id=article_id)})
|
9,845 | f471062573a5ec8cfeb194168edfba3d2700cac6 | from models import Sensor
import mysql.connector as mariadb
## CREATE A DB WITH MARIADB ##
mariadb_connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')
cursor = mariadb_connection.cursor()
def closeConnection():
    # NOTE(review): this closes the MODULE-LEVEL cursor/connection created at
    # import time -- not the per-call connections opened inside getTasks() /
    # getTask(), whose locals shadow these globals. After the first call the
    # module-level handles are dead; confirm which connection should close.
    cursor.close()
    mariadb_connection.close()
    return
def getTasks(amount):
    """Return every Sensor row as a list of Sensor.data payloads.

    NOTE(review): *amount* was accepted but never used by the original;
    it stays unused here to keep behavior identical -- confirm whether it
    should cap the number of rows returned.
    """
    connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM Sensor")
        all_data = [Sensor(row[0], row[1], row[2]).data for row in cursor.fetchall()]
        cursor.close()
    finally:
        # Fix: the original called closeConnection(), which closed the
        # module-level connection and leaked this per-call one.
        connection.close()
    return all_data
def getTask(task_id):
    """Return the Sensor.data payload for the single row with ID *task_id*."""
    connection = mariadb.connect(user='web', password='raspberry', database='PlantHubDB')
    try:
        cursor = connection.cursor()
        # Fix: parameterized query instead of str.format (SQL injection risk).
        cursor.execute("SELECT * FROM Sensor WHERE ID=%s", (task_id,))
        entry = cursor.fetchall()
        data = Sensor(entry[0][0], entry[0][1], entry[0][2])
        cursor.close()
    finally:
        # Fix: close this call's own connection; the original closed the
        # module-level one via closeConnection() and leaked this handle.
        connection.close()
    return data.data
|
9,846 | 23099b29fb5898c2556d1612690e33860662ca35 | from pyspark.sql.types import StructType, StructField, StringType, TimestampType, IntegerType
from main.config.spark_config import SparkConfiguration
import main.config.constants as Constants
from main.connectors.kafka_connector import KafkaConnector, extract_json_data
def main():
    """Build a Spark session, then continuously ingest 'visits' Kafka events
    into Delta via foreachBatch micro-batches (blocks until a stream ends)."""
    # Configure Spark Session
    config = {
        "spark.jars.packages": "io.delta:delta-core_2.12:0.8.0,"
                               "org.postgresql:postgresql:9.4.1211,"
                               "org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,"
                               "org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0",
        "spark.sql.extensions": "io.delta.sql.DeltaSparkSessionExtension",
        "spark.driver.memory": "8g",
        "spark.sql.catalog.spark_catalog": "org.apache.spark.sql.delta.catalog.DeltaCatalog",
        # Project-defined config keys/values (paths + connection settings).
        Constants.DELTA_SRC_PATH: Constants.DELTA_LOCATION,
        Constants.POSTGRESQL_DB: Constants.POSTGRESQL_DB_VALUE,
        Constants.POSTGRESQL_USER: Constants.POSTGRESQL_USER_VALUE,
        Constants.POSTGRESQL_PASSWORD: Constants.POSTGRESQL_PASSWORD_VALUE,
        Constants.POSTGRESQL_HOST: Constants.POSTGRESQL_HOST_VALUE,
        Constants.KAFKA_SERVER: Constants.KAFKA_SERVER_NAME,
    }
    spark_configuration = SparkConfiguration(app_name="visits_ads_event_ingestion", spark_master="local[4]",
                                             log_level="WARN", configuration=config)
    # Deferred import: NOTE(review) -- presumably the orchestrator needs the
    # Spark session configured above to exist at import time; confirm.
    import main.orchestrator as Orchestrator
    ########################
    # Visit events ingestion
    ########################
    # Schema of the JSON payload carried by each Kafka 'visits' message.
    visits_schema = StructType([
        StructField('id_user', IntegerType(), False),
        StructField('id_video', IntegerType(), False),
        StructField('id_device', IntegerType(), False),
        StructField('id_location', IntegerType(), False),
        StructField('visit_date', TimestampType(), True)
    ])
    visits_stream = KafkaConnector(spark_configuration).get_stream('visits', start_from_begining=False).load()
    visits_stream = extract_json_data(visits_stream, visits_schema)
    # For each micro-batch of visit events
    visits_stream.writeStream \
        .option("checkpointLocation", "checkpoint/visits") \
        .foreachBatch(lambda visits_batch, index: Orchestrator.ingest_visits(visits_batch, spark_configuration, index))\
        .start()
    # Await stream termination
    spark_configuration.spark_session.streams.awaitAnyTermination()
if __name__ == "__main__":
    main()
|
9,847 | 10d5eef304a3d293441169ebde1f7859537c4b6e | import os
from googleapiclient.discovery import build
import httplib2
from oauth2client import gce
from oauth2client.appengine import AppAssertionCredentials
from oauth2client.file import Storage
__author__ = 'ekampf'
import json
import logging
import apiclient.errors
from apiclient import http as apiclient_request
from apiclient import model as apiclient_model
from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \
BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError
# pylint: disable=E1002
class BigQueryModel(apiclient_model.JsonModel):
    """JSON model that injects the optional 'trace' global parameter into
    every outgoing BigQuery request."""

    def __init__(self, trace=None, **kwargs):
        super(BigQueryModel, self).__init__(**kwargs)
        self.trace = trace

    def request(self, headers, path_params, query_params, body_value):
        """Updates outgoing request: add the trace token when configured
        and not already present."""
        if self.trace and 'trace' not in query_params:
            query_params['trace'] = self.trace
        return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
    """Converts errors into BigQuery errors."""

    def __init__(self, http_model, *args, **kwargs):
        super(BigQueryHttp, self).__init__(*args, **kwargs)
        self._model = http_model

    @staticmethod
    def factory(bigquery_model):
        """Returns a function that creates a BigQueryHttp with the given model."""
        def _create_bigquery_http_request(*args, **kwargs):
            captured_model = bigquery_model
            return BigQueryHttp(captured_model, *args, **kwargs)
        return _create_bigquery_http_request

    def execute(self, **kwargs):
        """Execute the request, translating HttpError into the library's
        BigQueryError hierarchy.

        :raises BigQueryError: for JSON error payloads from the API
        :raises BigQueryCommunicationError: for non-JSON transport failures
        """
        try:
            return super(BigQueryHttp, self).execute(**kwargs)
        # Fix: 'except HttpError, e' is Python-2-only syntax; 'as e' parses on
        # both Python 2.6+ and Python 3.
        except apiclient.errors.HttpError as e:
            # TODO(user): Remove this when apiclient supports logging of error responses.
            self._model._log_response(e.resp, e.content)
            if e.resp.get('content-type', '').startswith('application/json'):
                result = json.loads(e.content)
                error = result.get('error', {}).get('errors', [{}])[0]
                raise BigQueryError.create(error, result, [])
            else:
                raise BigQueryCommunicationError(
                    ('Could not connect with BigQuery server.\n'
                     'Http response status: %s\n'
                     'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param application_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped").
So we filter out "invalid" rows and do a 2nd pass.
Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'sourceUris': ['gs:/%s' % s for s in gcs_links],
'destinationTable': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': table_id
},
}
}
}
logging.info('about to insert job:%s', job_data)
try:
job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()
status = job['status']
if 'errorResult' in status:
raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])
return job
except BigQueryError as ex:
logging.exception(ex)
raise
def monitor_insert_job(self, project_id, job_id):
    """Poll a load job once and raise if it finished with errors.

    :param project_id: project that owns the job
    :param job_id: identifier of the job to check
    :raises BigQueryError: when a DONE job carries an errorResult
    """
    try:
        logging.info('about to monitor job: %s', job_id)
        # Bug fix: jobs().get() must receive keyword arguments and must be
        # executed -- the original passed positionals and returned the
        # un-executed HttpRequest, so job['status'] could never work.
        job = self.api_client.jobs().get(projectId=project_id, jobId=job_id).execute()
        logging.info('Got job response: %s', job)
        state = job['status']['state']
        if state == 'DONE':
            logging.info("Job %s is done loading!", job_id)
            if 'errorResult' in job['status']:
                raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})
    except BigQueryError as ex:
        logging.exception(ex)
        raise
def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):
    """Retrieves the results of a query job.
    :param project_id: Project ID of the query job.
    :param job_id: Job ID of the query job.
    :param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.
    :param pageToken: string, Page token, returned by a previous call, to request the next page of results
    :param maxResults: integer, Maximum number of results to read
    :param startIndex: string, Zero-based index of the starting row
    :return: the query-results resource (a dict), not an un-executed request
    """
    try:
        # Bug fix: use keyword arguments and execute the request -- the
        # original passed positionals (googleapiclient rejects those) and
        # returned the HttpRequest object instead of the results.
        return self.api_client.jobs().getQueryResults(
            projectId=project_id,
            jobId=job_id,
            timeoutMs=timeoutMs,
            pageToken=pageToken,
            maxResults=maxResults,
            startIndex=startIndex,
        ).execute()
    except BigQueryError as ex:
        logging.exception(ex)
        raise
|
9,848 | cfe5d013c968afdbf1fc80e3c8c3233a3678450b | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 14:23:28 2018
@author: emily
"""
import pipeline
import numpy as np
import matplotlib.pyplot as plt
import pstats
import cProfile
# Profile the whole run so hot spots in the inversion can be inspected later.
pr = cProfile.Profile()
pr.enable()
#def try_running():
# Inversion controls: iteration budget and random seed.
max_it=200000
rnd_sd = 1
# Depth grid: 0.2 km steps to 10 km, 1 km steps to 60 km, 5 km steps to 200 km.
deps = np.concatenate((np.arange(0,10,0.2), np.arange(10,60,1), np.arange(60,201,5)))
# True (input) model used to generate the synthetic observations.
# NOTE(review): assumes idep indexes into all_deps -- confirm in pipeline.Model.
model = pipeline.Model(vs = np.arange(3.5, 4.8, 0.1), all_deps = deps,
                       idep = np.array([25, 50, 60,70,80,90,100,102,104,106,
                                        108,110,112]),
                       std_rf = 0, lam_rf = 0, std_swd = 0)
#model = pipeline.Model(vs = np.array([1.8, 2.4, 3.4, 4.5, 4.7, 4.65]), all_deps = deps,
#                       idep = np.array([10, 32, 41, 60, 96, 120]),
#                       std_rf = 0, lam_rf = 0, std_swd = 0)
#model = pipeline.Model(vs = np.array([3.4, 4.5]), all_deps = deps,
#                       idep = np.array([60, 96]),
#                       std_rf = 0, lam_rf = 0, std_swd = 0)
# Synthetic receiver-function and surface-wave-dispersion observations from
# the known model (periods are 1/frequency for 0.02-0.09 Hz).
rf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))
swd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1/np.arange(0.02,0.1, 0.01), 1e6)
# Prior bounds on every inverted parameter.
all_lims = pipeline.Limits(
    vs = (0.5,5.5), dep = (0,200), std_rf = (0,0.05),
    lam_rf = (0.05, 0.5), std_swd = (0,0.15))
# Run the joint inversion; out[1] appears to hold saved models column-wise
# with depths in column 0 (used that way below) -- confirm in pipeline.
out = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)
actual_model = pipeline.SaveModel(pipeline.MakeFullModel(model),out[1][:,0])
#%%
# Post-process the chain: keep columns with a positive first entry (accepted
# models), then keep only the most recent fifth as the posterior sample.
all_models = out[1]
good_mods = all_models[:,np.where(all_models[0,]>0)[0]]
nit = good_mods.shape[1]
good_mods = good_mods[:,-int(nit/5):]
# Column-wise mean/std across the retained models.
mean_mod = np.mean(good_mods, axis = 1)
std_mod = np.std(good_mods, axis = 1)
good_mod = pipeline.Model(vs = mean_mod, all_deps = all_models[:,0],
                          idep = np.arange(0,mean_mod.size),
                          lam_rf = 0, std_rf = 0, std_swd = 0)
fullmodel = pipeline.MakeFullModel(good_mod)
# Figure 1, left panel: every stored model (lighter = earlier) vs depth,
# with the true model overlaid in red.
fig1 = plt.figure();
ax1 = plt.subplot(121)
for k in range(all_models[1,].size-1):
    colstr = str(0.75-k/2/all_models[1,].size)
    plt.plot(all_models[:,k],all_models[:,0],
             '-',linewidth=1,color=colstr)
ax1.invert_yaxis()
ax1.plot(actual_model,all_models[:,0],'r-',linewidth=3)
ax1.set_xlim((1.5,5))
ax1.set_xlabel('Shear Velocity (km/s)')
ax1.set_ylabel('Depth (km)')
ax1.set_title("{} iterations".format(nit*100))
# Right panel: the retained posterior sample, its mean (blue) and +/- one
# standard deviation (cyan), true model dashed red.
ax3 = plt.subplot(122)
for k in range(good_mods[0,].size-1):
    colstr = str(0.85-k/2/good_mods[0,].size)
    ax3.plot(good_mods[:,k],all_models[:,0],
             '-',linewidth=1,color=colstr)
ax3.invert_yaxis()
ax3.plot(mean_mod,all_models[:,0],'b-',linewidth = 2)
ax3.plot(mean_mod+std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(mean_mod-std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(actual_model,all_models[:,0],'r--',linewidth=1)
ax3.set_xlim((1.5,5))
ax3.set_xlabel('Shear Velocity (km/s)')
ax3.set_ylabel('Depth (km)')
ax3.set_title('Most recent {}'.format(good_mods.shape[1]))
# Histogram the posterior on a regular (depth, velocity) grid: i_ed maps each
# even 0.1 km depth to the nearest stored depth index, then each model adds a
# count at its velocity bin (0.01 km/s bins from the lower vs limit).
allvels = np.arange(all_lims.vs[0],all_lims.vs[1],0.01)
evendeps = np.arange(0,all_models[-1,0],0.1)
i_ed = np.zeros(evendeps.shape, dtype = int)
for k in range(all_models[:,0].size-1,0,-1):
    i_ed[all_models[k,0]>=evendeps] = k
mod_space = np.zeros((evendeps.size,allvels.size))
for k in range(1,good_mods.shape[1]):
    even_vels = good_mods[i_ed,-k]
    inds = np.round(even_vels-all_lims.vs[0],2)/0.01
    inds = inds.astype(int)
    mod_space[range(mod_space.shape[0]),inds] += 1
plt.tight_layout()
# Figure 2: log-density image of the model space histogram (1e-1 avoids
# log10(0) in empty bins); y axis flipped so depth increases downward.
fig2 = plt.figure()
ax2 = plt.subplot(121)
ax2.imshow(np.log10(mod_space[-1::-1]+1e-1), cmap = 'viridis', aspect = allvels[-1]/evendeps[-1],
           extent = [allvels[0], allvels[-1], evendeps[0], evendeps[-1]])
ax2.invert_yaxis()
ax2.set_xlabel('Shear Velocity (km/s)')
ax2.set_ylabel('Depth (km)')
ax2.xaxis.set_label_position('top')
ax2.xaxis.tick_top()
ax2.set_xlim((1.5,5))
# Receiver function fit: observed (red) vs synthetic from the mean model.
plt.figure(); plt.title('Receiver Function - real: red; synth: grey')
rft = np.arange(0,rf_obs.dt*rf_obs.amp.size,rf_obs.dt)
plt.plot(rft, rf_obs.amp, 'r-', linewidth=2)
synth_rf = pipeline.SynthesiseRF(fullmodel)
plt.plot(rft,synth_rf.amp, '-',color = '0.25', linewidth=1)
# Dispersion fit at the observed periods.
synth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1e6)
plt.figure(); plt.title('Surface Wave Dispersion - real: red; synth: grey')
plt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)
plt.plot(synth_swd.period, synth_swd.c, '-',color = '0.25', linewidth=1)
# Convergence diagnostics recorded by the inversion.
plt.figure(); plt.title("Mahalanobis distance (least squares misfit - phi)")
plt.plot(np.log10(out[2]))
plt.figure(); plt.title("Likelihood of accepting new model - alpha(m|m0)")
plt.plot(np.log10(out[3]))
print(np.mean(out[4]))
#%%
# Dump the cumulative-time profile collected at the top of the script.
pr.disable()
s=open('thingy4.txt','w')
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
s.close()
9,849 | 6868a8b5d36403f1417301acdca5f5dc9e45c682 | from __future__ import division
import re
import sys
import six
from six.moves import queue
import os
import io
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.cloud import speech as speech1
from google.cloud.speech import enums as enums2
from google.cloud.speech import types as types2
from google.cloud import speech_v1p1beta1 as speech2
class Google_Cloud:
    """Thin wrapper around the Google Cloud Natural Language API for one text."""

    def __init__(self, text):
        # Echo the input so callers can see what is being analysed.
        print(text)
        self.client = language.LanguageServiceClient()
        if isinstance(text, six.binary_type):
            text = text.decode('utf-8')
        self.document = types.Document(
            content=text.encode('utf-8'),
            type=enums.Document.Type.PLAIN_TEXT)

    def sentiment(self):
        """Return document sentiment as {'sentiment': score, 'magnitude': magnitude}."""
        analysis = self.client.analyze_sentiment(self.document).document_sentiment
        return {'sentiment': analysis.score, 'magnitude': analysis.magnitude}

    def entities(self):
        """Return the lower-cased entity names found in the text, sorted."""
        found = self.client.analyze_entities(self.document).entities
        return sorted(entity.name.lower() for entity in found)

    def entity_sentiment(self):
        """Return a mapping of entity name -> entity-level sentiment."""
        # Detect and send native Python encoding to receive correct word offsets.
        if sys.maxunicode == 65535:
            encoding = enums.EncodingType.UTF16
        else:
            encoding = enums.EncodingType.UTF32
        result = self.client.analyze_entity_sentiment(self.document, encoding)
        return {entity.name: entity.sentiment for entity in result.entities}

    def syntax(self):
        """Detects syntax in the text; returns 'POS: token' strings."""
        # part-of-speech tags from enums.PartOfSpeech.Tag
        pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',
                   'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
        tokens = self.client.analyze_syntax(self.document).tokens
        return [u'{}: {}'.format(pos_tag[token.part_of_speech.tag], token.text.content)
                for token in tokens]

    def categories(self):
        """Classifies content categories of the provided text; returns their names."""
        return [category.name
                for category in self.client.classify_text(self.document).categories]
class Google_ST:
    """Speech-to-text helper around the Google Cloud Speech API.

    The recognize/collect-transcript logic was copy-pasted three times in the
    original; it now lives in a single ``_recognize`` helper while the retry
    cascade for .wav input is preserved exactly.
    """

    def __init__(self, file, rate):
        # file: an opened audio file object (only used by printFields);
        # rate: sample-rate hint (currently unused by the configs below).
        self.audio_file = file
        self.client = speech1.SpeechClient()
        self.rate = rate

    def printFields(self):
        """Debug helper: show the types of the stored file object and its bytes."""
        print(type(self.audio_file))
        print(type(self.audio_file.read()))

    def _recognize(self, uri, config):
        """Run one synchronous recognize call for *uri* and return the joined
        transcript, printing each partial transcript like the inline original."""
        audio = speech1.types.RecognitionAudio(uri=uri)
        response = self.client.recognize(config, audio)
        result_str = ''
        for result in response.results:
            result_str += result.alternatives[0].transcript
            print('Transcript: {}'.format(result.alternatives[0].transcript))
        return result_str

    def transcribe_file(self, uri):
        """Transcribe the audio at *uri* (a GCS link ending in .wav or .flac).

        For .wav input it tries, in order: a stereo config, a mono config, and
        finally a long-running job; any exception triggers the next fallback,
        mirroring the original nested try/except cascade. The final failure is
        printed and swallowed (returns None), as before.
        """
        if uri.endswith('.wav'):
            try:
                config = speech1.types.RecognitionConfig(
                    encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
                    #sample_rate_hertz=self.rate,
                    language_code='en-US',
                    audio_channel_count=2,
                    enable_separate_recognition_per_channel=True
                )
                return self._recognize(uri, config)
            except Exception:
                try:
                    config = speech1.types.RecognitionConfig(
                        encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
                        #sample_rate_hertz=self.rate,
                        language_code='en-US',
                    )
                    return self._recognize(uri, config)
                except Exception:
                    try:
                        return self.transcribe_long_file(uri)
                    except Exception as e3:
                        # Best-effort: final failure is reported, not raised.
                        print(e3)
        elif uri.endswith('.flac'):
            try:
                config = speech1.types.RecognitionConfig(
                    encoding=speech1.enums.RecognitionConfig.AudioEncoding.FLAC,
                    #sample_rate_hertz=self.rate,
                    language_code='en-US',
                )
                return self._recognize(uri, config)
            except Exception as e:
                print(e)
        else:
            return "Please use .wav or .flac audio files"

    def transcribe_long_file(self, uri):
        """Transcribe via a long-running operation (audio too long for recognize)."""
        config = speech1.types.RecognitionConfig(
            # Consistency fix: use the speech1 enum like every other config in
            # this class (the original mixed in speech2.enums; the LINEAR16
            # value is identical, so behaviour is unchanged).
            encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
            #sample_rate_hertz=self.rate,
            language_code='en-US',
        )
        audio = speech1.types.RecognitionAudio(uri=uri)
        operation = self.client.long_running_recognize(config, audio)
        print('Waiting for operation to complete')
        response = operation.result(timeout=90)
        result_str = ''
        for result in response.results:
            result_str += result.alternatives[0].transcript
        return result_str
|
9,850 | f4b704a1416bfd6524340a68a20981957abf4340 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the ES library for Apache Kibble.
It stores the elasticsearch handler and config options.
"""
import elasticsearch
from kibble.configuration import KibbleConfigParser
class KibbleESWrapper(object):
    """
    Class for rewriting old-style queries to the new ones,
    where doc_type is an integral part of the DB name
    (every call targets the combined "<index>_<doc_type>" index).
    """

    def __init__(self, ES):
        self.ES = ES

    @staticmethod
    def _db(index, doc_type):
        # Combined index name used by every wrapped call.
        return index + "_" + doc_type

    def get(self, index, doc_type, id):
        return self.ES.get(index=self._db(index, doc_type), doc_type="_doc", id=id)

    def exists(self, index, doc_type, id):
        return self.ES.exists(index=self._db(index, doc_type), doc_type="_doc", id=id)

    def delete(self, index, doc_type, id):
        return self.ES.delete(index=self._db(index, doc_type), doc_type="_doc", id=id)

    def index(self, index, doc_type, id, body):
        return self.ES.index(index=self._db(index, doc_type), doc_type="_doc", id=id, body=body)

    def update(self, index, doc_type, id, body):
        return self.ES.update(index=self._db(index, doc_type), doc_type="_doc", id=id, body=body)

    def scroll(self, scroll_id, scroll):
        return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)

    def delete_by_query(self, **kwargs):
        return self.ES.delete_by_query(**kwargs)

    def search(self, index, doc_type, size=100, scroll=None, _source_include=None, body=None):
        return self.ES.search(
            index=self._db(index, doc_type),
            doc_type="_doc",
            size=size,
            scroll=scroll,
            _source_include=_source_include,
            body=body,
        )

    def count(self, index, doc_type="*", body=None):
        return self.ES.count(index=self._db(index, doc_type), doc_type="_doc", body=body)
class KibbleESWrapperSeven(object):
    """
    Class for rewriting old-style queries to the >= 7.x ones,
    where doc_type is an integral part of the DB name and NO DOC_TYPE!
    """

    def __init__(self, ES):
        self.ES = ES

    @staticmethod
    def _db(index, doc_type):
        # Combined index name; 7.x has no separate doc_type argument.
        return index + "_" + doc_type

    def get(self, index, doc_type, id):
        return self.ES.get(index=self._db(index, doc_type), id=id)

    def exists(self, index, doc_type, id):
        return self.ES.exists(index=self._db(index, doc_type), id=id)

    def delete(self, index, doc_type, id):
        return self.ES.delete(index=self._db(index, doc_type), id=id)

    def index(self, index, doc_type, id, body):
        return self.ES.index(index=self._db(index, doc_type), id=id, body=body)

    def update(self, index, doc_type, id, body):
        return self.ES.update(index=self._db(index, doc_type), id=id, body=body)

    def scroll(self, scroll_id, scroll):
        return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)

    def delete_by_query(self, **kwargs):
        return self.ES.delete_by_query(**kwargs)

    def search(self, index, doc_type, size=100, scroll=None, _source_include=None, body=None):
        # Note the 7.x client spells the parameter `_source_includes`.
        return self.ES.search(
            index=self._db(index, doc_type),
            size=size,
            scroll=scroll,
            _source_includes=_source_include,
            body=body,
        )

    def count(self, index, doc_type="*", body=None):
        return self.ES.count(index=self._db(index, doc_type), body=body)
class KibbleDatabase(object):
    """Holds the Elasticsearch connection for Kibble, wrapped to match the
    server's major version."""

    def __init__(self, config: KibbleConfigParser):
        self.config = config
        self.dbname = config.get("elasticsearch", "dbname")
        conn_uri = config.get("elasticsearch", "conn_uri")
        self.ES = elasticsearch.Elasticsearch(
            [conn_uri],
            use_ssl=config.getboolean("elasticsearch", "ssl"),
            verify_certs=False,
            max_retries=5,
            retry_on_timeout=True,
        )
        # IMPORTANT BIT: the query style depends on the server's major
        # version -- >= 7.x drops doc_type entirely, 6.x folds it into the
        # index name -- so wrap the client accordingly.
        major = int(self.ES.info()["version"]["number"].split(".")[0])
        self.ESversion = major
        wrapper_cls = None
        if major >= 7:
            wrapper_cls = KibbleESWrapperSeven
        elif major >= 6:
            wrapper_cls = KibbleESWrapper
        if wrapper_cls is not None:
            self.ES = wrapper_cls(self.ES)
|
9,851 | 8ff7ace102b781b35fff0671e2c606bf662e2767 | # Generated by Django 2.0.7 on 2018-09-27 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Inverts the many-to-many relation: it moves from
    # EducationGroup.students to Student.education_groups.

    dependencies = [
        ('education', '0005_auto_20180927_1041'),
    ]

    operations = [
        # Drop the old forward relation on EducationGroup...
        migrations.RemoveField(
            model_name='educationgroup',
            name='students',
        ),
        # ...and recreate it from the Student side (optional, hence blank=True).
        migrations.AddField(
            model_name='student',
            name='education_groups',
            field=models.ManyToManyField(blank=True, to='education.EducationGroup', verbose_name='Education Groups'),
        ),
    ]
|
9,852 | 6f583fde0eeab84984629b795e428300503a49c9 | from adb_local_installer.connection import ADBConnection
# Open an ADB connection to host "a95x01" in the local domain and print the
# underlying connection object; the context manager handles teardown on exit.
with ADBConnection("a95x01", domain="dohmens.local") as conn:
    print(conn.conn)
9,853 | 6f1bb9fde9ed9667ab81baa9e8ec965d711a0556 | #! /usr/bin/env python3
import os
import requests
# import json
external_ip = "xx"
data_path = "/data/feedback/"
url = "http://{}/feedback/".format(external_ip)
def read(path=None):
    """Read every feedback file in *path* and return a list of record dicts.

    Each file must contain at least four lines, mapped in order to the keys
    title, name, date and feedback (newlines are kept, as before).

    :param path: optional directory to scan; defaults to the module-level
        ``data_path`` so existing callers keep the old behaviour (the
        parameter is a backward-compatible addition that makes the function
        testable and reusable).
    :return: list of {'title', 'name', 'date', 'feedback'} dicts
    """
    if path is None:
        path = data_path
    result_list = []
    for file_name in os.listdir(path):
        # os.path.join also copes with a path lacking the trailing separator
        # the original string concatenation relied on.
        with open(os.path.join(path, file_name)) as f:
            content = f.readlines()
        # Fixes: 'dict' no longer shadows the builtin, and the redundant
        # f.close() inside the 'with' block is gone.
        result_list.append({
            "title": content[0],
            "name": content[1],
            "date": content[2],
            "feedback": content[3],
        })
    return result_list
def send(list):
    """POST each feedback dict to the service URL, logging the outcome per item."""
    for entry in list:
        response = requests.post(url, json=entry)
        outcome = "SEND_SUCC" if response.status_code == 200 else "SEND_FAIL"
        forDEBUG(outcome, entry["title"])
def forDEBUG(p1, p2):
    """Print a debug line in the fixed "DEBUG:: tag, detail" format."""
    message = "DEBUG:: {}, {}".format(p1, p2)
    print(message)
def action():
    # Orchestrate one run: read every feedback file, then POST each record.
    plist = read()
    send(plist)

# Runs on import/execution -- this module is a script, not a library.
action()
9,854 | e982fd5bed540b836fd4e2caaec033d8cbfb0e4f | from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from .models import *
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from django.template.loader import get_template
from django.template import Context
from django.views.decorators.csrf import csrf_exempt
from django.template.context_processors import csrf
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import xml.etree.ElementTree as etree
from xml.dom.minidom import Document, parse
import xml.dom.minidom as dom
import datetime
import sys
from .parser import get_data
from django.http import QueryDict
import urllib
# Create your views here.
@csrf_exempt
def login_form(request):
    """Return the HTML of the login form (the request argument is unused)."""
    parts = [
        '<form action="login" method="POST">',
        'Nombre<br><input type="text" name="Usuario"><br>',
        'Contraseña<br><input type="password" name="Password"><br>',
        '<br><input type="submit" value="Entrar"></form>',
    ]
    return "".join(parts)
@csrf_exempt
def loginuser(request):
    """Authenticate the POSTed credentials; redirect to the user's personal
    page on success, render the failure template otherwise."""
    username = request.POST['Usuario']
    password = request.POST['Password']
    user = authenticate(username=username, password=password)
    if user is not None:
        login(request,user)
        # Each user's personal page lives at /<username>.
        direcc = '/' + str(user)
        return redirect(direcc)
    else:
        Error = "Por favor, introduzca un usuario y contraseña válidos"
        template = get_template("fail.html")
        c = Context ({'Error': Error})
        renderizado = template.render(c)
        return HttpResponse(renderizado)
def lista_megustas():
    """Build the HTML listing of the five parking lots with most 'me gusta'
    votes; lots with zero votes are skipped."""
    lista_todos = Aparcamiento.objects.all()
    # Top five by like counter, descending.
    lista_ordenada = lista_todos.order_by("-contador_megusta")[:5]
    Response = "LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>"
    Existe = False
    for i in lista_ordenada:
        megustas = i.contador_megusta
        #comentarios = Comentario.objects.filter(aparcamiento=i)
        if megustas != 0:
            Response += "<li><a href=" + i.content_url + ">" + i.nombre + "<br></a>"
            Response += "Dirección: " + i.clase_vial + " " + i.localizacion + ", nº " + str(i.num)
            Response += "<br><a href=http://localhost:1234/aparcamientos/" + i.entidad + ">" + "Más información<br></a><br>"
            Existe = True
    # Fallback message when no lot has any likes yet.
    if Existe == False:
        Response += "Aún no se han registrado comentarios para ningún aparcamiento"
    Response += "</br></br>"
    return Response
def paginas_personales():
    """Build the HTML list linking to every user's personal page, using the
    custom page title when the user has one."""
    Lista = "PÁGINAS DE USUARIOS<br><br>"
    usuarios = User.objects.all()
    for i in usuarios:
        try:
            pagina = Usuario.objects.get(nombre=i.id).titulo_pagina
        except ObjectDoesNotExist:
            # Default title for users without a Usuario profile row.
            pagina = "Página de " + i.username
        Lista += "<a href=http://localhost:1234/" + i.username + ">" + pagina + "</a> Usuario: " + i.username + "<br>"
    return Lista
def lista_aparcamientos():
    """Build an HTML <li> list with a 'Más información' link per parking lot."""
    items = []
    for aparcamiento in Aparcamiento.objects.all():
        items.append('<li><p>' + aparcamiento.nombre + '<a href="' + aparcamiento.entidad + '"> --> Más información</a></p></li>')
    return ''.join(items)
def aparcamientos_seleccionados(user,request):
    """Return (html, page) for the parking lots the given user selected,
    paginated five per page via the ?page= query parameter."""
    user_object = User.objects.get(username=user)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        paginator = Paginator(lista_seleccionados,5)
        page = request.GET.get('page')
        try:
            seleccionados = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            seleccionados = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            seleccionados = paginator.page(paginator.num_pages)
        lista = "Listado de aparcamientos seleccionados por " + user + "<br>"
        for i in seleccionados:
            lista += "<br><li>Fecha de selección: " + str(i.fecha_seleccion)
            lista += "<br><a href=" + i.aparcamiento.content_url + ">" + i.aparcamiento.nombre + "<br></a>"
            lista += "Dirección: " + i.aparcamiento.clase_vial + " " + i.aparcamiento.localizacion + ", nº " + str(i.aparcamiento.num)
            lista += "<br><a href=http://localhost:1234/aparcamientos/" + i.aparcamiento.entidad + ">" + "Más información</a><br>"
    except ObjectDoesNotExist:
        # No Usuario profile row means nothing was ever selected.
        lista = "El usuario aún no ha seleccionado ningún aparcamiento"
        seleccionados = ""
    return lista,seleccionados
def accesibles(value):
    """Return the HTML form with the 'Accesibles' toggle button carrying *value*."""
    button = '<button type="submit" name="Accesible" value="' + str(value) + '"> Accesibles</button></form>'
    return '<form action="" method="POST">' + button
@csrf_exempt
def pagina_principal(request):
    """Landing page: shows the login form (or a greeting), the most-liked
    lots and user pages; a POSTed 'Accesible' toggles between the filtered
    and full lot listings."""
    formulario = login_form(request)
    list_megustas = lista_megustas()
    users = paginas_personales()
    value = 1
    accesible = accesibles(value)
    template = get_template("index.html")
    if request.user.is_authenticated():
        username = str(request.user)
        formulario = 'Bienvenido ' + username
        formulario += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    if request.method == 'POST':
        # The pressed button's name is the first token of the raw body.
        key = request.body.decode("utf-8").split('=')[0]
        if key == 'Accesible':
            value = request.POST['Accesible']
            if value == '1':
                # Show only accessible lots; next click shows everything.
                lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                value = 0
                for i in lista_accesibles:
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista += "<li><p>" + nombre_aparcamiento + "</p><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
            else:
                lista = '<a href="http://localhost:1234/" > Volver </a>'
                aparcamientos = Aparcamiento.objects.all()
                for aparcamiento in aparcamientos:
                    nombre_aparcamiento = aparcamiento.nombre
                    url_aparcamiento = aparcamiento.entidad
                    lista += '<li><p>' + nombre_aparcamiento + '. URL del aparcamiento: ' + '<a href="aparcamientos/' + url_aparcamiento + '"> ⇾ Más información</a></br></p>'
                value = 1
            accesible = accesibles(value)
            c = Context({'login': formulario, 'list_users':lista, 'accesible': accesible})
    else:
        # First GET with an empty database triggers the initial data import.
        init = Aparcamiento.objects.all()
        if len(init) == 0:
            get_data()
        c = Context({'login': formulario, 'list':list_megustas, 'list_users':users, 'accesible': accesible})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def mylogout(request):
    # Log the current user out and return to the landing page.
    logout(request)
    return redirect("/")
@csrf_exempt
def usuarios(request, peticion):
    """Personal page for user *peticion*: public view for visitors, private
    view (title/selection/CSS forms) for the owner. POSTs are dispatched on
    the first form-field name found in the raw body."""
    formulario = '<form action="" method="POST">'
    formulario += '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
    formulario += '<input type="submit" value=" Actualizar"></form>'
    css = '<form action="" method="POST">'
    css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
    css += '<br><br>Modifique el color de letra <input type="color" name="Color"><br>'
    css += '<br><input type="submit" value="Modificar"></form>'
    aparcamientos = Aparcamiento.objects.all()
    lista= "<br>LISTADO DE APARCAMIENTOS<br><br>"
    # One "Seleccionar" button per lot, carrying the lot name as its value.
    for aparcamiento in aparcamientos:
        nombre_aparcamiento = aparcamiento.nombre
        lista += nombre_aparcamiento
        lista += '<form action="" method="POST">'
        lista += '<button type="submit" name="Seleccionar" value="' + nombre_aparcamiento + '">Seleccionar</button><br></form>'
    user_object= User.objects.get(username=peticion)
    if request.method == 'POST':
        key = request.body.decode("utf-8").split('=')[0]
        if key == "Titulo":
            # Update (or create) the Usuario row with the new page title.
            titulo = request.POST['Titulo']
            try:
                user = Usuario.objects.get(nombre=user_object)
                user.titulo_pagina = titulo
                user.save()
            except ObjectDoesNotExist:
                p = Usuario(nombre=user_object, titulo_pagina=titulo)
                p.save()
        elif key == "Seleccionar":
            # Record the selection once per (user, lot) pair.
            nombre_aparcamiento = request.POST['Seleccionar']
            today = datetime.datetime.today()
            try:
                selector = Usuario.objects.get(nombre=user_object)
                aparcamiento = Aparcamiento.objects.get(nombre=nombre_aparcamiento)
            except:
                # Missing profile row: create it, then retry the lookup.
                p = Usuario(nombre=user_object)
                p.save()
                selector = Usuario.objects.get(nombre=user_object)
            Check = False
            lista_usuario = Seleccionados.objects.filter(selector=selector)
            for i in lista_usuario:
                if nombre_aparcamiento == i.aparcamiento.nombre:
                    Check=True
            if Check == False:
                p = Seleccionados(aparcamiento=aparcamiento, selector=selector, fecha_seleccion=today)
                p.save()
        elif key == "Letra":
            # Persist the custom font size/colour (size defaults to 15).
            letra = request.POST['Letra']
            color = request.POST['Color']
            try:
                user = Usuario.objects.get(nombre=user_object)
            except:
                p = Usuario(nombre=user_object)
                p.save()
                user = Usuario.objects.get(nombre=user_object)
            if letra == "":
                letra = "15"
            user.letra = letra
            user.color = color
            user.save()
    lista_seleccionados, seleccionados= aparcamientos_seleccionados(peticion,request)
    if request.user.is_authenticated():
        username = str(request.user)
        if peticion != username: # Not the owner: only the public part is visible.
            template = get_template("publicuser.html")
            titulo_pagina = "Página pública de " + peticion + "<br><br>"
            form_user = 'Bienvenido ' + username
            form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
            c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
        else: # Owner: the private view with the edit forms.
            template = get_template("privateuser.html")
            try:
                titulo_pagina = Usuario.objects.get(nombre=user_object).titulo_pagina
            except ObjectDoesNotExist:
                titulo_pagina = "Página personal de " + str(request.user) + "<br><br>"
            c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'lista': lista, 'form': formulario, 'css':css, 'titulo': titulo_pagina})
    else:
        # Anonymous visitor: public view with a hint on how to log in.
        template = get_template("publicuser.html")
        titulo_pagina = "Página pública de " + peticion + "<br><br>"
        form_user = 'Para loguearse vaya al botón de Inicio'
        c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def personalizar(request):
    """Render change.css with the logged-in user's font size and colour, or
    the defaults for anonymous visitors."""
    if request.user.is_authenticated():
        user_object = User.objects.get(username=request.user)
        user = Usuario.objects.get(nombre=user_object)
        letra, color = user.letra, user.color
    else:
        letra, color = "14px", "#FCFCFC"
    template = get_template("change.css")
    contexto = Context({'letra': letra, 'color': color})
    return HttpResponse(template.render(contexto), content_type="text/css")
def _text_child(doc, parent, tag, text):
    # Append <tag>text</tag> under parent and return the new element.
    node = doc.createElement(tag)
    parent.appendChild(node)
    node.appendChild(doc.createTextNode(text))
    return node


def _attr_node(doc, parent, nombre, text):
    # Append <atributo nombre="...">text</atributo> under parent.
    node = doc.createElement("atributo")
    parent.appendChild(node)
    node.setAttribute("nombre", nombre)
    node.appendChild(doc.createTextNode(text))
    return node


def _contenido_node(doc, aparcamiento):
    # Build one <Contenido> element describing a single parking lot, in the
    # exact attribute order the original emitted.
    item = doc.createElement("Contenido")
    _attr_node(doc, item, "ID-ENTIDAD", aparcamiento.entidad)
    _attr_node(doc, item, "NOMBRE", aparcamiento.nombre)
    _attr_node(doc, item, "DESCRIPCION", aparcamiento.descripcion)
    # Accessibility is serialised as "1"/"0", as in the original feed.
    if aparcamiento.accesibilidad == True:
        acces = 1
    else:
        acces = 0
    _attr_node(doc, item, "ACCESIBILIDAD", str(acces))
    _attr_node(doc, item, "CONTENT_URL", aparcamiento.content_url)
    _attr_node(doc, item, "LOCALIZACION", aparcamiento.localizacion)
    _attr_node(doc, item, "CLASE VIAL", aparcamiento.clase_vial)
    _attr_node(doc, item, "TIPO NUM", aparcamiento.tipo_num)
    _attr_node(doc, item, "NUM", str(aparcamiento.num))
    _attr_node(doc, item, "LOCALIDAD", aparcamiento.localidad)
    _attr_node(doc, item, "PROVINCIA", aparcamiento.provincia)
    _attr_node(doc, item, "CODIGO POSTAL", str(aparcamiento.codigo_postal))
    _attr_node(doc, item, "BARRIO", aparcamiento.barrio)
    _attr_node(doc, item, "DISTRITO", aparcamiento.distrito)
    _attr_node(doc, item, "COORDENADA X", str(aparcamiento.coordenada_x))
    _attr_node(doc, item, "COORDENADA Y", str(aparcamiento.coordenada_y))
    # The original appended one extra empty <atributo> here before the
    # contact block; preserved so the XML output stays identical.
    item.appendChild(doc.createElement("atributo"))
    datos = doc.createElement("DATOSDECONTACTO")
    item.appendChild(datos)
    _attr_node(doc, datos, "TELEFONO", aparcamiento.telefono)
    _attr_node(doc, datos, "EMAIL", aparcamiento.email)
    return item


def usuarios_xml(request, peticion):
    """Serialise the parking lots selected by user *peticion* as the
    'Contenidos' XML feed and return it as text/xml.

    The ~100 lines of copy-pasted DOM boilerplate were factored into the
    helpers above; element names, order and text are unchanged.
    """
    user_object = User.objects.get(username=peticion)
    doc = Document()
    cont = doc.createElement("Contenidos")
    doc.appendChild(cont)
    info = doc.createElement("infoDataset")
    cont.appendChild(info)
    _text_child(doc, info, "Nombre",
                "XML de aparcamientos seleccionados por el usuario " + peticion)
    _text_child(doc, info, "url", "http://localhost:1234/" + peticion + "/xml/")
    aparc = doc.createElement("Aparcamientos")
    cont.appendChild(aparc)
    try:
        usuario = Usuario.objects.get(nombre=user_object)
        lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
        for i in lista_seleccionados:
            aparc.appendChild(_contenido_node(doc, i.aparcamiento))
    except:
        # Best-effort, as before: any failure (e.g. no Usuario row) yields a
        # feed with an empty Aparcamientos element.
        print("")
    xml = doc.toprettyxml(indent=" ")
    return HttpResponse(xml, content_type = "text/xml")
@csrf_exempt
def aparcamientos(request):
    """Parking-lot listing page. A POSTed 'distrito' filters the list by
    district (upper-cased); GET shows the full list."""
    lista = lista_aparcamientos()
    filtrar = '<form action="" method="POST">'
    filtrar += '<br><br><input type="text" name="distrito">'
    filtrar += '<input type="submit" value="Filtrar por distrito">'
    template = get_template("aparcamientos.html")
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    else:
        form_user = "Para loguearse vaya al botón de Inicio"
    if request.method == "POST":
        # Districts are stored upper-case, so normalise the filter text.
        filtro_distrito = request.POST['distrito']
        filtro_distrito = filtro_distrito.upper()
        if filtro_distrito == '':
            lista_filtrada = "No ha introducido ningún filtro, introduzca distrito para filtrar " + lista
        else:
            aparcamientos_filtrados = Aparcamiento.objects.all()
            Encontrado = False
            lista_filtrada = "Los aparcamientos en el " + filtro_distrito + " son: "
            for i in aparcamientos_filtrados:
                if filtro_distrito == i.distrito:
                    Encontrado = True
                    nombre_aparcamiento = i.nombre
                    url_aparcamiento = i.content_url
                    lista_filtrada += "<p>" + nombre_aparcamiento + "</p><li><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
            if Encontrado == False: # No lot matched: the district is not valid.
                lista_filtrada = "Introduzca un nuevo distrito. " + filtro_distrito + " no es válido"
        c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':form_user})
    else:
        c = Context({'distrito': filtrar, 'lista': lista, 'login':form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
@csrf_exempt
def aparcamientos_id(request, recurso):
    """Detail view for the parking whose id is *recurso*.

    GET renders the parking's data, its comments and its like counter.
    POST either increments the like counter (body key 'Me+Gusta') or stores
    a new comment (form field 'Comentario').

    Bug fix: the login banner (form_user) was previously computed inside the
    try block, so a non-existent id raised NameError at the final Context()
    call instead of rendering the friendly error page. It is now computed
    up front, before any ORM lookup can fail.
    """
    template = get_template("aparcamientos.html")
    num_megustas = 0
    # Compute the login banner first: it is needed even when the lookup fails.
    if request.user.is_authenticated():
        username = str(request.user)
        form_user = 'Bienvenido ' + username
        form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
    else:
        form_user = "Para loguearse vaya al botón de Inicio"
    if request.method == 'POST':
        # First key of the urlencoded body decides which action was requested.
        key = request.body.decode("utf-8").split('=')[0]
        print(key)
        if key == 'Me+Gusta':
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
            aparcamiento.save()
            num_megustas = aparcamiento.contador_megusta
        else:
            coment = request.POST['Comentario']
            aparcamiento = Aparcamiento.objects.get(entidad=recurso)
            aparcamiento.contador_coments = aparcamiento.contador_coments + 1
            aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
            p.save()
    try:
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        nombre = aparcamiento.nombre
        descripcion = aparcamiento.descripcion
        accesibilidad = aparcamiento.accesibilidad
        localizacion = aparcamiento.localizacion
        via = aparcamiento.clase_vial
        num = aparcamiento.num
        localidad = aparcamiento.localidad
        provincia = aparcamiento.provincia
        codigo_postal = aparcamiento.codigo_postal
        barrio = aparcamiento.barrio
        distrito = aparcamiento.distrito
        coordenada_x = aparcamiento.coordenada_x
        coordenada_y = aparcamiento.coordenada_y
        telefono = aparcamiento.telefono
        email = aparcamiento.email
        if telefono == '':
            telefono = "No disponible"
        if email == '':
            email = "No disponible"
        if accesibilidad == 1:
            acces = "Libre"
        else:
            acces = "Ocupado"
        lista_aparcamientos = Aparcamiento.objects.all()
        list_coments = ""
        aparcamiento = Aparcamiento.objects.get(entidad=recurso)
        num_megustas = aparcamiento.contador_megusta
        for i in lista_aparcamientos:
            if i.entidad == recurso:
                comentarios = Comentario.objects.filter(aparcamiento=i)
                if len(comentarios) != 0:
                    list_coments = "<li><p>COMENTARIOS</p><ol>"
                    for j in comentarios:
                        list_coments += "<li>" + j.coment + "<br>"
                Response = "<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: " + recurso + "</br></p>"
                Response += "<a href=" + i.content_url + ">" + i.nombre + "</a><br>"
                Response += "Descripción: " + descripcion + "</br>"
                Response += "Accesibilidad: " + acces + "</br>"
                Response += "Localización: " + via + " " + localizacion + ", nº " + str(num)
                Response += " " + localidad + " (" + str(codigo_postal) + ")</br>"
                Response += "Ubicación: " + barrio + " " + distrito + " Coordenadas: " + str(coordenada_x) + " , " + str(coordenada_y) + "<br><br>"
                Response += "INFORMACIÓN DE CONTACTO </br>"
                Response += "Teléfono: " + telefono + "</br>"
                Response += "Email: " + email + "</br>" + list_coments + "</ol>"
        if num_megustas != 0:
            Response += "</br><li>Numero de me gustas es: " + str(num_megustas) + "<br>"
        else:
            Response += "</br><li>Se el primero en indicar que te gusta la página<br>"
        # Logged-in users additionally get the comment form.
        if request.user.is_authenticated():
            formulario = '<form action="" method="POST">'
            formulario += '<br>Puede introducir un comentario si lo desea ' + str(request.user) + '<br><input type="text" name="Comentario">'
            formulario += '<input type="submit" value="Comentar"></form>'
            Response += formulario
        megusta = ''
        megusta += '<br> Indica que te gusta este aparcamiento</br>'
        megusta += '<form action="" method="POST">'
        megusta += '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
        Response += megusta
    except ObjectDoesNotExist:
        Response = "Este id no se corresponde con ningún aparcamiento"
    c = Context({'lista': Response, 'login': form_user})
    renderizado = template.render(c)
    return HttpResponse(renderizado)
def about(request):
    """Render the static help page describing every section of the site."""
    template = get_template("about.html")
    # The page body is a fixed sequence of HTML fragments; join them once.
    partes = (
        "DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>",
        "------------------------------------ Página principal ---------------------------------------------------",
        "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>",
        "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>",
        "<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>",
        "<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>",
        "------------------------------------ Página con los aparcamientos ---------------------------------------------------",
        "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>",
        "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>",
        "<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>",
        "------------------------------------ Interfaz pública de usuario ---------------------------------------------------",
        "<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>",
        "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>",
        "------------------------------------ Interfaz privada de usuario ---------------------------------------------------",
        "<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>",
        "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>",
        "<li> Formulario para cambiar el título de su página personal.</li>",
        "<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>",
        "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>",
        "------------------------------------ Pie de pagina ---------------------------------------------------",
        "<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>",
        "<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>",
        "------------------------------------ Página XML de un usuario ---------------------------------------------------",
        "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>",
    )
    contexto = Context({'lista': "".join(partes)})
    return HttpResponse(template.render(contexto))
|
9,855 | 8c86c0969c47a59db5bd147d3e051a29118d6bf2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# looping.py
#
# Copyright 2012 Jelle Smet <development@smetj.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#import uvent
#uvent.install()
from gevent import spawn, sleep
from sys import stdout
def looper0():
    """Forever: write 100 '0' characters, then yield to the gevent hub."""
    while True:
        emitted = 0
        while emitted < 100:
            stdout.write("0")
            emitted += 1
        sleep(0)  # cooperative yield so the sibling greenlet can run
def looper1():
    """Forever: write 100 '1' characters, then yield to the gevent hub."""
    while True:
        emitted = 0
        while emitted < 100:
            stdout.write("1")
            emitted += 1
        sleep(0)  # cooperative yield so the sibling greenlet can run
def main():
    """Spawn both writer greenlets and let them interleave for five seconds."""
    for worker in (looper0, looper1):
        spawn(worker)
    sleep(5)
# Run the cooperative-scheduling demo only when executed as a script.
if __name__ == '__main__':
    main()
|
9,856 | 890d50c741ffd576312c63dc450e274b4517bf12 | from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import numpy as np
def preprocess_transformers(y_train, transf):
    """Transform the training target prior to model fitting.

    Parameters:
        y_train: target values; a pandas object for 'ln' (uses .values),
                 a 2-D array/DataFrame for the sklearn scalers.
        transf: one of 'ln', 'minmax', 'standard', 'robust', 'boxcox'.

    Returns:
        (transformed y_train, fitted scaler) — the scaler is '' for the
        'ln' case so transformacion_inversa() knows to use exp().

    Raises:
        ValueError: for an unrecognized *transf* (previously this fell
        through to a NameError on an unassigned scaler).
    """
    if transf == 'ln':
        # Natural-log transform; no scaler object to return.
        # assumes y_train is a pandas object — TODO confirm at call sites
        y_train = np.log(y_train).values
        mm_scaler2 = ''
        return y_train, mm_scaler2
    if transf == 'minmax':
        scaler2 = MinMaxScaler()
    elif transf == 'standard':
        scaler2 = StandardScaler()
    elif transf == 'robust':
        scaler2 = RobustScaler()
    elif transf == 'boxcox':
        # NOTE: despite the name, Yeo-Johnson is used (handles zeros/negatives).
        scaler2 = PowerTransformer(method='yeo-johnson')
    else:
        raise ValueError("Unknown transformation: %r" % (transf,))
    # The original code also built an identical, unused `scaler`; removed.
    mm_scaler2 = scaler2.fit(y_train)
    y_train = mm_scaler2.transform(y_train)
    return y_train, mm_scaler2
def transformacion_inversa(y_predict, mm_scaler2):
    """Undo the target transformation applied by preprocess_transformers().

    mm_scaler2 is '' when the 'ln' transform was used (invert with exp);
    otherwise it is a fitted sklearn scaler whose inverse_transform applies.
    """
    if mm_scaler2 == '':
        return np.exp(y_predict)
    return mm_scaler2.inverse_transform(pd.DataFrame(y_predict))
def predict_model(config, model):
    """Run *model* on *config*, wrapping a plain dict into a one-row DataFrame.

    Any non-dict input is assumed to already be a DataFrame-like object.
    Returns whatever model.predict() returns.
    """
    df = pd.DataFrame(config, index=[0]) if type(config) is dict else config
    print(f'df: {df}')
    # Placeholder hook: target scaling was experimented with here and disabled.
    prepared_df = df
    y_pred = model.predict(prepared_df)
    print(f'y_pred {y_pred}')
    return y_pred
|
9,857 | 30405a6f20a44b2252b6894ef6d0e818861702f8 | import sys
sys.path.append('preprocess')
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
import numpy as np
import refit_cfg
import os
import random
from sklearn.model_selection import train_test_split
# Appliances handled by this REFIT preprocessing pipeline.
name = ['WashingMachine', 'Kettle', 'Microwave', 'Fridge', 'Dishwasher']
# Per-appliance {house_id: channel_id} tables, taken from refit_cfg.
appliance_dict = {
    'WashingMachine': refit_cfg.washingmachine,
    'Kettle': refit_cfg.kettle,
    'Microwave': refit_cfg.microwave,
    'Fridge': refit_cfg.fridge,
    'Dishwasher': refit_cfg.dishwasher
}
def align_process(house_id):
    """Resample one house's raw readings onto a regular 8-second grid.

    Walks the timeline in 8-second steps, snapping each grid point to the
    nearest raw sample; whenever the gap between two consecutive raw samples
    exceeds refit_cfg.separation_threshold, the grid restarts at the sample
    after the gap. Saves the aligned rows to after_align\\<house_id>.npy.
    Rows are assumed to be [timestamp, mains, ch1, ch2, ...] — TODO confirm.
    """
    data = np.load('data\\REFIT\\original_data\\%d.npy' % house_id)
    new_data = []
    current_index = 0
    current_time = int(data[0][0])
    end_time = int(data[-1][0]) + 8
    interval_threshold = refit_cfg.separation_threshold
    isend = 0  # set when the scan reaches the last raw sample
    data_length = len(data)
    while current_time <= end_time:
        current_interval = int(data[current_index+1][0]) - int(data[current_index][0])
        if current_interval < interval_threshold: # small interval: stay on the regular grid
            if current_time > int(data[current_index][0]):
                # Advance to the first raw sample at or after the grid time.
                temp_index = current_index + 1
                while current_time > int(data[temp_index][0]):
                    temp_index += 1
                    if temp_index > (data_length-1):
                        temp_index -= 1
                        break
                # Snap to whichever neighbouring sample is closer in time.
                if abs(current_time - int(data[temp_index-1][0])) > abs(int(data[temp_index][0])-current_time):
                    current_index = temp_index
                    if temp_index == (data_length-1):
                        print('The end!')
                        isend = 1
                else:
                    current_index = temp_index - 1
            # Copy the chosen raw row but stamp it with the grid time.
            t = []
            for element in data[current_index]:
                t.append(element)
            t[0] = current_time
            new_data.append(t)
            if isend == 1:
                break
            current_time += 8
            if current_index % 1000 == 0:
                print('House %d processing: %f' % (house_id, current_index/data_length))
        else: # big interval: gap in the recording, restart the grid after it
            current_index += 1
            current_time = int(data[current_index][0])
    np.save('data\\REFIT\\after_align\\%d.npy' % house_id, new_data)
def visual(house_id, channel_id, start, length):
    """Plot *length* samples of one channel from an aligned house file."""
    data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
    print(len(data))
    # +1 skips the timestamp column in front of the channel columns.
    col = channel_id + 1
    series = [int(row[col]) for row in data]
    plt.plot(series[start:start + length])
    plt.show()
def diff(house_id):
    """Plot the successive timestamp gaps of one aligned house file."""
    data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
    gaps = [int(nxt[0]) - int(cur[0]) for cur, nxt in zip(data, data[1:])]
    plt.plot(gaps)
    plt.show()
    plt.close()
def appliance_separation(dict, appliance_name):
    """Extract (mains, appliance) pairs per house into appliance_data\\<name>.

    NOTE(review): this function is redefined later in this module, so that
    later definition shadows this one on import; also the parameter `dict`
    shadows the builtin.
    """
    path = 'data\\REFIT\\appliance_data\\%s' % appliance_name
    if not os.path.exists(path):
        os.mkdir(path)
    for house_id, channel_id in dict.items():
        data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
        appliance_data = []
        for row in data:
            # Keep mains (col 1) and the appliance channel as one record.
            appliance_data.append([row[1], row[channel_id+1]])
        np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)), appliance_data)
        print('Appliance %s House %d complete!' % (appliance_name, house_id))
def show_appliance(house_id, appliance_name):
    """Plot mains and appliance curves for one house of one appliance.

    NOTE(review): redefined later in this module (the later definition wins);
    also loads from after_align\\<appliance>\\... whereas appliance_separation
    saves under appliance_data\\<appliance> — verify the intended path.
    """
    channel_id = appliance_dict[appliance_name][house_id]
    data = np.load('data\\REFIT\\after_align\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
    print(len(data))
    mains = []
    app = []
    for i in data:
        mains.append(int(i[0]))
        app.append(int(i[1]))
    plt.figure(figsize=(20, 8))
    plt.plot(mains)
    plt.plot(app)
    plt.show()
def cull(cull_dict):
    """Remove manually-identified bad segments from each appliance's data.

    cull_dict: {appliance_name: {house_id: [[bad_start, bad_end], ...]}}
    with bad index ranges in ascending order. Keeps everything outside the
    bad ranges and writes the result under after_culling\\<appliance>.
    NOTE(review): redefined later in this module; the later definition wins.
    """
    for appliance_name, _dict in cull_dict.items():
        path = 'data\\REFIT\\after_culling\\%s' % appliance_name
        if not os.path.exists(path):
            os.mkdir(path)
        for house_id, cull_list in _dict.items():
            channel_id = appliance_dict[appliance_name][house_id]
            data = np.load('data\\REFIT\\after_align\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
            new_data = []
            # Invert the bad ranges into the list of ranges to keep.
            _cull_list = [[0, cull_list[0][0]]]
            for i in range(len(cull_list)-1):
                _cull_list.append([cull_list[i][1], cull_list[i+1][0]])
            _cull_list.append([cull_list[-1][1], (len(data)-1)])
            for i in _cull_list:
                if i[1] - i[0] != 0:
                    for j in range(i[0], i[1]):
                        new_data.append(data[j])
            np.save('data\\REFIT\\after_culling\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)
            print('House %d %s complete!' % (house_id, appliance_name))
def appliance_separation(dict, appliance_name):
    """
    Split out each appliance's data into the appliance_data folder, one
    sub-folder per appliance, with files named by house_id and channel_id.
    :param dict: {house_id: channel_id} source table for this appliance
    :param appliance_name: appliance name, used as the sub-folder name
    :return: None (writes .npy files as a side effect)
    """
    path = 'data\\REFIT\\appliance_data\\%s' % appliance_name
    if not os.path.exists(path):
        os.mkdir(path)
    for house_id, channel_id in dict.items():
        data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
        appliance_data = []
        for row in data:
            appliance_data.append([row[1], row[channel_id+1]]) # keep mains and appliance readings as one record
        np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)), appliance_data)
        print('Appliance %s House %d complete!' % (appliance_name, house_id))
def show_appliance(house_id, appliance_name):
    """
    Inspect one appliance's curve in detail, so that long stretches of
    missing or corrupted data can be marked by hand; the marks become the
    cull_dict used by cull() to delete those segments.
    :param house_id: house to inspect
    :param appliance_name: appliance to inspect
    :return: None (shows a matplotlib figure)
    """
    channel_id = appliance_dict[appliance_name][house_id]
    data = np.load('data\\REFIT\\after_culling\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
    print(len(data))
    mains = []
    app = []
    for i in data:
        mains.append(int(i[0]))
        app.append(int(i[1]))
    plt.figure(figsize=(20, 8))
    plt.plot(mains)
    plt.plot(app)
    plt.show()
def cull(cull_dict):
    """
    Based on the plotted curves, delete the large empty segments; after
    deleting, the result should be re-plotted and compared.
    NOTE(review): this loads from and saves to the same after_culling_2
    path, so it overwrites its own input — presumably the load path should
    be the previous stage (after_culling); confirm before use.
    :param cull_dict: {appliance_name: {house_id: [[bad_start, bad_end], ...]}}
    :return: None (writes .npy files as a side effect)
    """
    for appliance_name, _dict in cull_dict.items():
        path = 'data\\REFIT\\after_culling_2\\%s' % appliance_name
        if not os.path.exists(path):
            os.mkdir(path)
        for house_id, cull_list in _dict.items():
            channel_id = appliance_dict[appliance_name][house_id]
            data = np.load('data\\REFIT\\after_culling_2\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
            new_data = []
            # Reshape cull_list into the complementary intervals of valid data.
            _cull_list = [[0, cull_list[0][0]]]
            for i in range(len(cull_list)-1):
                _cull_list.append([cull_list[i][1], cull_list[i+1][0]])
            _cull_list.append([cull_list[-1][1], (len(data)-1)])
            for i in _cull_list:
                if i[1] - i[0] != 0:
                    for j in range(i[0], i[1]):
                        new_data.append(data[j])
            np.save('data\\REFIT\\after_culling_2\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)
            print('House %d %s complete!' % (house_id, appliance_name))
def separate(appliance_name):
    """Cut each house's culled series into fixed-width windows.

    For every house listed in refit_cfg.train_validation / refit_cfg.test,
    two passes are made: a sliding pass with a random stride in
    (0, window_width], then refit_cfg.random_clip extra windows at random
    offsets. Results are saved per house under <appliance>\\1024\\.
    """
    window_width = refit_cfg.window_width[appliance_name]
    data_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    count = 0  # global window counter, only used to throttle progress prints
    appliance_train_validation = []
    appliance_test = []
    main_train_validation = []
    main_test = []
    for house_id, channel_id in refit_cfg.train_validation[appliance_name].items():
        # train & validation
        appliance_train_validation.clear()
        main_train_validation.clear()
        data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))
        current_head = 0
        data_length = len(data)
        end = data_length - window_width - 1
        # Pass 1: slide through the series with a random stride.
        while current_head < end:
            temp_main = []
            temp_appliance = []
            for i in range(current_head, current_head+window_width):
                temp_main.append(data[i][0])
                temp_appliance.append(data[i][1])
            r = random.random()
            current_head += int(window_width*r)
            appliance_train_validation.append(temp_appliance)
            main_train_validation.append(temp_main)
            count += 1
            if count % 1000 == 0:
                print('T & V 1: House %d %f' % (house_id, (current_head / data_length)))
        # Pass 2: extra windows at uniformly random start offsets.
        data_length -= window_width
        random_clip = refit_cfg.random_clip[appliance_name]
        for i in range(random_clip):
            r = random.random()
            start = int(r*data_length)
            temp_main = []
            temp_appliance = []
            for j in range(start, start + window_width):
                temp_main.append(data[j][0])
                temp_appliance.append(data[j][1])
            appliance_train_validation.append(temp_appliance)
            main_train_validation.append(temp_main)
            count += 1
            if count % 1000 == 0:
                print('T & V 2: House %d %f' % (house_id, (i / random_clip)))
        print('Train & Validation: House %d %s complete!' % (house_id, appliance_name))
        np.save(os.path.join(data_path, '1024\\appliance_train_validation_%d.npy' % house_id), appliance_train_validation)
        np.save(os.path.join(data_path, '1024\\main_train_validation_%d.npy' % house_id), main_train_validation)
    # test — same two passes over the houses reserved for testing
    count = 0
    for house_id, channel_id in refit_cfg.test[appliance_name].items():
        appliance_test.clear()
        main_test.clear()
        data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))
        current_head = 0
        data_length = len(data)
        end = data_length - window_width - 1
        while current_head < end:
            temp_main = []
            temp_appliance = []
            for i in range(current_head, current_head+window_width):
                temp_main.append(data[i][0])
                temp_appliance.append(data[i][1])
            r = random.random()
            current_head += int(r*window_width)
            appliance_test.append(temp_appliance)
            main_test.append(temp_main)
            count += 1
            if count % 1000 == 0:
                print('Test 1: House %d %f' % (house_id, (current_head / data_length)))
        data_length -= window_width
        for i in range(refit_cfg.random_clip[appliance_name]):
            r = random.random()
            start = int(r*data_length)
            temp_main = []
            temp_appliance = []
            for j in range(start, start + window_width):
                temp_main.append(data[j][0])
                temp_appliance.append(data[j][1])
            appliance_test.append(temp_appliance)
            main_test.append(temp_main)
            count += 1
            if count % 1000 == 0:
                print('Test 2: House %d %f' % (house_id, (i / data_length)))
        print('Test 2: House %d %s complete!' % (house_id, appliance_name))
        np.save(os.path.join(data_path, '1024\\appliance_test_%d.npy' % house_id), appliance_test)
        np.save(os.path.join(data_path, '1024\\main_test_%d.npy' % house_id), main_test)
def clip_visual(appliance_name):
    """Save 1000 randomly chosen training windows as JPGs (mains above, appliance below)."""
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train_.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train_.npy'))
    print('Data load complete!')
    loop = 1000
    # Appliance windows are 512 wide, centred inside the 1024-wide mains axis.
    x = np.linspace(256, 768, 512)
    length = len(appliance_data)
    for picture_id in range(loop):
        choice = int(random.random() * length)
        plt.figure(figsize=(25, 10), dpi=100)
        plt.subplot(211)
        plt.xlim(0, 1024)
        plt.plot(main_data[choice])
        plt.subplot(212)
        plt.xlim(0, 1024)
        plt.plot(x, appliance_data[choice])
        savefig(os.path.join(base_path, 'clip_view\\%d.jpg' % picture_id))
        plt.close()
def train_validation_split(appliance_name):
    """Split the merged train+validation windows 80/20 and save the four parts."""
    data_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance = np.load(os.path.join(data_path, 'appliance_train_validation.npy'))
    main = np.load(os.path.join(data_path, 'main_train_validation.npy'))
    appliance_train, appliance_validation, main_train, main_validation = \
        train_test_split(appliance, main, test_size=0.2)
    print(len(appliance_train))
    print(len(main_train))
    for file_name, windows in (('appliance_train.npy', appliance_train),
                               ('main_train.npy', main_train),
                               ('appliance_validation.npy', appliance_validation),
                               ('main_validation.npy', main_validation)):
        np.save(os.path.join(data_path, file_name), windows)
def data_integration(appliance_name):
    """Concatenate per-house window files into single train+validation and test arrays."""
    data_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance = []
    main = []
    for house_id in refit_cfg.train_validation[appliance_name]:
        appliance.extend(np.load(os.path.join(data_path, 'appliance_train_validation_%d.npy' % house_id)))
        main.extend(np.load(os.path.join(data_path, 'main_train_validation_%d.npy' % house_id)))
    print(len(appliance))
    print(len(main))
    np.save(os.path.join(data_path, 'appliance_train_validation.npy'), appliance)
    np.save(os.path.join(data_path, 'main_train_validation.npy'), main)
    appliance_test = []
    main_test = []
    for house_id in refit_cfg.test[appliance_name]:
        appliance_test.extend(np.load(os.path.join(data_path, 'appliance_test_%d.npy' % house_id)))
        main_test.extend(np.load(os.path.join(data_path, 'main_test_%d.npy' % house_id)))
    print(len(appliance_test))
    print(len(main_test))
    np.save(os.path.join(data_path, 'appliance_test.npy'), appliance_test)
    np.save(os.path.join(data_path, 'main_test.npy'), main_test)
def positive_negative(appliance_name):
    """Report how many training windows exceed each total-energy threshold."""
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
    d = {th: 0 for th in threshold}
    print(d)
    for th in threshold:
        for window in appliance_data:
            total = sum(int(v) for v in window)
            if total > th:
                d[th] += 1
        print('Thres %d complete!' % th)
    for thres, count in d.items():
        print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data), count/len(appliance_data)))
def clip_view(appliance_name, thres):
    """Save a JPG of every training window whose summed readings exceed *thres*."""
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    count = 0
    for window in appliance_data:
        total = sum(int(v) for v in window)
        if total > thres:
            plt.figure(figsize=(25, 10), dpi=100)
            plt.plot(window.astype(int))
            savefig(os.path.join(base_path, 'clip_view\\%d.jpg' % count))
            plt.close()
            count += 1
def test_process(appliance_name):
    """Zero out test windows whose peak is below 0.05 (treated as fully off)."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))
    zeros = [0.0] * 512
    new_app = []
    for window in appliance_data:
        peak = np.max(window)
        if peak < 0.05:
            print(peak)
            new_app.append(zeros)
        else:
            new_app.append(window)
    # Overwrites the input file in place.
    np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)
def separate_positive_negative(appliance_name, thres, peak):
    """Split training windows into positive (appliance active) and negative sets.

    A window is positive when its summed readings exceed *thres* AND the
    maximum over the centre 512 samples reaches *peak*; otherwise its
    appliance target is replaced by an all-zero window.
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train.npy'))
    count = 0
    appliance_positive = []
    appliance_negative = []
    main_positive = []
    main_negative = []
    # NOTE(review): all negative rows share this one list object.
    appliance_temp = [0] * 1024
    for i in range(len(appliance_data)):
        sum = 0  # NOTE(review): shadows builtins sum/max
        max = 0
        for j in appliance_data[i]:
            sum += int(j)
        # Peak is measured only over the centre 512 samples of the window.
        for j in range(512):
            if int(appliance_data[i][j+256]) > max:
                max = int(appliance_data[i][j+256])
        if max < peak:
            sum = 0  # force the window into the negative set
        if sum > thres:
            appliance_positive.append(appliance_data[i])
            main_positive.append(main_data[i])
        else:
            appliance_negative.append(appliance_temp)
            main_negative.append(main_data[i])
        if i % 1000 == 0:
            print('Processing: %f' % (i/len(appliance_data)))
    np.save(os.path.join(base_path, 'appliance_positive.npy'), appliance_positive)
    np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)
    np.save(os.path.join(base_path, 'appliance_negative.npy'), appliance_negative)
    np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)
def generate_balanced_dataset(appliance_name, negative_ratio):
    """Build a shuffled training set of all positives plus sampled negatives.

    negative_ratio controls how many negatives are drawn (with replacement)
    relative to the number of positive windows.
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_positive = list(np.load(os.path.join(base_path, 'appliance_positive.npy')))
    appliance_negative = np.load(os.path.join(base_path, 'appliance_negative.npy'))
    main_positive = list(np.load(os.path.join(base_path, 'main_positive.npy')))
    main_negative = np.load(os.path.join(base_path, 'main_negative.npy'))
    print('Data load complete!')
    positive_length = len(appliance_positive)
    negative_length = len(appliance_negative)
    print('Postive length: %d negative length: %d' % (positive_length, negative_length))
    # Append randomly sampled negatives (duplicates possible) to the positives.
    for i in range(int(positive_length*negative_ratio)):
        r = int(random.random()*negative_length)
        appliance_positive.append(appliance_negative[r])
        main_positive.append(main_negative[r])
    print('Data generate complete! length: %d' % (len(appliance_positive)))
    # Shuffle mains and targets with the same permutation of indices.
    index = np.linspace(0, len(appliance_positive)-1, len(appliance_positive)).astype(int)
    random.shuffle(index)
    appliance_new = []
    main_new = []
    for i in index:
        appliance_new.append(appliance_positive[i])
        main_new.append(main_positive[i])
    print('Data shuffle complete!')
    np.save(os.path.join(base_path, 'appliance_train_balanced.npy'), appliance_new)
    np.save(os.path.join(base_path, 'main_train_balanced.npy'), main_new)
    print('Data save complete!')
def shrink(appliance_name, scale):
    """Divide the balanced training windows by *scale* and save them as floats."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train_balanced.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))
    print('Data load complete!')
    appliance_new = [[float(int(v) / scale) for v in window] for window in appliance_data]
    main_new = [[float(int(v) / scale) for v in window] for window in main_data]
    print('Process complete!')
    np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)
def shrink_validation(appliance_name, scale):
    """Divide the validation windows by *scale* and save them as floats."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_validation.npy'))
    main_data = np.load(os.path.join(base_path, 'main_validation.npy'))
    print('Data load complete!')
    appliance_new = [[float(int(v) / scale) for v in window] for window in appliance_data]
    main_new = [[float(int(v) / scale) for v in window] for window in main_data]
    print('Process complete!')
    np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new)
def appliance_1024to512(appliance_name):
    """Crop the centre 512 samples (indices 256..767) out of each 1024-wide appliance window."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name

    def crop(windows):
        # keep only the centre half of every window, cast to float
        return [[float(w[j]) for j in range(256, 768)] for w in windows]

    appliance_train = np.load(os.path.join(base_path, 'appliance_train_1000.npy'))
    appliance_validation = np.load(os.path.join(base_path, 'appliance_validation_1000.npy'))
    appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy'))
    np.save(os.path.join(base_path, 'appliance_train_512.npy'), crop(appliance_train))
    np.save(os.path.join(base_path, 'appliance_validation_512.npy'), crop(appliance_validation))
    np.save(os.path.join(base_path, 'appliance_test_512.npy'), crop(appliance_test))
def shrink_test(appliance_name, scale):
    """Divide the test windows by *scale* and save them as floats.

    Bug fix: the output filenames previously hard-coded "_1000" regardless
    of *scale*; they now use the scale value, matching shrink() and
    shrink_validation() (identical behavior for the existing scale=1000 call).
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))
    main_data = np.load(os.path.join(base_path, 'main_test.npy'))
    appliance_new = []
    main_new = []
    print('Data load complete!')
    for i in range(len(appliance_data)):
        appliance_temp = []
        main_temp = []
        for j in range(len(appliance_data[i])):
            appliance_temp.append(float(int(appliance_data[i][j])/scale))
        for j in range(len(main_data[i])):
            main_temp.append(float(int(main_data[i][j])/scale))
        appliance_new.append(appliance_temp)
        main_new.append(main_temp)
    print('Process complete!')
    np.save(os.path.join(base_path, 'appliance_test_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_test_%d.npy' % scale), main_new)
# Full preprocessing pipeline for one appliance: window extraction,
# per-house merging, train/validation split, positive/negative balancing,
# scaling by 1000, and centre-cropping appliance targets to 512 samples.
if __name__ == '__main__':
    appliance_name = 'WashingMachine'
    separate(appliance_name)
    data_integration(appliance_name)
    train_validation_split(appliance_name)
    separate_positive_negative(appliance_name, 1500, 20)
    generate_balanced_dataset(appliance_name, 1)
    shrink(appliance_name, 1000)
    shrink_validation(appliance_name, 1000)
    shrink_test(appliance_name, 1000)
    appliance_1024to512(appliance_name)
    # test_process(appliance_name)
    print('Process complete!!!')
|
9,858 | 5ae4f489da7b4f0913c9b16c86cc60537cc51234 | import plotly.figure_factory as ff
import pandas as pd
import csv
# Load the phone-ratings CSV and show a smoothed distribution (KDE only,
# no histogram bars) of the "Avg Rating" column as a single trace.
df=pd.read_csv("phone.csv")
fig=ff.create_distplot([df["Avg Rating"].tolist()],["Samsung"],show_hist=False)
fig.show()
9,859 | d76c1507594bb0c1ed7a83e6c5961097c7fbf54a | from django.urls import path
from django.contrib.auth import views as auth_views
from . views import register, channel
# Auth and account routes. register/channel are local views; everything
# else reuses django.contrib.auth's class-based views with app-specific
# templates under templates/user/.
urlpatterns = [
    path('register/', register, name="register"),
    path('channel/', channel, name="channel"),
    path('login/', auth_views.LoginView.as_view(template_name='user/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='user/logout.html'), name='logout'),
    path('password_reset/',auth_views.PasswordResetView.as_view(template_name="user/password_reset.html"), name="password_reset"),
    path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(template_name="user/password_reset_done.html"), name="password_reset_done"),
    path('password_reset_confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name="user/password_reset_confirm.html"),name="password_reset_confirm"),
    path('password_reset_complete/',auth_views.PasswordResetCompleteView.as_view(template_name="user/password_reset_complete.html"),name="password_reset_complete"),
]
|
9,860 | 6fa0e1dabd178507c32c62146b404bb42f8445d4 | import pandas as pd
import numpy as np
#from ctaFunction import std_normalized
def barStdNormal(bars, timeperiod=5):
    """Rolling "normalized std" of the close price over `timeperiod` bars.

    NOTE(review): `std_normalized` is undefined in this module — its
    import (`from ctaFunction import std_normalized`) is commented out
    above, so calling this function raises NameError. Restore that
    import or inline the helper before use.

    :param bars: DataFrame-like with a 'close' column (pandas Series).
    :param timeperiod: rolling window length in bars.
    :return: Series of per-window values from std_normalized.
    """
    close = bars['close']
    result = close.rolling(timeperiod).apply(std_normalized)
    return result
9,861 | 1077efaa4379ff0e114a0b8d4d3b7156758e070f | # coding: utf-8
from korean.morphophonemics.phonology import Syllable
from notes.old_morphology import Noun, Verb
class Case (object):
    """Abstract base for Korean grammatical case markers.

    Subclasses implement apply(noun) to append the case particle's
    syllable(s) to the noun.
    """
    pass
class Nominative(Case):
    """Subject marker: -i after a consonant-final syllable, -ga otherwise."""

    def apply(self, noun):
        # has_tail() is True when the last syllable ends in a consonant.
        particle = u'이' if noun.has_tail() else u'가'
        noun.syllables.append(Syllable(particle))
class Accusative(Case):
    """Object marker: -eul after a consonant-final syllable, -reul otherwise."""

    def apply(self, noun):
        particle = u'을' if noun.has_tail() else u'를'
        noun.syllables.append(Syllable(particle))
class Locative(Case):
    """Location marker: always appends the two syllables -e-seo."""

    def apply(self, noun):
        noun.syllables.extend([Syllable(u'에'), Syllable(u'서')])
class Sentence (object):
    """An SOV sentence assembled from optional Noun/Verb parts.

    Constructing a Sentence immediately attaches case particles to the
    parts it was given (subject -> Nominative, object -> Accusative,
    location -> Locative) and then calls analyze().
    """

    def __init__(self, subject=None, object=None, location=None, verb=None):
        # NOTE: parameter name `object` shadows the builtin; kept for callers.
        self.subject = subject
        self.object = object
        self.location = location
        self.verb = verb
        self.assign_cases()
        self.analyze()

    def assign_cases(self):
        """Attach a case particle to each part that is present.

        Absent parts are None, so .set_case raises AttributeError,
        which is deliberately swallowed to skip them.
        """
        try:
            self.subject.set_case(Nominative())
        except AttributeError:
            pass
        try:
            self.object.set_case(Accusative())
        except AttributeError:
            pass
        try:
            self.location.set_case(Locative())
        except AttributeError:
            pass

    def analyze(self):
        # Placeholder: no analysis implemented yet.
        pass

    def __str__(self):
        # Render in subject-location-object-verb order, dropping absent parts.
        sentence = ' '.join([
            str(part) for part in [
                self.subject,
                self.location,
                self.object,
                self.verb,
            ] if part
        ])
        return sentence
def main():
    # Demo: build three sentences of increasing complexity and print them.
    # (This module is Python 2 -- note the print statements below.)
    dave = Noun(u'데이브')
    emma = Noun(u'연정')
    elly = Noun(u'엘리')
    house = Noun(u'집')
    treat = Noun(u'트리트')
    lunch = Noun(u'점심')
    eat = Verb(u'머거요')
    s1 = Sentence(subject=emma, verb=eat)
    s2 = Sentence(subject=dave, object=lunch, verb=eat)
    s3 = Sentence(subject=elly, object=treat, location=house, verb=eat)
    print s1
    print s2
    print s3

if __name__ == '__main__': main()
|
9,862 | 19c1a50cf19f04a9e0d0163a9383cb900bca1d38 | #!/usr/bin/env python3
import click
# CLI entry point: greets the user; click prompts for --name when it is
# not supplied on the command line. (A docstring is deliberately avoided
# here -- click would surface it as the command's --help text.)
@click.command()
@click.option("--name", prompt = "Your name")
def hello(name):
    print("hello", name)

if __name__ == '__main__':
    hello()
|
9,863 | a8197a4f0bb84e734696bf43fa976c76732d75b8 | from django.contrib import admin
from trips.models import Post
# Make trip Post records editable via the Django admin site.
admin.site.register(Post)
|
9,864 | fb258521fdfded0062cbe30651268bf5410d3384 | # coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from .. import _serialization
if sys.version_info >= (3, 9):
from collections.abc import MutableMapping
else:
from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object
class AnswersFromTextOptions(_serialization.Model):
    """A question together with the text records it should be answered from.

    All required parameters must be populated in order to send to Azure.

    :ivar question: User question to run against the given text records. Required.
    :vartype question: str
    :ivar text_documents: Text records to search for the answer. Required.
    :vartype text_documents: list[~azure.ai.language.questionanswering.models.TextDocument]
    :ivar language: BCP-47 language tag of the records (e.g. "en", "es");
     the service defaults to "en" when unset.
    :vartype language: str
    """

    _validation = {"question": {"required": True}, "text_documents": {"required": True}}

    _attribute_map = {
        "question": {"key": "question", "type": "str"},
        "text_documents": {"key": "records", "type": "[TextDocument]"},
        "language": {"key": "language", "type": "str"},
    }

    def __init__(self, *, question: str, text_documents: List["_models.TextDocument"], language: Optional[str] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.question = question
        self.text_documents = text_documents
        self.language = language
class AnswersFromTextResult(_serialization.Model):
    """Answers computed over raw text records.

    :ivar answers: The answer results.
    :vartype answers: list[~azure.ai.language.questionanswering.models.TextAnswer]
    """

    _attribute_map = {"answers": {"key": "answers", "type": "[TextAnswer]"}}

    def __init__(self, *, answers: Optional[List["_models.TextAnswer"]] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.answers = answers
class AnswersOptions(_serialization.Model):
    """Parameters for querying a knowledge base.

    :ivar qna_id: Exact QnA ID to fetch; takes priority over ``question``.
    :vartype qna_id: int
    :ivar question: User question to run against the knowledge base.
    :vartype question: str
    :ivar top: Maximum number of answers to return.
    :vartype top: int
    :ivar user_id: Unique identifier for the user.
    :vartype user_id: str
    :ivar confidence_threshold: Minimum answer score, in [0, 1].
    :vartype confidence_threshold: float
    :ivar answer_context: Previous QnA turn information.
    :vartype answer_context: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerContext
    :ivar ranker_kind: Type of ranker to use.
    :vartype ranker_kind: str
    :ivar filters: Metadata / knowledge-base-source filters applied to QnAs.
    :vartype filters: ~azure.ai.language.questionanswering.models.QueryFilters
    :ivar short_answer_options: Answer-span prediction configuration.
    :vartype short_answer_options: ~azure.ai.language.questionanswering.models.ShortAnswerOptions
    :ivar include_unstructured_sources: Enable querying unstructured sources.
    :vartype include_unstructured_sources: bool
    """

    _validation = {"confidence_threshold": {"maximum": 1, "minimum": 0}}

    _attribute_map = {
        "qna_id": {"key": "qnaId", "type": "int"},
        "question": {"key": "question", "type": "str"},
        "top": {"key": "top", "type": "int"},
        "user_id": {"key": "userId", "type": "str"},
        "confidence_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
        "answer_context": {"key": "context", "type": "KnowledgeBaseAnswerContext"},
        "ranker_kind": {"key": "rankerType", "type": "str"},
        "filters": {"key": "filters", "type": "QueryFilters"},
        "short_answer_options": {"key": "answerSpanRequest", "type": "ShortAnswerOptions"},
        "include_unstructured_sources": {"key": "includeUnstructuredSources", "type": "bool"},
    }

    def __init__(
        self,
        *,
        qna_id: Optional[int] = None,
        question: Optional[str] = None,
        top: Optional[int] = None,
        user_id: Optional[str] = None,
        confidence_threshold: Optional[float] = None,
        answer_context: Optional["_models.KnowledgeBaseAnswerContext"] = None,
        ranker_kind: Optional[str] = None,
        filters: Optional["_models.QueryFilters"] = None,
        short_answer_options: Optional["_models.ShortAnswerOptions"] = None,
        include_unstructured_sources: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.qna_id = qna_id
        self.question = question
        self.top = top
        self.user_id = user_id
        self.confidence_threshold = confidence_threshold
        self.answer_context = answer_context
        self.ranker_kind = ranker_kind
        self.filters = filters
        self.short_answer_options = short_answer_options
        self.include_unstructured_sources = include_unstructured_sources
class AnswerSpan(_serialization.Model):
    """Answer span extracted from a QnA answer.

    :ivar text: Predicted text of the span.
    :vartype text: str
    :ivar confidence: Predicted span score, in [0, 1].
    :vartype confidence: float
    :ivar offset: Span offset from the start of the answer.
    :vartype offset: int
    :ivar length: Span length.
    :vartype length: int
    """

    _validation = {"confidence": {"maximum": 1, "minimum": 0}}

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "confidence": {"key": "confidenceScore", "type": "float"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
    }

    def __init__(self, *, text: Optional[str] = None, confidence: Optional[float] = None, offset: Optional[int] = None, length: Optional[int] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.text = text
        self.confidence = confidence
        self.offset = offset
        self.length = length
class AnswersResult(_serialization.Model):
    """A list of knowledge-base answers.

    :ivar answers: The answer result list.
    :vartype answers: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswer]
    """

    _attribute_map = {"answers": {"key": "answers", "type": "[KnowledgeBaseAnswer]"}}

    def __init__(self, *, answers: Optional[List["_models.KnowledgeBaseAnswer"]] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.answers = answers
class Error(_serialization.Model):
    """The error object.

    All required parameters must be populated in order to send to Azure.

    :ivar code: Server-defined error code. Required. Known values:
     "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden",
     "NotFound", "ProjectNotFound", "OperationNotFound",
     "AzureCognitiveSearchNotFound", "AzureCognitiveSearchIndexNotFound",
     "TooManyRequests", "AzureCognitiveSearchThrottling",
     "AzureCognitiveSearchIndexLimitReached", "InternalServerError",
     "ServiceUnavailable".
    :vartype code: str or ~azure.ai.language.questionanswering.models.ErrorCode
    :ivar message: Human-readable description of the error. Required.
    :vartype message: str
    :ivar target: Target of the error.
    :vartype target: str
    :ivar details: Specific errors that led to this reported error.
    :vartype details: list[~azure.ai.language.questionanswering.models.Error]
    :ivar innererror: More specific nested error information.
    :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
    """

    _validation = {"code": {"required": True}, "message": {"required": True}}

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[Error]"},
        "innererror": {"key": "innererror", "type": "InnerErrorModel"},
    }

    def __init__(
        self,
        *,
        code: Union[str, "_models.ErrorCode"],
        message: str,
        target: Optional[str] = None,
        details: Optional[List["_models.Error"]] = None,
        innererror: Optional["_models.InnerErrorModel"] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        self.details = details
        self.innererror = innererror
class ErrorResponse(_serialization.Model):
    """Top-level error response wrapper.

    :ivar error: The error object.
    :vartype error: ~azure.ai.language.questionanswering.models.Error
    """

    _attribute_map = {"error": {"key": "error", "type": "Error"}}

    def __init__(self, *, error: Optional["_models.Error"] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.error = error
class InnerErrorModel(_serialization.Model):
    """Nested error detail, per the Microsoft One API guidelines
    (https://github.com/Microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses).

    All required parameters must be populated in order to send to Azure.

    :ivar code: Server-defined error code. Required. Known values:
     "InvalidRequest", "InvalidParameterValue", "KnowledgeBaseNotFound",
     "AzureCognitiveSearchNotFound", "AzureCognitiveSearchThrottling",
     "ExtractionFailure".
    :vartype code: str or ~azure.ai.language.questionanswering.models.InnerErrorCode
    :ivar message: Error message. Required.
    :vartype message: str
    :ivar details: Error details.
    :vartype details: dict[str, str]
    :ivar target: Error target.
    :vartype target: str
    :ivar innererror: More specific nested error information.
    :vartype innererror: ~azure.ai.language.questionanswering.models.InnerErrorModel
    """

    _validation = {"code": {"required": True}, "message": {"required": True}}

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "details": {"key": "details", "type": "{str}"},
        "target": {"key": "target", "type": "str"},
        "innererror": {"key": "innererror", "type": "InnerErrorModel"},
    }

    def __init__(
        self,
        *,
        code: Union[str, "_models.InnerErrorCode"],
        message: str,
        details: Optional[Dict[str, str]] = None,
        target: Optional[str] = None,
        innererror: Optional["_models.InnerErrorModel"] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.code = code
        self.message = message
        self.details = details
        self.target = target
        self.innererror = innererror
class KnowledgeBaseAnswer(_serialization.Model):
    """A single knowledge-base answer.

    :ivar questions: Questions associated with the answer.
    :vartype questions: list[str]
    :ivar answer: Answer text.
    :vartype answer: str
    :ivar confidence: Answer score, in [0, 1].
    :vartype confidence: float
    :ivar qna_id: ID of the QnA result.
    :vartype qna_id: int
    :ivar source: Source of the QnA result.
    :vartype source: str
    :ivar metadata: Metadata useful for categorizing or filtering answers.
    :vartype metadata: dict[str, str]
    :ivar dialog: Dialog associated with the answer.
    :vartype dialog: ~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerDialog
    :ivar short_answer: Answer span relative to the user's question.
    :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan
    """

    _validation = {"confidence": {"maximum": 1, "minimum": 0}}

    _attribute_map = {
        "questions": {"key": "questions", "type": "[str]"},
        "answer": {"key": "answer", "type": "str"},
        "confidence": {"key": "confidenceScore", "type": "float"},
        "qna_id": {"key": "id", "type": "int"},
        "source": {"key": "source", "type": "str"},
        "metadata": {"key": "metadata", "type": "{str}"},
        "dialog": {"key": "dialog", "type": "KnowledgeBaseAnswerDialog"},
        "short_answer": {"key": "answerSpan", "type": "AnswerSpan"},
    }

    def __init__(
        self,
        *,
        questions: Optional[List[str]] = None,
        answer: Optional[str] = None,
        confidence: Optional[float] = None,
        qna_id: Optional[int] = None,
        source: Optional[str] = None,
        metadata: Optional[Dict[str, str]] = None,
        dialog: Optional["_models.KnowledgeBaseAnswerDialog"] = None,
        short_answer: Optional["_models.AnswerSpan"] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.questions = questions
        self.answer = answer
        self.confidence = confidence
        self.qna_id = qna_id
        self.source = source
        self.metadata = metadata
        self.dialog = dialog
        self.short_answer = short_answer
class KnowledgeBaseAnswerContext(_serialization.Model):
    """Information about the previous QnA turn.

    All required parameters must be populated in order to send to Azure.

    :ivar previous_qna_id: QnA ID of the previous turn's top answer. Required.
    :vartype previous_qna_id: int
    :ivar previous_question: The previous user query.
    :vartype previous_question: str
    """

    _validation = {"previous_qna_id": {"required": True}}

    _attribute_map = {
        "previous_qna_id": {"key": "previousQnaId", "type": "int"},
        "previous_question": {"key": "previousUserQuery", "type": "str"},
    }

    def __init__(self, *, previous_qna_id: int, previous_question: Optional[str] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.previous_qna_id = previous_qna_id
        self.previous_question = previous_question
class KnowledgeBaseAnswerDialog(_serialization.Model):
    """Dialog attached to an answer.

    :ivar is_context_only: When True the QnA is only relevant as a follow-up
     (excluded from context-free search); when False context is ignored and
     the QnA appears in results.
    :vartype is_context_only: bool
    :ivar prompts: Follow-up prompts associated with the answer (at most 20).
    :vartype prompts: list[~azure.ai.language.questionanswering.models.KnowledgeBaseAnswerPrompt]
    """

    _validation = {"prompts": {"max_items": 20, "min_items": 0}}

    _attribute_map = {
        "is_context_only": {"key": "isContextOnly", "type": "bool"},
        "prompts": {"key": "prompts", "type": "[KnowledgeBaseAnswerPrompt]"},
    }

    def __init__(self, *, is_context_only: Optional[bool] = None, prompts: Optional[List["_models.KnowledgeBaseAnswerPrompt"]] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.is_context_only = is_context_only
        self.prompts = prompts
class KnowledgeBaseAnswerPrompt(_serialization.Model):
    """A follow-up prompt for an answer.

    :ivar display_order: Index used to order the prompts.
    :vartype display_order: int
    :ivar qna_id: QnA ID the prompt leads to.
    :vartype qna_id: int
    :ivar display_text: Text shown for the follow-up question (max 200 chars).
    :vartype display_text: str
    """

    _validation = {"display_text": {"max_length": 200}}

    _attribute_map = {
        "display_order": {"key": "displayOrder", "type": "int"},
        "qna_id": {"key": "qnaId", "type": "int"},
        "display_text": {"key": "displayText", "type": "str"},
    }

    def __init__(self, *, display_order: Optional[int] = None, qna_id: Optional[int] = None, display_text: Optional[str] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.display_order = display_order
        self.qna_id = qna_id
        self.display_text = display_text
class MetadataFilter(_serialization.Model):
    """Selects QnAs associated with a given list of metadata.

    :ivar metadata: Metadata entries to match.
    :vartype metadata: list[JSON]
    :ivar logical_operation: Operation used to join the metadata filters.
    :vartype logical_operation: str
    """

    _attribute_map = {
        "metadata": {"key": "metadata", "type": "[object]"},
        "logical_operation": {"key": "logicalOperation", "type": "str"},
    }

    def __init__(self, *, metadata: Optional[List[JSON]] = None, logical_operation: Optional[str] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.metadata = metadata
        self.logical_operation = logical_operation
class QueryFilters(_serialization.Model):
    """Filters applied over the knowledge base.

    :ivar metadata_filter: Metadata-based QnA selection.
    :vartype metadata_filter: ~azure.ai.language.questionanswering.models.MetadataFilter
    :ivar source_filter: Knowledge-base sources to restrict QnAs to.
    :vartype source_filter: list[str]
    :ivar logical_operation: Operation joining the metadata and source filters.
    :vartype logical_operation: str
    """

    _attribute_map = {
        "metadata_filter": {"key": "metadataFilter", "type": "MetadataFilter"},
        "source_filter": {"key": "sourceFilter", "type": "[str]"},
        "logical_operation": {"key": "logicalOperation", "type": "str"},
    }

    def __init__(
        self,
        *,
        metadata_filter: Optional["_models.MetadataFilter"] = None,
        source_filter: Optional[List[str]] = None,
        logical_operation: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.metadata_filter = metadata_filter
        self.source_filter = source_filter
        self.logical_operation = logical_operation
class ShortAnswerOptions(_serialization.Model):
    """Configuration for answer-span ("short answer") prediction.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar enable: Enable answer-span prediction. Required; constant True.
    :vartype enable: bool
    :ivar confidence_threshold: Minimum span score, in [0, 1].
    :vartype confidence_threshold: float
    :ivar top: How many top answers (1-10) to consider for span prediction.
    :vartype top: int
    """

    _validation = {
        "enable": {"required": True, "constant": True},
        "confidence_threshold": {"maximum": 1, "minimum": 0},
        "top": {"maximum": 10, "minimum": 1},
    }

    _attribute_map = {
        "enable": {"key": "enable", "type": "bool"},
        "confidence_threshold": {"key": "confidenceScoreThreshold", "type": "float"},
        "top": {"key": "topAnswersWithSpan", "type": "int"},
    }

    # Service contract pins this flag to True (see _validation "constant").
    enable = True

    def __init__(self, *, confidence_threshold: Optional[float] = None, top: Optional[int] = None, **kwargs: Any) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.confidence_threshold = confidence_threshold
        self.top = top
class TextAnswer(_serialization.Model):
    """An answer found within a text record.

    :ivar answer: Answer text.
    :vartype answer: str
    :ivar confidence: Answer score, in [0, 1].
    :vartype confidence: float
    :ivar id: Record ID.
    :vartype id: str
    :ivar short_answer: Answer span relative to the user's question.
    :vartype short_answer: ~azure.ai.language.questionanswering.models.AnswerSpan
    :ivar offset: Sentence offset from the start of the document.
    :vartype offset: int
    :ivar length: Sentence length.
    :vartype length: int
    """

    _validation = {"confidence": {"maximum": 1, "minimum": 0}}

    _attribute_map = {
        "answer": {"key": "answer", "type": "str"},
        "confidence": {"key": "confidenceScore", "type": "float"},
        "id": {"key": "id", "type": "str"},
        "short_answer": {"key": "answerSpan", "type": "AnswerSpan"},
        "offset": {"key": "offset", "type": "int"},
        "length": {"key": "length", "type": "int"},
    }

    def __init__(
        self,
        *,
        answer: Optional[str] = None,
        confidence: Optional[float] = None,
        id: Optional[str] = None,  # pylint: disable=redefined-builtin
        short_answer: Optional["_models.AnswerSpan"] = None,
        offset: Optional[int] = None,
        length: Optional[int] = None,
        **kwargs: Any
    ) -> None:
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.answer = answer
        self.confidence = confidence
        self.id = id
        self.short_answer = short_answer
        self.offset = offset
        self.length = length
class TextDocument(_serialization.Model):
    """An input text record to be queried.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Unique identifier of the record. Required.
    :vartype id: str
    :ivar text: Text contents of the record. Required.
    :vartype text: str
    """

    _validation = {"id": {"required": True}, "text": {"required": True}}

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "text": {"key": "text", "type": "str"},
    }

    def __init__(self, *, id: str, text: str, **kwargs: Any) -> None:  # pylint: disable=redefined-builtin
        """Keyword arguments mirror the ivars documented on the class."""
        super().__init__(**kwargs)
        self.id = id
        self.text = text
|
9,865 | d2049b20e00b45df9fb0772d9a654a58a00191c5 | def decorate(a):
def inner(f):
def decorated(*args, **kwargs):
return f(a, *args, **kwargs)
return decorated
return inner
# The decorator pre-binds func's first positional argument to 3, so the
# call func(1, 2) prints "3 1 2". (Python 2 print statement below.)
@decorate(3)
def func(a, b, c):
    print a, b, c

func(1, 2)
|
9,866 | 1c222f42c5c0178f97391f1bdc60bba110f3d118 | from django.urls import path, re_path
from .views import *
app_name = 'articles'
# NOTE(review): route order matters here -- 'create' must stay before the
# '<slug:slug>' route, otherwise /create would be captured as a slug.
urlpatterns = [
    path('',articles_list,name='list'),
    path('create', create_article, name='create'),
    path('<slug:slug>', article_detail,name='detail'),
]
9,867 | 441d224c37e0eae531c17db0e903b3344c570516 | from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import Context
from books.models import book,Author
def index(request):
    """Render the index template with every book in the database."""
    book_list = book.objects.all()
    context = Context({"book_list": book_list})
    return render_to_response("index.html", context)
|
9,868 | 4f2017632d905c80c35fbaead83ecb7e1ac95760 | from page_parsing import get_item_info_from,url_list,item_info,get_links_from
# ================================================= < < URL de-duplication > > =====================================================
# Design notes:
# 1. Two collections are used: the first (url_list) only stores the crawled URLs;
#    the second (item_info) stores the item details scraped from each URL.
# 2. While scraping, each detail record written to the second collection also
#    carries a key ('index_url') holding the URL the details came from.
# 3. If the crawl is interrupted, the URL field values in the detail collection
#    are a subset of the URL set in the first collection.
# 4. Subtracting the two sets yields the URLs that still need to be crawled.
db_urls = [item['url'] for item in url_list.find()]      # list comprehension gathering every URL scheduled for crawling
index_urls = [item['url'] for item in item_info.find()]  # every URL already present in the detail-info collection
x = set(db_urls)                                         # convert to set data structures
y = set(index_urls)
rest_of_urls = x-y                                       # set difference = remaining work
# ======================================================================================================================
|
9,869 | e5fd0fc13a39444a934eea3bd24056073d28eff2 | #!/usr/bin/env python
from __future__ import division
import sys
import math
logs = sys.stderr  # diagnostics go to stderr; stdout stays clean for data
from collections import defaultdict
import time
from mytime import Mytime
import gflags as flags
FLAGS=flags.FLAGS
# Command-line flags shared by the parser/model scripts.
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
    """Create an empty feature vector.

    Uses a plain defaultdict unless --svector requested the Cython vector.
    Kept as a named function (not a lambda) per the original's note.
    """
    if FLAGS.svector:
        return svector.Vector()
    return defaultdict(int)
class Model(object):
    '''Feature templates and their learned weights for a shift-reduce parser.

    Reads templates/weights from file, optionally auto-generates a fast
    feature-evaluation module, and can write or prune the weight vector.
    '''
    ## __slots__ = "templates", "weights", "list_templates", "freq_templates"
    names = ["SHIFT", "LEFT", "RIGHT"]
    indent = " " * 4
    eval_module = None # by default, use my handwritten static_eval()
    def __init__(self, weightstr):
        # weightstr: filename (or inline string) of templates + weights.
        self.knowns = set()
        self.unk = FLAGS.unk
        self.unktag = FLAGS.unktag
        self.unkdel = FLAGS.unkdel
        assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
        if FLAGS.svector: # now it is known
            global svector
            try:
                svector = __import__("svector")
                print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
            except:
                print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
                FLAGS.svector = False # important
        self.templates = {} # mapping from "s0t-q0t" to the eval expression
        self.list_templates = [] # ordered list of template keys "s0t-q0t"
        self.freq_templates = defaultdict(int)
        self.weights = new_vector() #Vector()
        self.read_weights(weightstr)
        ## self.featurenames = set(self.weights.iterkeys())
        if FLAGS.featstat:
            self.print_templates()
    def count_knowns_from_train(self, trainfile, devfile):
        '''used in training: words with freq > self.unk become "known".'''
        print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
        stime = time.time()
        words = defaultdict(int)
        for i, line in enumerate(open(trainfile)):
            for word in line.split():
                # strip bracket chars and the "/POS" suffix to get the bare word
                word = word.strip("()").rsplit("/", 1)[0]
                words[word] += 1
        if FLAGS.debug_wordfreq:
            devunk1 = set()
            devunk0 = set()
            for line in open(devfile):
                for word in line.split():
                    word = word.strip("()").rsplit("/", 1)[0]
                    if words[word] <= self.unk and words[word] > 0:
                        devunk1.add(word)
                    if words[word] == 0:
                        devunk0.add(word)
            print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
            print >> logs
            print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
##             freqs = defaultdict(list)
##             for word, freq in words.items():
##                 freqs[freq].append(word)
##             for freq in sorted(freqs, reverse=True):
##                 print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
##             print >> logs
        self.knowns = set()
        for word, freq in words.items():
            if freq > self.unk:
                self.knowns.add(word)
        print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
              (i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
##         print >> logs, " ".join(sorted(self.knowns))
    def add_template(self, s, freq=1):
        '''register template key s (e.g. "s0w-s0t") and pre-compile its eval expr.'''
        ## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
        symbols = s.split("-") # static part: s0w-s0t
        if s not in self.templates:
            tmp = '"%s=%s" %% (%s)' % (s, \
                                       "|".join(["%s"] * len(symbols)), \
                                       ", ".join(symbols))
            self.templates[s] = compile(tmp, "2", "eval")
            self.list_templates.append((s, tmp)) # in order
        self.freq_templates[s] += int(freq)
    def print_autoevals(self):
        '''generate /tmp/<ts>.py with a flat static_eval() and import it.'''
        tfilename = str(int(time.time()))
        templatefile = open("/tmp/%s.py" % tfilename, "wt")
        print >> templatefile, "#generated by model.py"
        print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
        # NOTE(review): the generated signature has an extra (q2w, q2t) slot that
        # the handwritten static_eval below does not -- confirm callers of
        # Model.eval_module pass the matching argument list.
        print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
        print >> templatefile, "%sreturn [" % Model.indent
        for s, e in self.list_templates:
            print >> templatefile, "%s%s," % (Model.indent * 2, e)
        print >> templatefile, "%s]" % (Model.indent * 2)
        templatefile.close()
        if FLAGS.autoeval:
            sys.path.append('/tmp/')
            print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
            # to be used in newstate
            Model.eval_module = __import__(tfilename)
        else:
            Model.eval_module = Model
    def print_templates(self, f=logs):
        '''dump template list with frequencies, terminated by "---".'''
        print >> f, ">>> %d templates in total:" % len(self.templates)
        print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
                               for x, _ in self.list_templates])
        print >> f, "---"
    def read_templates(self, filename):
        '''read templates from a file (returning the open handle) or from an inline string.'''
        ## try interpreting it as a filename, if failed, then as a string
        try:
            f = open(filename)
            print >> logs, "reading templates from %s" % filename,
            for x in f:
                if x[:3] == "---":
                    break
                if x[:3] == ">>>":
                    continue
                try:
                    s, freq = x.split()
                except:
                    s, freq = x, 1
                self.add_template(s, freq)
        except:
            ## from argv string rather than file
            for x in filename.split():
                self.add_template(x)
            f = None
        print >> logs, "%d feature templates read." % len(self.templates)
        return f
    def read_weights(self, filename, infertemplates=False):
        '''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
        infile = self.read_templates(filename)
        # infer templates from the weight keys when (almost) none were declared
        infertemplates = len(self.templates) <= 1
        if infertemplates:
            print >> logs, "will infer templates from weights..."
        mytime = Mytime()
        i = 0
        if infile is not None:
            print >> logs, "reading feature weights from %s\t" % filename,
            for i, line in enumerate(infile, 1):
                if i % 200000 == 0:
                    print >> logs, "%d lines read..." % i,
                if line[0] == " ":
                    # TODO: separate known words line (last line)
                    self.knowns = set(line.split())
                    print >> logs, "\n%d known words read." % len(self.knowns)
                    self.unk = 1 # in case you forgot to say it; doesn't matter 1 or x
                    break
                feat, weight = line.split()
                self.weights[feat] = float(weight)
                if infertemplates:
                    self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
        print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
              (len(self.weights), i, mytime.period())
        self.print_autoevals()
    def make_feats(self, state):
        '''returns a *list* of feature templates for state.'''
        fv = new_vector() #Vector()
        top = state.top()
        topnext = state.top(1)
        top3rd = state.top(2)
        qhead = state.qhead()
        qnext = state.qhead(1)
        ## this part is manual; their combinations are automatic
        # (word, tag) pairs with <s>/</s> sentinels at stack/queue boundaries
        s0 = top.head() if top is not None else ("<s>", "<s>")  # N.B. (...)
        s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
        s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
        q0 = qhead if qhead is not None else ("</s>", "</s>")
        q1 = qnext if qnext is not None else ("</s>", "</s>")
        # tags of leftmost/rightmost children of the top two stack items
        s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
        s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
        s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
        s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
        ## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
        return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
#        return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
    def write(self, filename="-", weights=None):
        '''write templates + nonzero weights (and known words) to filename or stdout.'''
        if weights is None:
            weights = self.weights
        if filename == "-":
            outfile = sys.stdout
            filename = "STDOUT" # careful overriding
        else:
            outfile = open(filename, "wt")
        self.print_templates(outfile)
        mytime = Mytime()
        nonzero = 0
        print >> logs, "sorting %d features..." % len(weights),
        for i, f in enumerate(sorted(weights), 1):
            if i == 1: # sorting done
                print >> logs, "done in %.2lf seconds." % mytime.period()
                print >> logs, "writing features to %s..." % filename
            v = weights[f]
            if math.fabs(v) > 1e-3:  # drop near-zero weights
                print >> outfile, "%s\t%.5lf" % (f, v)
                nonzero += 1
        if self.unk > 0: # print known words
            print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
        print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
              (nonzero, mytime.period()) ## nonzero != i
    @staticmethod
    def trim(fv):
        # drop near-zero entries in place and return the vector
        for f in fv:
            if math.fabs(fv[f]) < 1e-3:
                del fv[f]
        return fv
    @staticmethod
    def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
        # hand-written fallback feature list (Python 2 tuple-parameter syntax)
        return ["q0t=%s" % (q0t),
                "q0w-q0t=%s|%s" % (q0w, q0t),
                "q0w=%s" % (q0w),
                "s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
                "s0t-q0t=%s|%s" % (s0t, q0t),
                "s0t-s1t=%s|%s" % (s0t, s1t),
                "s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
                "s0t=%s" % (s0t),
                "s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
                "s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
                "s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
                "s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
                "s0w-s0t=%s|%s" % (s0w, s0t),
                "s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
                "s0w-s1w=%s|%s" % (s0w, s1w),
                "s0w=%s" % (s0w),
                "s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
                "s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
                "s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
                "s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
                "s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
                "s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
                "s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
                "s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
                "s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
                "s1t=%s" % (s1t),
                "s1w-s1t=%s|%s" % (s1w, s1t),
                "s1w=%s" % (s1w),
                "s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
    def prune(self, filenames):
        '''prune features from word/tag lines'''
        print >> logs, "pruning features using %s..." % filenames,
        fullset = set()
        for filename in filenames.split():
            for l in open(filename):
                for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
                    fullset.add(w)
                    fullset.add(t)
        print >> logs, "collected %d uniq words & tags..." % (len(fullset)),
        new = new_vector() # Vector()
        for f in self.weights:
            # keep a feature only if every symbol between the first and last "="
            # occurs in the word/tag vocabulary
            stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
            for s in stuff:
                if s not in fullset:
                    break
            else:
                new[f] = self.weights[f]
        print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
        self.weights = new
    def sparsify(self, z=1):
        '''duchi et al., 2008 (not implemented yet)'''
# Command-line entry point (Python 2 syntax: `except E, e`).
if __name__ == "__main__":
    flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")
    try:
        argv = FLAGS(sys.argv)
        if FLAGS.weights is None:
            raise flags.FlagsError("must specify weights by -w ...")
    except flags.FlagsError, e:
        print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)
    FLAGS.featstat = True
    model = Model(FLAGS.weights) #.model, FLAGS.weights)
    if FLAGS.prune:
        model.prune(FLAGS.prune)
    if FLAGS.outputweights:
        model.write(FLAGS.outputweights)
|
9,870 | 3ec162070f79ae38d6ae3ceb858c15b6e39f7027 | #the method of same name present in any class, it is call by anywhere
#object of different type is responds to same methods
class pycharm:
    """IDE exposing the execute() protocol expected by laptop.code()."""

    def execute(self):
        # Emit each status line exactly as before.
        for message in ("COde check", "compile"):
            print(message)
class MyEditor:
    """Alternative editor implementing the same execute() protocol."""

    def execute(self):
        for message in ("Spell Cheack", "Auto COmpile", "COde check", "compile"):
            print(message)
class laptop:
    # Duck typing: accepts any object exposing execute(); no common base needed.
    def code(self,ide):
        ide.execute()
# Demo: the same laptop.code() call works with either editor (polymorphism).
ide=pycharm()
ide2=MyEditor()
a1=laptop()
a1.code(ide)
print()
a1.code(ide2)
|
9,871 | 1ddec426e4ad50f1d0e8a57ed841fbdf8c51b00f | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import tools
from osv import fields, osv
from tools.translate import _
from datetime import datetime, timedelta
# Format used for 'approval_date' values stored as strings.
date_format = '%Y-%m-%d'
class tax(osv.Model):
    _inherit = 'sgr.tax'
    def send_alerts(self, cr, uid, context=None):
        # Cron entry point: alert on taxes due within the next 2 days.
        self.send_alerts_with_upcoming_days(cr, uid, 2, context=context)
    def send_alerts_with_upcoming_days(self, cr, uid, upcoming_days, context=None):
        '''Post chatter alerts on unpaid taxes that are overdue or due soon.'''
        now = datetime.now()
        now_plus_upcoming_days = now + timedelta(days=upcoming_days)
        tax_to_paid_ids = self.search(cr, uid, [('state','=','to_pay')], context=context)
        tax_due_date_soon = []  # NOTE(review): never used below -- candidate for removal
        taxs_due = []
        overdue_taxs = []
        for tax in self.browse(cr, uid, tax_to_paid_ids, context=context):
            if not tax.approval_date:
                continue
            approval_date = datetime.strptime(tax.approval_date, date_format)
            if approval_date <= now:
                overdue_taxs.append(tax)
            elif now < approval_date and approval_date <= now_plus_upcoming_days:
                taxs_due.append(tax)
        for tax in taxs_due:
            self.message_post(cr, uid, [tax.id], body="Tax payment deadline soon", subtype="sgr_alerts.mt_tax_due_date_soon", context=context)
        for tax in overdue_taxs:
            self.message_post(cr, uid, [tax.id], body="Tax payment deadline expired", subtype="sgr_alerts.mt_tax_due_date", context=context)
        #all_tax_ids = self.search(cr, uid, [], context=context)
        #for tax in self.browse(cr, uid, all_tax_ids, context=context):
        #    print 'tax: ' + str(tax.id)
        #    self.message_post(cr, uid, [tax.id], body="Due Date Soon", subtype="sgr_alerts.mt_tax_due_date_soon", context=context)
        return True
# Old-API model registration.
tax()
|
9,872 | 409e0fc0b1c1d86c5526d33ba271a8387eecf748 | # -*- coding: cp1251 -*-
import arcpy as a
from arcpy import AddMessage as msg, AddWarning as warning, AddError as error
from os import mkdir, walk
from os.path import join, dirname, basename, splitext
from glob import glob as get_files
from shutil import copy
from collections import OrderedDict
# Tool parameters (ArcGIS script-tool inputs; booleans arrive as 'true'/'false' strings).
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
# Ordered maps: relative subfolder path -> list of child dirs, for both trees.
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs, _ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root, dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''):dirs for root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)
# Phase 1: report which input subfolders exist in the output tree (warning) or not (error).
msg("\nПроверка на наличие подпапок исходной папки в выходной:")
for folder in input_folders:
    if folder in output_folders:
        warning(' ' + folder)
    else:
        error(' ' + folder)
# Phase 2: same check in the other direction; drop output-only subfolders from further work.
msg("\nПроверка на наличие подпапок выходной папки в исходной:")
remove_list = []
for folder in output_folders:
    if folder in input_folders:
        warning(' ' + folder)
    else:
        remove_list.append(folder)
        error(' ' + folder)
for folder in remove_list:
    output_folders.pop(folder, None)
# Phase 3: copy non-MapInfo auxiliary files into matching output subfolders.
msg("\nКопирование файлов в папки...")
remove_list = []
for subfolders in output_folders:
    tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
    if not tab_files:
        remove_list.append(subfolders)
    if u"Импорт" in subfolders:
        continue
    else:
        similar_output_folder = join(output_folder, subfolders)
        msg(' ' + subfolders)
        files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, "*.*"))]
        for file_to_copy in files_to_copy:
            _, file_extension = splitext(file_to_copy)
            # MapInfo dataset files themselves are not copied, only the extras.
            if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:
                msg(' ' + file_to_copy)
                copy(file_to_copy, similar_output_folder)
for folder in remove_list:
    output_folders.pop(folder, None)
output_folders.pop('', None)
# Phase 4: create one personal geodatabase (.mdb) per remaining subfolder.
msg("\nСоздание баз данных...")
for output_subfolders in output_folders:
    mdb_name = basename(output_subfolders)
    mdb_local_path = join(output_subfolders, mdb_name + ".mdb")
    if enable_rewrite_databases == 'true':
        a.Delete_management(join(output_folder, output_subfolders, mdb_name + ".mdb"))
    try:
        a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + ".mdb")
        msg(" " + mdb_local_path)
    except a.ExecuteError:
        warning(" " + mdb_local_path)
# Phase 5: convert each TAB layer type into a feature class / table inside the .mdb.
msg("\nКонвертация TAB в слои...")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
    tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
    for tab_file_path in tab_files:
        for layer_type in layer_types:
            tab_name = basename(tab_file_path).replace('.TAB', '')
            layer_from_name = tab_name + ' ' + layer_type
            layer_from = join(tab_file_path, layer_from_name)
            # NOTE(review): this bare a.Exists() result is discarded; the
            # following if repeats the call -- confirm before removing.
            a.Exists(layer_from)
            if not a.Exists(layer_from):
                continue
            # Geodatabase names cannot contain spaces or start with a digit.
            layer_to_name = layer_from_name.replace(' ', '_')
            if layer_to_name[0].isdigit():
                layer_to_name = 'L' + layer_to_name
            layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)
            local_tab_path = join(subfolders, tab_name + '.TAB')
            if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
                a.Delete_management(layer_to)
                msg(u' ' + local_tab_path + ' ' + layer_type)
            elif a.Exists(layer_to):
                warning(u' ' + local_tab_path + ' ' + layer_type)
                continue
            elif not a.Exists(layer_to):
                msg(u' ' + local_tab_path + ' ' + layer_type)
            # Try a feature copy first; fall back to a row copy for tables.
            try:
                a.CopyFeatures_management(layer_from, layer_to)
            except:
                try:
                    a.CopyRows_management(layer_from, layer_to)
                except Exception as e:
                    error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))
|
# Single-source package version string.
__version__ = '3.13.7'
|
9,874 | 504d4afc4b3e708d43110a2d85676fb745f1aba8 | from django.shortcuts import render
from django.http import Http404
from thermometer.models import Therm
def index(request):
    """Render the index page listing every thermometer reading."""
    context = {'therms': Therm.objects.all()}
    return render(request, 'thermometer/index.html', context)
def fetchsquare(request, id):
    """Render the detail page for one Therm; raise Http404 when id is unknown."""
    try:
        therm = Therm.objects.get(id=id)
    except Therm.DoesNotExist:
        raise Http404('This item does not exist')
    context = {'therm': therm}
    return render(request, 'thermometer/fetchsquare.html', context)
9,875 | a77fb90cdc6e7f9b70f9feeefc2b7f8e93a2d8c5 | # wilfred.py
# Authors
# Stuart C. Larsen (SCL)
# Daryl W. Bennet (DWB)
# Set up three main modules (command, control, reconnaissance),
# and then enter main event loop.
#
# Command:
# Gather mission priorities and objectives, such as turn left, turn right
# goto GPS 45, 65, land, take off.
#
# Control:
# Fly the craft to complete the command objective.
#
# Reconnaissance:
# Gather information about wilfreds current position.
#
# Main Event Loop:
# Check command listing for new updates, check reconnaisannce for current
# posistion, and then control the craft to the correct zone. Main loop will
# be a very fast feedback loop.
import command
import driver
from debug import *
def mainLoop():
    """Accept a client connection and dispatch newline-separated text commands forever."""
    wilfredCommunication = command.Command()
    wilfredCommunication.waitForClient()
    # NOTE(review): a second Command instance is created for the actuator calls
    # (setMotorSpeed/playMeow) -- confirm this is intentional vs. reusing the first.
    wilfredCommand = command.Command()
    while True:
        # Re-block for a client if the connection dropped.
        if not wilfredCommunication.checkConnection():
            wilfredCommunication.waitForClient()
        commands = wilfredCommunication.getCommand()
        for commandData in commands.split('\n'):
            # First space-separated token is the command name, the rest are args.
            cmd = commandData.split(' ')[0].strip()
            if cmd == "": continue
            args = [arg.strip() for arg in commandData.split(' ')[1:]]
            # setMotorSpeed (0-3) (0-100)
            if cmd == "setMotorSpeed":
                motorNum = int(args[0])
                motorSpeed = int(args[1])
                wilfredCommand.setMotorSpeed(motorNum, motorSpeed)
            elif cmd == "playMeow":
                goodMessage("wilfred: playing meow from file: ", args[0])
                wilfredCommand.playMeow(args[0])
            elif cmd == "getAccel":
                goodMessage("wilfred: returning acceleration...")
                # Hard-coded placeholder reading; no sensor is queried here.
                wilfredCommunication.sendMessage("(0, 0, 0)")
            else:
                errorMessage("wilfred: command not recognized: ", cmd, ": ", args)
if __name__ == "__main__":
    mainLoop()
|
9,876 | 31f91e67d0adde0a984a6d162ea5607f06e9208e | #!/usr/local/autopkg/python
"""
JamfExtensionAttributeUploader processor for uploading extension attributes
to Jamf Pro using AutoPkg
by G Pugh
"""
import os
import sys
from time import sleep
from xml.sax.saxutils import escape
from autopkglib import ProcessorError # pylint: disable=import-error
# to use a base module in AutoPkg we need to add this path to the sys.path.
# this violates flake8 E402 (PEP8 imports) but is unavoidable, so the following
# imports require noqa comments for E402
sys.path.insert(0, os.path.dirname(__file__))
from JamfUploaderLib.JamfUploaderBase import JamfUploaderBase # noqa: E402
__all__ = ["JamfExtensionAttributeUploader"]
class JamfExtensionAttributeUploader(JamfUploaderBase):
    description = (
        "A processor for AutoPkg that will upload an Extension Attribute item to a "
        "Jamf Cloud or on-prem server."
    )
    input_variables = {
        "JSS_URL": {
            "required": True,
            "description": "URL to a Jamf Pro server that the API user has write access "
            "to, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_USERNAME": {
            "required": True,
            "description": "Username of account with appropriate access to "
            "jss, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_PASSWORD": {
            "required": True,
            "description": "Password of api user, optionally set as a key in "
            "the com.github.autopkg preference file.",
        },
        "ea_name": {
            "required": False,
            "description": "Extension Attribute name",
            "default": "",
        },
        "ea_script_path": {
            "required": False,
            "description": "Full path to the script to be uploaded",
        },
        "replace_ea": {
            "required": False,
            "description": "Overwrite an existing category if True.",
            "default": False,
        },
        "ea_inventory_display": {
            "required": False,
            "description": "Inventory Display value for the EA.",
            "default": "Extension Attributes",
        },
        "ea_data_type": {
            "required": False,
            "description": "Data type for the EA. One of String, Integer or Date.",
            "default": "String",
        },
        "sleep": {
            "required": False,
            "description": "Pause after running this processor for specified seconds.",
            "default": "0",
        },
    }
    output_variables = {
        "jamfextensionattributeuploader_summary_result": {
            "description": "Description of interesting results.",
        },
    }
    def upload_ea(
        self,
        jamf_url,
        ea_name,
        ea_data_type,
        ea_inventory_display,
        script_path,
        obj_id=None,
        enc_creds="",
        token="",
    ):
        """Update extension attribute metadata.

        Builds the Classic-API XML payload around the script at script_path
        and PUTs (existing obj_id) or POSTs (no obj_id) it, retrying up to
        5 times. Raises ProcessorError when the script file is missing or
        every attempt fails.
        """
        # import script from file and replace any keys in the script
        if os.path.exists(script_path):
            with open(script_path, "r") as file:
                script_contents = file.read()
        else:
            raise ProcessorError("Script does not exist!")
        # substitute user-assignable keys
        script_contents = self.substitute_assignable_keys(script_contents)
        # XML-escape the script
        script_contents_escaped = escape(script_contents)
        # build the object
        ea_data = (
            "<computer_extension_attribute>"
            + "<name>{}</name>".format(ea_name)
            + "<enabled>true</enabled>"
            + "<description/>"
            + "<data_type>{}</data_type>".format(ea_data_type)
            + "<input_type>"
            + "  <type>script</type>"
            + "  <platform>Mac</platform>"
            + "  <script>{}</script>".format(script_contents_escaped)
            + "</input_type>"
            + "<inventory_display>{}</inventory_display>".format(ea_inventory_display)
            + "<recon_display>Extension Attributes</recon_display>"
            + "</computer_extension_attribute>"
        )
        self.output(
            "Extension Attribute data:",
            verbose_level=2,
        )
        self.output(
            ea_data,
            verbose_level=2,
        )
        self.output("Uploading Extension Attribute..")
        # write the template to temp file
        template_xml = self.write_temp_file(ea_data)
        # if we find an object ID we put, if not, we post
        object_type = "extension_attribute"
        # NOTE(review): when obj_id is None this POSTs to ".../id/None" --
        # confirm the Classic API accepts that for creation.
        url = "{}/{}/id/{}".format(jamf_url, self.api_endpoints(object_type), obj_id)
        count = 0
        # Retry loop: up to 5 attempts with a back-off between them.
        while True:
            count += 1
            self.output(
                "Extension Attribute upload attempt {}".format(count),
                verbose_level=2,
            )
            request = "PUT" if obj_id else "POST"
            r = self.curl(
                request=request,
                url=url,
                enc_creds=enc_creds,
                token=token,
                data=template_xml,
            )
            # check HTTP response
            if self.status_check(r, "Extension Attribute", ea_name, request) == "break":
                break
            if count > 5:
                self.output(
                    "ERROR: Extension Attribute upload did not succeed after 5 attempts"
                )
                self.output("\nHTTP POST Response Code: {}".format(r.status_code))
                raise ProcessorError("ERROR: Extension Attribute upload failed ")
            # Back off before retrying: at least 30 s, or the configured
            # 'sleep' value if it is larger.
            if int(self.sleep) > 30:
                sleep(int(self.sleep))
            else:
                sleep(30)
    def main(self):
        """Do the main thing here"""
        self.jamf_url = self.env.get("JSS_URL")
        self.jamf_user = self.env.get("API_USERNAME")
        self.jamf_password = self.env.get("API_PASSWORD")
        self.ea_script_path = self.env.get("ea_script_path")
        self.ea_name = self.env.get("ea_name")
        self.replace = self.env.get("replace_ea")
        self.ea_data_type = self.env.get("ea_data_type")
        self.ea_inventory_display = self.env.get("ea_inventory_display")
        self.sleep = self.env.get("sleep")
        # handle setting replace in overrides
        if not self.replace or self.replace == "False":
            self.replace = False
        # clear any pre-existing summary result
        if "jamfextensionattributeuploader_summary_result" in self.env:
            del self.env["jamfextensionattributeuploader_summary_result"]
        ea_uploaded = False
        # handle files with a relative path
        if not self.ea_script_path.startswith("/"):
            found_template = self.get_path_to_file(self.ea_script_path)
            if found_template:
                self.ea_script_path = found_template
            else:
                raise ProcessorError(f"ERROR: EA file {self.ea_script_path} not found")
        # now start the process of uploading the object
        self.output(f"Checking for existing '{self.ea_name}' on {self.jamf_url}")
        # obtain the relevant credentials
        token, send_creds, _ = self.handle_classic_auth(
            self.jamf_url, self.jamf_user, self.jamf_password
        )
        # check for existing - requires obj_name
        obj_type = "extension_attribute"
        obj_name = self.ea_name
        obj_id = self.get_api_obj_id_from_name(
            self.jamf_url,
            obj_name,
            obj_type,
            enc_creds=send_creds,
            token=token,
        )
        if obj_id:
            self.output(
                "Extension Attribute '{}' already exists: ID {}".format(
                    self.ea_name, obj_id
                )
            )
            if self.replace:
                self.output(
                    "Replacing existing Extension Attribute as 'replace_ea' is set to {}".format(
                        self.replace
                    ),
                    verbose_level=1,
                )
            else:
                # Existing EA and replacement not requested: nothing to do.
                self.output(
                    "Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.",
                    verbose_level=1,
                )
                return
        # upload the EA
        self.upload_ea(
            self.jamf_url,
            self.ea_name,
            self.ea_data_type,
            self.ea_inventory_display,
            self.ea_script_path,
            obj_id=obj_id,
            enc_creds=send_creds,
            token=token,
        )
        ea_uploaded = True
        # output the summary
        self.env["extension_attribute"] = self.ea_name
        self.env["ea_uploaded"] = ea_uploaded
        if ea_uploaded:
            self.env["jamfextensionattributeuploader_summary_result"] = {
                "summary_text": (
                    "The following extension attributes were created or "
                    "updated in Jamf Pro:"
                ),
                "report_fields": ["name", "path"],
                "data": {"name": self.ea_name, "path": self.ea_script_path},
            }
# Allow running the processor stand-alone from the command line.
if __name__ == "__main__":
    PROCESSOR = JamfExtensionAttributeUploader()
    PROCESSOR.execute_shell()
|
9,877 | 78123c806e5a8c0cc7511a5024769f8c61621efa | from math import *
import math
import re
import numpy as np
class atom:
    """Lightweight PDB atom record; fields are filled in while parsing ATOM/HETATM lines."""
    # Class-level defaults; each parsed instance overwrites them.
    aid=0       # atom serial number
    atype=''    # atom name, e.g. 'CA', 'SG'
    x=0.0
    y=0.0
    z=0.0
    rid=0       # residue sequence number
    rtype=''    # residue name, e.g. 'CYS'
    model=[]    # NOTE(review): mutable class default; the parser rebinds it per instance (to the model number)
    chainid=''
def getlen(atm1,atm2):
    """Return the Euclidean distance between two atoms."""
    dx = atm1.x - atm2.x
    dy = atm1.y - atm2.y
    dz = atm1.z - atm2.z
    return sqrt(dx * dx + dy * dy + dz * dz)
def getangle(atm1,atm2,atm3):
    """Return the angle (degrees) at atm2 formed by atm1-atm2-atm3."""
    v1 = (atm1.x - atm2.x, atm1.y - atm2.y, atm1.z - atm2.z)
    v2 = (atm3.x - atm2.x, atm3.y - atm2.y, atm3.z - atm2.z)
    n1 = sqrt(sum(c * c for c in v1))
    n2 = sqrt(sum(c * c for c in v2))
    dotp = sum(p * q for p, q in zip(v1, v2))
    return acos(dotp / (n1 * n2)) * 180 / pi
def getangledihedral(atm1,atm2,atm3,atm4):
    """Return the signed dihedral angle (degrees) defined by four atoms.

    Sign is taken from which side of the ab x bc plane the cd vector falls on
    (raises ZeroDivisionError for exactly planar configurations, matching the
    original implementation).
    """
    def _vec(src, dst):
        return (dst.x - src.x, dst.y - src.y, dst.z - src.z)

    def _cross(u, v):
        return (u[1] * v[2] - u[2] * v[1],
                u[2] * v[0] - u[0] * v[2],
                u[0] * v[1] - u[1] * v[0])

    ab = _vec(atm1, atm2)
    bc = _vec(atm2, atm3)
    cd = _vec(atm3, atm4)
    p = _cross(ab, bc)   # normal of the first plane
    q = _cross(bc, cd)   # normal of the second plane
    r1 = sum(c * c for c in p)
    r2 = sum(c * c for c in q)
    dp = sum(u * v for u, v in zip(p, q))
    dpcd = sum(u * v for u, v in zip(p, cd))
    magnitude = math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))) * 180 / math.pi
    return (dpcd / abs(dpcd)) * magnitude
def getdihedralstrain(a1,a2,a3,a4,a5):
    """Return the dihedral strain energy of a disulfide bridge from its five
    chi angles (in degrees)."""
    rad = math.pi / 180

    def term(k, angle):
        # 1 + cos(k * angle), with the angle converted to radians
        return 1 + math.cos(k * angle * rad)

    return (8.37 * (term(3, a1) + term(3, a5))
            + 4.18 * (term(3, a2) + term(3, a4))
            + 14.64 * term(2, a3)
            + 2.51 * term(3, a3))
# Disulfide-bond search: S-S distance window in Angstroms.
s_s_l=1.6
s_s_u=2.5
filetxt=open('filelist.txt')
txt_lines=filetxt.read().split('\n')
filetxt.close()
fileout=open('out_C-S-S-C_BACKBONE_scan.txt','w')
f1=open('error_C-S-S-C_scan.txt','w')
intr=[]
lenlines=len(txt_lines)
# Process each PDB file listed in filelist.txt.
for ppp in range(lenlines):
    filename=txt_lines[ppp]
    if filename=='':
        continue
    print('%.2f'%((ppp+1)*100.0/(lenlines-1))+'% ('+str(ppp+1)+'/'+str(lenlines-1)+') Executing for:'+filename)
    file=open(filename,'r')
    lines=file.read().split('\n')
    file.close()
    # Per-file CYS atom buckets: C/D = CB, SX/TX = SG, B/E = CA, A/F = N.
    T=[]
    D=[]
    S=[]
    C=[]
    SX=[]
    TX=[]
    A=[]
    B=[]
    E=[]
    F=[]
    modelno=[]
    try:
        for ln in lines:
            if len(ln)>=6 and (ln[0:4]=='ATOM' or ln[0:6]=='HETATM'):
                # Fixed-column PDB ATOM/HETATM record parsing.
                atm=atom()
                atm.aid=int(ln[6:11])
                atm.atype=ln[12:16].strip()
                atm.rtype=ln[17:20].strip()
                atm.chainid=ln[21]
                atm.rid=int(ln[22:26])
                atm.x=float(ln[30:38])
                atm.y=float(ln[38:46])
                # NOTE(review): z slice starts at 47, the x/y pattern suggests
                # 46:54 -- confirm against the PDB column spec.
                atm.z=float(ln[47:54])
                atm.model=modelno
                symb=ln[13].strip()
                # Only collect CYS atoms, and only from the first model.
                if atm.atype=='CB' and (modelno==1 or modelno==A or modelno==[]) :
                    if atm.rtype=='CYS' :
                        C.append(atm)
                        D.append(atm)
                if atm.atype=='SG'and (modelno==1 or modelno==A or modelno==[]) :
                    if atm.rtype=='CYS':
                        SX.append(atm)
                        TX.append(atm)
                if atm.atype=='CA' and (modelno==1 or modelno==A or modelno==[]) :
                    if atm.rtype=='CYS':
                        B.append(atm)
                        E.append(atm)
                if atm.atype=='N' and (modelno==1 or modelno==A or modelno==[]) :
                    if atm.rtype=='CYS' :
                        A.append(atm)
                        F.append(atm)
            elif len(ln)>=5 and ln[0:5]=='MODEL':
                modelno=int(ln[12:])
    except:
        # Any parse failure: log the file name and continue with what was read.
        f1.write(filename+'\n')
    # Keep only SG atoms whose residue has a single SG (drop altloc duplicates).
    for k in SX:
        for k1 in SX:
            if k1.chainid==k.chainid:
                if k1.rid==k.rid and k1.aid!=k.aid :
                    break
        else:
            S.append(k)
    for m in TX:
        for m1 in TX:
            if m1.chainid==m.chainid:
                if m1.rid==m.rid and m1.aid!=m.aid :
                    break
        else:
            T.append(m)
    # Match up N/CA/CB/SG within each CYS residue on both sides of a candidate
    # S-S bond, then compute the five backbone dihedrals and the strain energy.
    for a in range(len(A)):
        for b in range(len(B)):
            if A[a].rid==B[b].rid:
                for j in range(len(C)):
                    for k in range(len(S)):
                        if C[j].rid==S[k].rid and C[j].rid==B[b].rid and C[j].chainid==B[b].chainid==S[k].chainid==A[a].chainid :
                            for m in range(len(T)):
                                if getlen(S[k],T[m])>=s_s_l and getlen(S[k],T[m])<=s_s_u and S[k].rid<T[m].rid :
                                    for n in range(len(D)):
                                        for e in range(len(E)):
                                            if E[e].rid==D[n].rid:
                                                for f in range(len(F)):
                                                    if D[n].rid==T[m].rid and E[e].rid==F[f].rid and D[n].chainid==T[m].chainid==E[e].chainid==F[f].chainid :
                                                        a1=getangledihedral(A[a],B[b],C[j],S[k])
                                                        a2=getangledihedral(B[b],C[j],S[k],T[m])
                                                        a3=getangledihedral(C[j],S[k],T[m],D[n])
                                                        a4=getangledihedral(S[k],T[m],D[n],E[e])
                                                        a5=getangledihedral(T[m],D[n],E[e],F[f])
                                                        dse=getdihedralstrain(a1,a2,a3,a4,a5)
                                                        intr.append([])
                                                        intr[len(intr)-1].append(filename)
                                                        intr[len(intr)-1].append(C[j].chainid)
                                                        intr[len(intr)-1].append(C[j].rid)
                                                        intr[len(intr)-1].append(T[m].rid)
                                                        intr[len(intr)-1].append(T[m].chainid)
                                                        intr[len(intr)-1].append(getlen(C[j],S[k]))
                                                        intr[len(intr)-1].append(getlen(T[m],S[k]))
                                                        intr[len(intr)-1].append(getlen(T[m],D[n]))
                                                        intr[len(intr)-1].append(a1)
                                                        intr[len(intr)-1].append(a2)
                                                        intr[len(intr)-1].append(a3)
                                                        intr[len(intr)-1].append(a4)
                                                        intr[len(intr)-1].append(a5)
                                                        intr[len(intr)-1].append(dse)
    # Reset buckets for the next file.
    C=[]
    T=[]
    D=[]
    S=[]
    SX=[]
    TX=[]
    A=[]
    B=[]
    E=[]
    F=[]
    # Flush this file's results as tab-separated rows.
    for line in intr:
        for xxd in line:
            fileout.write(str(xxd))
            fileout.write('\t')
        fileout.write('\n')
    intr=[]
fileout.close()
fileout=open('out_C-S-S-C_BACKBONE_scan.txt','a')
fileout.close()
f1.close()
|
9,878 | 74b38599dd793282612a468a760f6301b9f039d6 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
from . import models
from . import wizards
from odoo import api, SUPERUSER_ID
from odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes
def _preserve_tag_on_taxes(cr, registry):
    """Post-init hook for l10n_lb: keep user-set tax tags and repair accounts.

    Deletes the legacy accounts 5301/5121/999999 and points the default
    debit/credit accounts of the 'Cash' journal at account 53000001.
    """
    preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')
    environment = api.Environment(cr, SUPERUSER_ID, {})
    # Drop the obsolete chart-of-account entries installed by older versions.
    environment['account.account'].search(
        [('code', 'in', ['5301','5121','999999'])]).unlink()
    cash_journal = environment['account.journal'].search(
        [('name', '=', 'Cash'), ('type', '=', 'cash')], limit=1)
    if not cash_journal:
        return
    cash_account = environment['account.account'].search(
        [('code', '=', '53000001')], limit=1)
    cash_journal.write({
        'default_debit_account_id': cash_account.id,
        'default_credit_account_id': cash_account.id,
    })
|
9,879 | 71ac7240287b83be6ec1f2d98e3ee531a8a219e0 | import os.path
import bcolz
import numpy as np
import zmq
# Minimal append-only log service: a REP socket that persists every request
# into an on-disk bcolz ctable and acknowledges each message with b"OK".
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
if os.path.exists('db'):
    print("db found, initializing")
    ct = bcolz.open('db')
else:
    print("db not found, creating")
    ct = bcolz.ctable([np.empty(0, dtype="i8")], names=['data'], rootdir='db')
while True:
    message = socket.recv()
    print("Received request: %s" % message)
    ct.append((message,))
    ct.flush()  # persist to disk before acknowledging
    socket.send(b"OK")
|
9,880 | e14319e705a3c1cdf85e0a2fe77c211e2afa9baa | # -------------------------------------------
# MODULES
# -------------------------------------------
import sys
import platform
if(platform.system()== "Windows"):
dir_sep = "\\"
else:
dir_sep = "/"
import time
import os
import numpy as np
import subprocess
import math
from mathutils import Vector
try:
from CifFile import CifFile
pars_check = False
except:
print("PyCIFRW not installed, try: pip install PyCifRW")
pars_check = True
try:
import bpy
Blender_env = True
except:
print("Not in blender environment.")
# -------------------------------------------
# VARIABLES
# -------------------------------------------
# global variables (defaults; overwritten from the UI by Operator.execute)
file_path = "Select a file" # path to CIF-file
draw_bonds = False # draws bonds between atoms
draw_style = "SPACE FILLING" # sets draw style (key into styledic)
draw_quality = "MED" # sets key for qualitydic
draw_lattice = False # draws unit cell outline
atom_name = False # displays names of atoms
bond_distance = 2 # set the max distance between bound atoms
lattice_size = 0.03 # sets size of lattice borders
bond_radius = 0.05 # radius of bond
add_camera = True # render final image
atom_color = True # draw atoms in color
user_feedback = "" # feedback for the user
print_data = True # print crystal data in terminal after parsing
# dictionaries
# per style: [element-radius scale, bond-radius weight] used in Atom.drawObj
styledic = {
    "SPACE FILLING" : [1,0],
    "BALL AND STICK" : [0.5,0],
    "STICK" : [0,1]
}
# sphere/cylinder tessellation detail per quality setting
qualitydic = {
    "MIN" : 8,
    "LOW" : 16,
    "MED" : 32,
    "HIGH" : 64,
    "MAX" : 128
}
'''
Uncomment this when no external dictionaries are found
# dictionary which couples atoms to a color
colordic = {
"O" : [1,0,0],
"Si" : [0.25,0.25,1],
"Fe" : [1,0.2,0.2],
}
# dictionary which couples atoms to a specific size
sizedic = {
"O" : 0.3,
"Si" : 0.6,
"Fe" : 1.4,
}
'''
# Read in dictionaries from external files
import ast  # stdlib; parses the data files as Python literals only

path = os.path.dirname(os.path.realpath(__file__))
# dictionary which couples atoms to a color
# Color scheme, in RGB percentages, following the CPK convention was extracted from https://en.wikipedia.org/wiki/CPK_coloring#Typical_assignments
# data can be changed by modifying the values in colordic.txt
with open(path+dir_sep+'colordic.txt','r') as inf:
    # ast.literal_eval accepts only literals, so a tampered data file
    # cannot execute arbitrary code the way eval() would.
    colordic = ast.literal_eval(inf.read())
# dictionary which couples atoms to a specific size
# Atom data, in Ångström, was extracted from https://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# data can be changed by modifying the values in sizedic.txt
with open(path+dir_sep+'sizedic.txt','r') as inf:
    sizedic = ast.literal_eval(inf.read())
# ----------------------------------------------
# BLENDER ADD-ON
# ----------------------------------------------
# add-on info consumed by Blender's add-on manager
bl_info = {
    "name": "Crystallographic Drawing Tool for Blender",
    "description": "Add-on for drawing crystals from CIF-files.",
    "author": "Jarrit Boons",
    "blender": (2, 80,0),
    "location": "View3D",
    "category": "Crystallography in Blender"
}
# Operator to open the file browser and select a file
class ScanFileOperator(bpy.types.Operator):
    """File-select operator: stores the chosen path in the module-global file_path."""
    bl_idname = "error.scan_file"
    bl_label = "Scan file for return"
    filepath = bpy.props.StringProperty(subtype="FILE_PATH")
    def execute(self, context):
        # Runs after the user confirms a file in the browser.
        global file_path
        global user_feedback
        user_feedback = "" # clear any stale status message
        file_path = self.filepath
        return {'FINISHED'}
    def invoke(self, context, event):
        # Open Blender's file-select dialog; execute() is called on confirm.
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
def register():
    # NOTE(review): this module-level register() is shadowed by the later
    # register() defined below, so Scene.path_to_file is never installed.
    # The add-on still works because the UI reads the global file_path instead.
    bpy.types.Scene.path_to_file = bpy.props.StringProperty(
        name="",
        description="Path to CIF file",
        default = "empty"
    )
# Operator to hold CDTB-data and program execution
class Operator(bpy.types.Operator):
    """Main operator: copies the UI scene settings into the module globals
    and triggers drawCrystal() on the selected CIF file."""
    bl_idname = "object.cdtb_operator"
    bl_label = "CDTB_operator"
    bl_descriptor = "Operator for drawing crystal"
    # Runs the whole program
    def execute(self, context):
        global pars_check
        global user_feedback
        if(pars_check):
            # PyCIFRW failed to import at module load time
            user_feedback = "CiFFile module not installed"
            return {'FINISHED'}
        if(file_path == "Select a file"):
            print("No file selected")
            user_feedback = "No File selected"
        else:
            user_feedback = "Crystal drawn"
            # Mirror each scene property into the module-level setting.
            global draw_bonds
            draw_bonds = context.scene.draw_bonds
            global bond_distance
            bond_distance = context.scene.bond_distance
            global draw_lattice
            draw_lattice = context.scene.draw_lattice
            global atom_name
            atom_name = context.scene.atom_name
            global print_data
            print_data = context.scene.print_data
            global draw_style
            global atom_color
            draw_style = context.scene.style_selection_mode
            if(draw_style=="STICK"):
                # stick style implies bonds and monochrome atoms
                draw_bonds = True
                atom_color = False
            else:
                atom_color = True
            global draw_quality
            draw_quality = context.scene.quality_selection_mode
            global add_camera
            add_camera = context.scene.add_camera
            drawCrystal(file_path)
        return {'FINISHED'}
    @classmethod
    def register(cls):
        # Install the scene-level properties that back the Panel widgets.
        print("Registered class: %s " % cls.bl_label)
        bpy.types.Scene.draw_bonds = bpy.props.BoolProperty(
            name="Draw bonds",
            description="Draw bonds between elements"
        )
        bpy.types.Scene.bond_distance = bpy.props.FloatProperty(
            name="Bond distance",
            description="Set max distance for bonds to occur",
            default=2,
            min=0.0,
            max=10.0,
            precision=2
        )
        bpy.types.Scene.atom_name = bpy.props.BoolProperty(
            name="Atom names",
            description="Display the name of atoms"
        )
        bpy.types.Scene.draw_lattice = bpy.props.BoolProperty(
            name="Draw lattice",
            description="Draw unit cell outline"
        )
        bpy.types.Scene.print_data = bpy.props.BoolProperty(
            name="Print data",
            description="Print crystal data in terminal"
        )
        # Dropdown menu for drawing style
        selection_style = [
            ("SPACE FILLING", "SPACE FILLING", "", 1),
            ("BALL AND STICK", "BALL AND STICK", "", 2),
            ("STICK", "STICK", "", 3),
        ]
        bpy.types.Scene.style_selection_mode = bpy.props.EnumProperty(
            items=selection_style,
            name="Style"
        )
        # Dropdown menu for drawing quality
        selection_qual = [
            ("MIN", "MIN", "", 1),
            ("LOW", "LOW", "", 2),
            ("MED", "MED", "", 3),
            ("HIGH", "HIGH", "", 4),
            ("MAX", "MAX", "", 5)
        ]
        bpy.types.Scene.quality_selection_mode = bpy.props.EnumProperty(
            items=selection_qual,
            name="Quality",
            default="MED"
        )
        bpy.types.Scene.add_camera = bpy.props.BoolProperty(
            name="Place camera",
            description="Place a camera and light to make rendering possible"
        )
    @classmethod
    def unregister(cls):
        print("Unregistered class: %s " % cls.bl_label)
# Panel to display add-on in Blender environment
class Panel(bpy.types.Panel):
    """Sidebar UI: file picker, draw settings and the Draw Crystal button."""
    bl_idname = "CDTB_Panel"
    bl_label = "CDTB_Panel"
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_context = "objectmode"
    bl_category = "CDTB"
    def draw(self,context):
        # Rebuilt by Blender on every redraw; reads module globals for state.
        scn = context.scene
        layout = self.layout
        layout.label(text = 'Input file',icon_value=112)
        '''
        for i in range(100):
            layout.label(text = str(i),icon_value =i)
        '''
        box = layout.box()
        row = box.row()
        splitrow = row.split(factor=0.075)
        left_col = splitrow.column()
        right_col = splitrow.column()
        left_col.operator('error.scan_file',icon_value=108,text="")
        # show only the file name, not the whole path
        right_col.label(text=file_path.rsplit('\\', 2)[-1])
        layout.label(text = 'Settings',icon_value =117)
        box = layout.box()
        box.prop(scn,'draw_bonds')
        box.prop(scn,'bond_distance')
        box.prop(scn,'draw_lattice')
        box.prop(scn, 'atom_name')
        box.prop(scn,'print_data')
        box.prop(scn, 'style_selection_mode')
        box.prop(scn, 'quality_selection_mode')
        box.prop(scn, 'add_camera')
        layout.separator()
        splitrow = layout.split(factor=0.3)
        col = splitrow.column()
        col.operator('object.cdtb_operator',text="Draw Crystal")
        col = splitrow.column()
        col.label(text=user_feedback)
        layout.separator()
    @classmethod
    def register(cls):
        print("Registered class: %s " % cls.bl_label)
    @classmethod
    def unregister(cls):
        print("Unregistered class: %s " % cls.bl_label)
def register():
    # NOTE(review): shadows the earlier register() above, so the
    # Scene.path_to_file property defined there is never installed.
    bpy.utils.register_class(Operator)
    bpy.utils.register_class(ScanFileOperator)
    bpy.utils.register_class(Panel)
def unregister():
    # Tear down in the reverse spirit of register(); each class's own
    # unregister() classmethod logs the removal.
    bpy.utils.unregister_class(Operator)
    bpy.utils.unregister_class(Panel)
    bpy.utils.unregister_class(ScanFileOperator)
#----------------------------------------------
# MAIN PROGRAM
#----------------------------------------------
class Crysdata():
    """Everything needed to draw one crystal: unit cell, atoms, symmetry
    positions and the fractional-to-cartesian transform matrix."""
    def __init__(self,F,cb):
        # F: short display name; cb: parsed CIF block (PyCIFRW).
        self.start = time.time()
        print("Draw timer started")
        self.name = F
        self.cell = Cell(cb)
        self.atoms = readEl(cb)
        self.pos = readPos(cb)
        c = self.cell
        self.ftoc = self.get_fractional_to_cartesian_matrix(c.alen,c.blen,c.clen,c.alpha,c.beta,c.gamma)
    def printout(self):
        """Print the crystal name, cell, symmetry positions, atoms and ftoc."""
        print(self.name)
        print()
        self.cell.printout()
        print()
        for element in self.pos:
            element.printout()
        print()
        for element in self.atoms:
            element.printout()
        print()
        print("Fractional to cartesian matrix:")
        print(self.ftoc)
    def get_fractional_to_cartesian_matrix(self,a, b, c, alpha, beta, gamma):
        """
        Original code found at: https://gist.github.com/Bismarrck/a68da01f19b39320f78a
        !changed formula to resemble one found on: https://en.wikipedia.org/wiki/Fractional_coordinates
        Return the transformation matrix that converts fractional coordinates to
        cartesian coordinates.
        Parameters
        ----------
        a, b, c : float
            The lengths of the edges.
        alpha, gamma, beta : float
            The angles between the sides, in degrees.
        Returns
        -------
        r : array_like
            The 3x3 rotation matrix. ``V_cart = np.dot(r, V_frac)``.
        """
        alpha = np.deg2rad(alpha)
        beta = np.deg2rad(beta)
        gamma = np.deg2rad(gamma)
        cosa = np.cos(alpha)
        sina = np.sin(alpha)
        cosb = np.cos(beta)
        sinb = np.sin(beta)
        cosg = np.cos(gamma)
        sing = np.sin(gamma)
        # squared scaled cell volume, then the true volume a*b*c*sqrt(...)
        volume = 1.0 - cosa**2.0 - cosb**2.0 - cosg**2.0 + 2.0 * cosa * cosb * cosg
        volume = a*b*c*np.sqrt(volume)
        r = np.zeros((3, 3))
        r[0, 0] = float(a)
        r[0, 1] = float(b * cosg)
        r[0, 2] = float(c * cosb)
        r[1, 0] = float(0)
        r[1, 1] = float(b * sing)
        r[1, 2] = float(c * (cosa - cosb * cosg) / sing)
        r[2, 0] = float(0)
        r[2, 1] = float(0)
        r[2, 2] = float(volume / (a*b*sing))
        return r
    def drawCrystal(self):
        """Draw lattice (optional), atoms, then bonds (optional), timing each stage."""
        if draw_lattice:
            self.drawCell()
            print("Lattice drawn after {:.3f} seconds".format((time.time()-self.start)))
        self.drawAtoms()
        print("Atoms drawn after {:.3f} seconds".format((time.time()-self.start)))
        if(draw_bonds):
            self.drawBonds()
            print("Bonds drawn after {:.3f} seconds".format((time.time()-self.start)))
    def drawAtoms(self):
        """Place one sphere per atom."""
        for a in self.atoms:
            a.drawObj(self.ftoc)
        print("Atoms drawn:",len(self.atoms))
    def drawCell(self):
        """Draw the unit-cell outline: 8 corner spheres joined by 12 edge
        cylinders, merged into a single object."""
        cell_corners=[]
        cell_edges=[]
        # calculate and draw corners
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    bpy.ops.mesh.primitive_uv_sphere_add(size=lattice_size,location=toCarth(self.ftoc,[i,j,k]))
                    activeObject = bpy.context.active_object # Set active object to variable
                    cell_corners.append(activeObject)
                    mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
                    activeObject.data.materials.append(mat) # add the material to the object
                    bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
        # draw lines; index pairs pick the 12 edges of the corner cube above
        for i,j in zip([0,0,0,1,1,2,2,3,4,4,5,6],[1,2,4,3,5,3,6,7,5,6,7,7]):
            cell_edges.append(self.drawLine(cell_corners[i].location,cell_corners[j].location))
        # select all line and corners
        for i in cell_corners:
            i.select_set(action="SELECT")
        for i in cell_edges:
            i.select_set(action="SELECT")
        # set corner in origin as active and join meshes as one object
        bpy.context.view_layer.objects.active = cell_corners[0]
        bpy.ops.object.join()
        print("Cell box drawn")
    def drawLine(self,ac,tc):
        """Draw a thin cylinder from point ac to tc; returns the new object."""
        dx = tc[0] - ac[0]
        dy = tc[1] - ac[1]
        dz = tc[2] - ac[2]
        dist = np.sqrt(dx**2 + dy**2 + dz**2)
        bpy.ops.mesh.primitive_cylinder_add(vertices=qualitydic[draw_quality],radius=lattice_size,depth = dist,location = (dx/2 + ac[0], dy/2 + ac[1], dz/2 + ac[2]))
        activeObject = bpy.context.active_object
        mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
        activeObject.data.materials.append(mat) # add the material to the object
        bpy.context.object.active_material.diffuse_color = [0,0,0] # change color
        # orient the cylinder along ac->tc using spherical angles
        phi = math.atan2(dy, dx)
        theta = math.acos(dz/dist)
        bpy.context.object.rotation_euler[1] = theta
        bpy.context.object.rotation_euler[2] = phi
        return activeObject
    def drawBonds(self):
        """Create a bond for every atom pair closer than bond_distance,
        skipping H-H pairs and already-drawn (reversed) pairs."""
        cnt = 0
        # shared bezier-circle bevel profile that gives bonds their thickness
        bpy.ops.curve.primitive_bezier_circle_add(location=(0,0,0),radius = bond_radius)
        bpy.context.object.name = 'bez'
        for atom in self.atoms:
            for target in self.atoms:
                if atom != target:
                    if("bond{}-{}".format(target.elid,atom.elid)in bpy.data.objects):
                        continue
                    if(atom.sym == 'H' and target.sym == 'H'):
                        continue
                    if calcDistance(self.ftoc,atom,target) <= bond_distance:
                        self.makeBond(atom,target)
                        cnt += 1
        print("Atom bonds drawn:",cnt)
    # This function hooks the bond to the atoms
    def makeBond(self,atom,target):
        """Create one hooked bond curve between two atom objects."""
        if 'OBJECT'!=bpy.context.mode:
            bpy.ops.object.mode_set(mode='OBJECT')
        o1 = bpy.data.objects[atom.elid]
        o2 = bpy.data.objects[target.elid]
        bond = self.hookCurve(o1,o2, bpy.context.scene)
        bpy.context.object.data.bevel_object = bpy.data.objects["bez"]
        bpy.context.object.name = "bond{}-{}".format(atom.elid,target.elid)
        activeObject = bpy.context.active_object # Set active object to variable
        mat = bpy.data.materials.new(name="MaterialName") # set new material to variable
        activeObject.data.materials.append(mat) # add the material to the object
        bpy.context.object.active_material.diffuse_color = [255,255,255] # change color
        if 'OBJECT'!=bpy.context.mode:
            bpy.ops.object.mode_set(mode='OBJECT')
    def hookCurve(self,o1, o2, scn):
        """Create a two-point bezier curve whose endpoints track o1 and o2
        via hook modifiers, so bonds follow the atoms."""
        curve = bpy.data.curves.new("link", 'CURVE')
        curve.dimensions = '3D'
        spline = curve.splines.new('BEZIER')
        spline.bezier_points.add(1)
        p0 = spline.bezier_points[0]
        p1 = spline.bezier_points[1]
        # p0.co = o1.location
        p0.handle_right_type = 'VECTOR'
        # p1.co = o2.location
        p1.handle_left_type = 'VECTOR'
        obj = bpy.data.objects.new("link", curve)
        m0 = obj.modifiers.new("alpha", 'HOOK')
        m0.object = o1
        m1 = obj.modifiers.new("beta", 'HOOK')
        m1.object = o2
        bpy.context.collection.objects.link(obj)
        bpy.context.view_layer.objects.active = obj
        bpy.ops.object.mode_set(mode='EDIT')
        # Reassign the points
        p0 = curve.splines[0].bezier_points[0]
        p1 = curve.splines[0].bezier_points[1]
        # Hook first control point to first atom
        p0.select_control_point = True
        p1.select_control_point = False
        bpy.ops.object.hook_assign(modifier="alpha")
        # Hook second control point to second atom
        p0 = curve.splines[0].bezier_points[0]
        p1 = curve.splines[0].bezier_points[1]
        p1.select_control_point = True
        p0.select_control_point = False
        bpy.ops.object.hook_assign(modifier="beta")
        return obj
class Cell():
    """Unit-cell geometry read from a CIF block: edge lengths and angles."""
    def __init__(self,cb):
        # Pull the six lattice parameters out of the CIF mapping; CIF
        # values arrive as strings, so coerce each one to float.
        for attr, key in (("alen", "_cell_length_a"),
                          ("blen", "_cell_length_b"),
                          ("clen", "_cell_length_c"),
                          ("alpha", "_cell_angle_alpha"),
                          ("beta", "_cell_angle_beta"),
                          ("gamma", "_cell_angle_gamma")):
            setattr(self, attr, float(cb[key]))
    def printout(self):
        """Dump the six cell parameters to stdout."""
        print("alen:{:8} \nblen:{:8} \nclen:{:8} \nalpha:{:8} \nbeta: {:8} \ngamma:{:8}".format(self.alen,self.blen,self.clen,self.alpha,self.beta,self.gamma))
class Atom():
    """One atom site: unique id, element symbol and fractional coordinates."""
    def __init__(self,elid,sym,xpos,ypos,zpos):
        self.elid = elid
        self.sym = sym
        # CIF fields arrive as strings; keep numeric coordinates.
        self.xpos, self.ypos, self.zpos = (float(v) for v in (xpos, ypos, zpos))
    def printout(self):
        """Print a one-line summary of this atom."""
        print("id:{:3} symbol:{:2} x:{:.4f} y:{:.4f} z:{:.4f}".format(self.elid,self.sym,self.xpos,self.ypos,self.zpos))
    def drawObj(self,ftoc):
        """Add a UV sphere for this atom at its cartesian position."""
        # Radius: element radius for ball/space styles, bond radius for stick.
        size = sizedic[self.sym]*styledic[draw_style][0]+bond_radius*styledic[draw_style][1]
        detail = qualitydic[draw_quality]
        bpy.ops.mesh.primitive_uv_sphere_add(segments=detail,ring_count=detail/2,size=size,location=toCarth(ftoc,[self.xpos,self.ypos,self.zpos]))
        bpy.context.object.name = self.elid
        sphere = bpy.context.active_object
        material = bpy.data.materials.new(name="MaterialName")
        sphere.data.materials.append(material)
        if(atom_name):
            bpy.context.object.show_name = True
        # CPK color per element, or plain white when colors are disabled.
        bpy.context.object.active_material.diffuse_color = colordic[self.sym] if atom_color else [1,1,1]
class sympos():
    """One symmetry-equivalent position: the x, y and z expressions from a
    CIF '_symmetry_equiv_pos_as_xyz' loop row (e.g. "x,-y,z+1/2")."""
    def __init__(self,string):
        # string is a loop row whose first field holds the comma-joined triple.
        parts = string[0].split(',')
        self.xsym = parts[0]
        self.ysym = parts[1]
        self.zsym = parts[2]
    def printout(self):
        """Print the three symmetry expressions."""
        print("x:{:8} y:{:8} z:{:8}".format(self.xsym,self.ysym,self.zsym))
def readEl(cb):
    """Read the atom-site loop from a CIF block into Atom objects.

    Duplicate labels get a running suffix so every id is unique:
    the first "Si1" becomes "Si1.0", the next "Si1.1", and so on.
    """
    elements = []
    seen = {}  # label -> number of occurrences seen so far, minus one
    for el in cb.GetLoop("_atom_site_label"):
        label = el[0]
        count = seen.get(label, -1) + 1
        seen[label] = count
        elements.append(Atom("{}.{}".format(label, count), el[1], el[2], el[3], el[4]))
    return elements
def readPos(cb):
    """Read the symmetry-equivalent-positions loop from a CIF block as sympos objects."""
    return [sympos(el) for el in cb.GetLoop("_symmetry_equiv_pos_as_xyz")]
def obabel_fill_unit_cell(cif_file, p1_file):
    """Expand cif_file to P1 symmetry with OpenBabel, writing the result to p1_file."""
    # Notation: obabel [-i<input-type>] <infilename> [-o<output-type>] -O<outfilename> [Options]
    command = ['obabel', '-icif', cif_file, '-ocif', '-O', p1_file, '--fillUC', 'keepconnect']
    subprocess.run(command)
def calcDistance(ftoc,atom1,atom2):
    """Return the cartesian distance between two atoms, converting their
    fractional coordinates with the ftoc matrix first."""
    first = toCarth(ftoc,[atom1.xpos,atom1.ypos,atom1.zpos])
    second = toCarth(ftoc,[atom2.xpos,atom2.ypos,atom2.zpos])
    dx = second[0] - first[0]
    dy = second[1] - first[1]
    dz = second[2] - first[2]
    return np.sqrt(dx**2 + dy**2 + dz**2)
def toCarth(ftoc,V_frac):
    """Convert a fractional coordinate vector to cartesian via the ftoc matrix."""
    return np.asarray(ftoc) @ np.asarray(V_frac)
def look_at(obj_camera, point):
    """Rotate obj_camera so its -Z axis points at the given world-space point."""
    loc_camera = obj_camera.matrix_world.to_translation()
    direction = point - loc_camera
    # point the cameras '-Z' and use its 'Y' as up
    rot_quat = direction.to_track_quat('-Z', 'Y')
    # assume we're using euler rotation
    obj_camera.rotation_euler = rot_quat.to_euler()
def addCamera(x,y,z):
    """Place an orthographic camera (plus a sun light) framed for a cell of extent x, y, z."""
    bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=(5*x,5*y,5*z))
    print("camera added")
    bpy.ops.object.light_add(type='SUN', view_align=False, location=(0, 0, 0))
    obj_camera = bpy.data.objects["Camera"]
    look_at(obj_camera, Vector([0,0,z/4]))
    obj_camera.data.type = 'ORTHO'
    # scale the orthographic view so the whole cell fits in frame
    obj_camera.data.ortho_scale = ((x+y+z))
def clearWS():
    """Delete every object, curve, material and camera so the scene is empty before drawing."""
    if 'OBJECT'!=bpy.context.mode:
        bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete(use_global=False)
    # remove all previous curves
    for i in bpy.data.curves:
        bpy.data.curves.remove(i)
    # remove all previous materials
    for m in bpy.data.materials:
        bpy.data.materials.remove(m)
    # remove all previous camera's
    for c in bpy.data.cameras:
        bpy.data.cameras.remove(c)
    print("Workspace cleared.")
    return
def drawCrystal(file):
    """Top-level pipeline: validate the file, convert it to P1 symmetry via
    OpenBabel, parse the CIF and draw the crystal in Blender."""
    # Check if file is file:
    S = time.time()
    global user_feedback
    ext = file[len(file)-4:]
    if(ext.lower() != ".cif"):
        print("Only cif files can be visualised")
        user_feedback = "Not a cif file"
        return
    # Check OpenBabel installation
    try:
        # Convert the cif file to its P1 symmetry notation as a temporary cif file
        print('Converting %s to P1' %file)
        obabel_fill_unit_cell(file, "temp.CIF")
        cf = CifFile("temp.CIF")
    except:
        print("No OpenBabel installation found, install it from http://openbabel.org/wiki/Category:Installation")
        user_feedback = "OpenBabel not installed"
        #cf = CifFile(file) CifFile apparently can't read in long filepaths
        return
    # Open and parse our cif; F is a short display name (first 3 chars)
    f = file.rsplit(dir_sep, 1)[-1]
    F = f[:3]
    print(f)
    cb = cf.first_block()
    Crystal = Crysdata(F,cb)
    # Print crystal data in terminal if checked
    if(print_data):
        Crystal.printout()
    print("Crystal data read after "+ str(time.time() - S) + " seconds")
    # Draw crystal if in Blender environment
    if(Blender_env):
        clearWS()
        Crystal.drawCrystal()
        bpy.ops.object.select_all(action='DESELECT')
        if(add_camera):
            addCamera(Crystal.cell.alen,Crystal.cell.blen,Crystal.cell.clen)
|
9,881 | 40b94a3be27ebb0d8e3e67fddabe1dc68646169c | from firstfuncs_1618 import *
figdir='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'
figdir_paper='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'
########################################################################################################
########################################################################################################
#### Set up the optimization framework, which allows for varying almost all elements within a prescribed range
########################################################################################################
########################################################################################################
WM=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_2008.nc')
WM_mb=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_mb_2008.nc')
cp=3850
rhow=1025
tera=10**12
#Noresm (taking sea ice into account)
Q=-251*tera/rhow/cp/1e6 #for the Sverdrups
def get_U_S_T_from_WM(WM):
    """Collapse a water-mass dataset into per-water-mass time-mean transport
    (U, Sv), salinity (S) and potential temperature (T) dicts, then append
    the fixed freshwater, sea-ice and heat-flux terms.
    NOTE(review): assumes WM carries WM, TRANS, PSAL, PTMP variables on a
    TIME axis — confirm against the upstream file format.
    """
    U={}
    S={}
    T={}
    for wm in WM.WM:
        # monthly climatology first, then average over the 12 months
        U[str(wm.values)]=float(WM['TRANS'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
        S[str(wm.values)]=float(WM['PSAL'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
        T[str(wm.values)]=float(WM['PTMP'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
    U['SI']=0.073 # NorESM fresh water input v. similar to Kwok et al. 2004 70mSv
    U['FW']=0.028 # mean E-P from JRA55
    U['Q']=Q
    S['SI']=0
    S['FW']=0
    T['SI']=0
    T['FW']=0
    T['Q']=1
    return U,S,T
U,S,T=get_U_S_T_from_WM(WM)
U_mb,S_mb,T_mb=get_U_S_T_from_WM(WM_mb)
def get_U_from_x(x):
    """Unpack the 8-element state vector into a named transport dict.

    Index order matches the model-matrix columns:
    PWS, AWS, DWS, PWN, AWN, FW, SI, Q.
    """
    labels = ('PWS', 'AWS', 'DWS', 'PWN', 'AWN', 'FW', 'SI', 'Q')
    return {name: x[ii] for ii, name in enumerate(labels)}
AM={}
x0={}
# 'base' budget: rows are volume, salt and heat conservation; the columns
# follow the state-vector order PWS,AWS,DWS,PWN,AWN,FW,SI,Q.
AM['base']=array([[1,1,1,1,1,1,1,0],\
    [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
    [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
x0['base']=[U['PWS'],U['AWS'],U['DWS'],U['PWN'],U['AWN'],U['FW'],U['SI'],U['Q']]
# 'massbal' budget: separate mass balance south/north, FW and SI split 50/50.
AM['massbal']=array([[1,1,1,0,0,0.5,0.5,0],\
    [0,0,0,1,1,0.5,0.5,0],\
    [S_mb['PWS'],S_mb['AWS'],S_mb['DWS'],S_mb['PWN'],S_mb['AWN'],S_mb['FW'],S_mb['SI'],0],\
    [T_mb['PWS'],T_mb['AWS'],T_mb['DWS'],T_mb['PWN'],T_mb['AWN'],T_mb['FW'],T_mb['SI'],1]])
x0['massbal']=[U_mb['PWS'],U_mb['AWS'],U_mb['DWS'],U_mb['PWN'],U_mb['AWN'],U_mb['FW'],U_mb['SI'],U_mb['Q']]
zz='base'
# leftover notebook probes: initial-guess residual and two scratch ratios
AM[zz].dot(x0[zz])
16/35
1.5/10
#vars that I want to be handy for later calcs (row weights in the inversion)
Snorm=35
Tnorm=5
def run_inverse_model(zz,U,S,T):
    """Solve the weighted, tapered least-squares inversion for budget `zz`
    ('base' or 'massbal'); returns (solution dict, error dict, solution vector).
    """
    # residual of the initial guess: the correction must absorb -A.x0
    dv=-AM[zz].dot(x0[zz])
    # row weights: volume rows at 1, salt/heat rows normalised by Snorm/Tnorm
    if zz=='base':
        Winv=diag([1,1/Snorm,1/Tnorm])
    elif zz=='massbal':
        Winv=diag([1,1,1/Snorm,1/Tnorm])
    # prior uncertainty on each unknown: 20% of its initial value
    Evec=array([xx/5 for xx in x0[zz]])
    # Evec=hstack((5*[1],0.02,0.02,Qvar))
    E=diag(Evec)
    # SVD of the row- and column-scaled model matrix gives the pseudoinverse
    Umat,D,VmatT=linalg.svd(Winv.dot(AM[zz].dot(E)))
    Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
    Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1/D)
    xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
    # undo the column scaling and add the correction to the initial guess
    xsol_Ad=E.dot(xsol_prime)
    xbase=x0[zz]+xsol_Ad
    # diagonal of the posterior uncertainty estimate
    P=diag(E-E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T))+linalg.inv(Winv)).dot(AM[zz].dot(E)))))
    Ubase=get_U_from_x(xbase)
    Ue=get_U_from_x(P)
    return Ubase,Ue,xbase
Ubase,Ue,xbase=run_inverse_model('base',U,S,T)
Umb_sol,Umb_err,xmb=run_inverse_model('massbal',U_mb,S_mb,T_mb)
coldic={'AWS':'red','DWS':'grey','PWS':'royalblue','PWN':'purple','AWN':'orange','SI':'cyan','FW':'cyan','Q':'limegreen'}
def plot_base_case_simple(Ubase,Ue,plt):
    """Bar-plot the inverse solution (with error bars) against the initial
    transports. `plt` is a filename tag for the saved figure, not pyplot."""
    f,axx=subplots(1,4,figsize=(9,2.5),constrained_layout=True,gridspec_kw=dict(width_ratios=[2,3,1,1]))
    alf=0.75
    capi=7
    #U
    axx[0].bar(range(2),[Ubase[kk] for kk in ['AWS','DWS']],color=[coldic[kk] for kk in ['AWS','DWS']],yerr=[Ue[kk] for kk in ['AWS','DWS']],capsize=capi,alpha=alf)
    axx[0].plot(range(2),[U[kk] for kk in ['AWS','DWS']],'o',color='k')
    ylimi=20
    axx[0].set_ylim(-ylimi,ylimi)
    ylimi=4
    axx[1].set_ylim(-ylimi,ylimi)
    axx[1].bar(range(3),[Ubase[kk] for kk in ['PWS','PWN','AWN']],color=[coldic[kk] for kk in ['PWS','PWN','AWN']],yerr=[Ue[kk] for kk in ['PWS','PWN','AWN']],capsize=capi,alpha=alf)
    axx[1].plot(range(3),[U[kk] for kk in ['PWS','PWN','AWN']],'o',color='k')
    # NOTE(review): mixes U['SI'] with Ubase['FW'] (and Ue['SI'] with Ue['FW']) —
    # possibly intended to be Ubase['SI']+Ubase['FW']; confirm.
    axx[2].bar(range(1),U['SI']+Ubase['FW'],color=coldic['FW'],yerr=Ue['SI']+Ue['FW'],capsize=capi,alpha=alf)
    axx[2].plot(range(1),U['SI']+U['FW'],'o',color='k')
    fwlim=0.2
    axx[2].set_ylim(-fwlim,fwlim)
    fsz=14
    axx[0].set_ylabel('Volume transport [Sv]',fontsize=fsz)
    axx[3].set_ylabel('Heat flux [TW]',fontsize=fsz)
    # convert heat term from Sv-degrees to TW via cp*rho
    axx[3].bar(0,cp*rhow*(Ubase['Q'])/1e6,color=coldic['Q'],yerr=cp*rhow*Ue['Q']/1e6,capsize=capi,alpha=alf)
    axx[3].plot(0,cp*rhow*(U['Q'])/1e6,'o',color='k')
    for ii in range(3):
        axx[ii].axhline(0,color='k')
    axx[0].set_xticks(range(2))
    axx[0].set_xticklabels(['AWS','DWS'])
    axx[1].set_xticks(range(3))
    axx[1].set_xticklabels(['PWS','PWN','AWN'])
    axx[2].set_xticks(range(1))
    axx[2].set_xticklabels(['FW'])
    axx[3].set_xticks([0])
    axx[3].set_xticklabels('Q')
    savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.png',bbox_inches='tight')
    savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.pdf',bbox_inches='tight')
plot_base_case_simple(Ubase,Ue,'base')
U
Ubase['SI']+Ubase['FW']
Ubase['Q']*cp*rhow/1e6
basediff=[(kk,Ubase[kk]-U[kk]) for kk in Ubase]
basediff
plot_base_case_simple(Umb_sol,Umb_err,'mb')
[(kk,Umb_sol[kk]-U_mb[kk]) for kk in Ubase]
##################################################################################
# Calculate fraction of fresh water vs. other water masses that goes into each limb
#################################################################################
#fraction of PWN in DWS limb
epsilon=arange(0,1.1,0.1)
def get_a_b_fracs(Ubase,S):
    """Return (a, b): the fractions of the total freshwater input (FW + SI)
    carried by PWS and DWS respectively, evaluated for every value of the
    module-level epsilon array (epsilon = fraction of PWN routed to DWS)."""
    fw_total = Ubase['FW']+Ubase['SI']
    #fraction of FW in PWS, as a function of epsilon
    a=((1-epsilon)*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['PWS']*(S['PWS']/S['AWS']-1))/fw_total
    #fraction of FW in DWS, as a function of epsilon
    b=(epsilon*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['DWS']*(S['DWS']/S['AWS']-1))/fw_total
    return a,b
S['PWN']/S['AWS']
S['PWS']/S['AWS']
S['DWS']/S['AWS']
Ubase['PWS']
Ubase['DWS']
Ubase['PWN']*(S['PWN']/S['AWS']-1)
Ubase['PWS']*(S['PWS']/S['AWS']-1)
Ubase['DWS']*(S['DWS']/S['AWS']-1)
(Ubase['FW']+Ubase['SI'])
a={}
b={}
a['base'],b['base']=get_a_b_fracs(Ubase,S)
a['mb'],b['mb']=get_a_b_fracs(Umb_sol,S_mb)
[(kk,S[kk]-S_mb[kk]) for kk in S]
def plot_adep():
    """Plot the PWS freshwater fraction `a` against (1 - epsilon) for each
    budget variant stored in the module-level dict `a`."""
    for ii,kk in enumerate(a):
        plot(1-epsilon,a[kk],linewidth=3,label=kk,color='C'+str(ii))
    xlabel('$\mathbf{1-\epsilon}$\nfraction of PWN in PWS')
    ylabel('$\mathbf{a}$\n fraction of (FW + SI) in PWS')
    xlim(0,1)
    axhline(0,color='k')
    legend()
    savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.png',bbox_inches='tight')
    savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.pdf',bbox_inches='tight')
#################################################################################
##### Look into how much Sea ice properties matter
#################################################################################
sivar={}
# Re-run the inversion over a grid of sea-ice salinities and effective
# temperatures; results are stored as sivar[S_SI][T_SI] = solution vector.
for S_SI in range(0,10,2):
    sivar[S_SI]={}
    for T_SI in range(-90,5,10):
        AM=array([[1,1,1,1,1,1,1,0],\
        [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S_SI,0],\
        [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T_SI,1]])
        # same tapered least-squares solve as run_inverse_model, but
        # perturbing about the base solution xbase.
        dv=-AM.dot(xbase)
        # NOTE(review): unit prior on the first five unknowns here, unlike
        # the 20%-of-x0 taper in run_inverse_model — confirm intended.
        Evec=array(hstack(([1]*5,xbase[-3:]/5)))
        E=diag(Evec)
        Winv=diag([1,1/Snorm,1/Tnorm])
        Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
        Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
        Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
        xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
        xsol_Ad=E.dot(xsol_prime)
        sivar[S_SI][T_SI]=xbase+xsol_Ad
def get_mats_from_dic(sivar):
    """Unpack a nested dict {S: {T: solution-vector}} into axis vectors
    (Svec, Tvec) and one (len(Svec), len(Tvec)) matrix per water mass.
    NOTE(review): pairs matrix entries with solution-vector positions by
    iterating the global Ubase dict — relies on its key order matching the
    state-vector order; confirm if either changes.
    """
    Svec=array([float(ff) for ff in sivar])
    Tvec=array([float(ff) for ff in sivar[Svec[0]]])
    simats={}
    for QQ,kk in enumerate(Ubase):
        simats[kk]=zeros((len(Svec),len(Tvec)))
        for ii,ss in enumerate(Svec):
            for jj,tt in enumerate(Tvec):
                simats[kk][ii,jj]=sivar[ss][tt][QQ]
    return Svec,Tvec,simats
Svec,Tvec,simats=get_mats_from_dic(sivar)
def plot_SIresponse():
    """Contour the sensitivity of every budget term to sea-ice salinity and
    effective temperature, plotted as anomalies from the base solution."""
    f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
    # flatten the 2x4 axes grid into a single vector
    axivec=array([])
    for axirow in axx:
        for axi in axirow:
            axivec=hstack((axivec,axi))
    for axi,kk in zip(axivec,simats):
        if (kk=='FW') | (kk=='SI'):
            # freshwater terms shown in mSv
            climi=10
            contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
            axi.set_title(kk+' [mSv]')
            cbar=colorbar(contit,ax=axi,format='%1.0f')
        elif kk=='Q':
            # heat term converted to TW
            climi=30
            contit=axi.contourf(Svec,Tvec,cp*rhow*(simats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
            axi.set_title(kk+' [TW]')
            cbar=colorbar(contit,ax=axi,format='%2.0f')
        else:
            # volume transports in Sv
            climi=0.3
            contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
            axi.set_title(kk+' [Sv]')
            cbar=colorbar(contit,ax=axi,format='%0.2f')
        # thin out every other colorbar tick label
        for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
            label.set_visible(False)
    f.text(0.5, 0, 'sea ice salinity', ha='center',fontsize=14)
    f.text(0.05, 0.5, 'effective sea ice temperature [$^\circ$C]', va='center',rotation='vertical',fontsize=14)
    savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.png',bbox_inches='tight')
    savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.pdf',bbox_inches='tight')
contourf(simats['AWN'].T-Ubase['AWN']+simats['PWN'].T-Ubase['PWN'])
colorbar()
#################################################################################
##### Test dependence on PW salinity (both north and south)
#################################################################################
pwsvar={}
# Re-run the inversion over a grid of Polar Water salinity anomalies
# (offsets added to the base PWN and PWS salinities); results stored as
# pwsvar[S_PWNa][S_PWSa] = solution vector.
for S_PWNa in arange(-1,0.05,0.1):
    pwsvar[S_PWNa]={}
    for S_PWSa in arange(-1.0,0.05,0.1):
        AM=array([[1,1,1,1,1,1,1,0],\
        [S['PWS']+S_PWSa,S['AWS'],S['DWS'],S['PWN']+S_PWNa,S['AWN'],S['FW'],S['SI'],0],\
        [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
        # same tapered least-squares solve as the sea-ice scan above
        dv=-AM.dot(xbase)
        Evec=array(hstack(([1]*5,xbase[-3:]/5)))
        E=diag(Evec)
        Winv=diag([1,1/Snorm,1/Tnorm])
        Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
        Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
        Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
        xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
        xsol_Ad=E.dot(xsol_prime)
        pwsvar[S_PWNa][S_PWSa]=xbase+xsol_Ad
PWN_Svec,PWS_Svec,pwmats=get_mats_from_dic(pwsvar)
####################################################################################################
######## Response is pretty uniform: try to tease out a pattern (and look at other deps?) #######
##################################################################################################
PWN_Smat,PWS_Smat=meshgrid(PWN_Svec,PWS_Svec)
U_si=get_U_from_x(sivar[0][-30])
U_pw=get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])
(U_pw['FW']+U_pw['SI']-(Ubase['FW']+Ubase['SI']))*1e3
U_pw['FW']+U_pw['SI']
Ubase['FW']+Ubase['SI']
U_si
[(kk,U_si[kk]-Ubase[kk]) for kk in Ubase]
[U_si[kk]-Ubase[kk] for kk in Ubase][-1]*cp*rhow/1e6
U_pw['Q']*cp*rhow/1e6
def lineplot_PW_salinity():
    """Three-panel line plot of the solution's dependence on PW salinity.

    Panels: (a) vary PWN salinity only, (b) vary PWS salinity only,
    (c) vary both along the diagonal of the perturbation grid.  Left axis:
    transport anomalies per water mass; twin right axes: fresh-water anomaly
    [mSv] (cyan) and heat-flux anomaly [TW] (green).
    """
    f,axx=subplots(1,3,figsize=(11,3),sharey=True)
    # Unperturbed row/column of the grid, used for the single-salinity panels.
    xind=-1
    yind=-1
    svr=len(PWS_Svec)
    # x-coordinates for each panel (absolute salinities).
    xvar=[(S['PWN']+PWN_Smat)[xind,:],(S['PWS']+PWS_Smat)[:,yind],[(S['PWS']+PWS_Smat)[ii,ii] for ii in range(svr)]]
    # Base-state total fresh-water input, sign-flipped to form anomalies.
    ufw_tot=-Ubase['SI']-Ubase['FW']
    yvar_fw=[pwmats['FW'].T[xind,:]+pwmats['SI'].T[xind,:]+ufw_tot,pwmats['FW'].T[:,yind]+pwmats['SI'].T[:,yind]+ufw_tot,array([pwmats['FW'].T[ii,ii]+pwmats['SI'].T[ii,ii]+ufw_tot for ii in range(svr)])]
    yvar_Q=[pwmats['Q'].T[xind,:]-Ubase['Q'],pwmats['Q'].T[:,yind]-Ubase['Q'],array([pwmats['Q'].T[ii,ii]-Ubase['Q'] for ii in range(svr)])]
    xlab=['PWN salinity','PWS salinity','PWS salinity']
    titvec=['a) Vary PWN salinity\n\nPWS = 34.4','b) Vary PWS salinity\n\nPWN = 33.7','c) Vary both PW salinities']
    lw=2
    # Transport anomaly of each water mass (colors from coldic, defined above).
    for kk in ['AWS','PWS','DWS','AWN','PWN']:
        axx[0].plot(xvar[0],(pwmats[kk].T[xind,:]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
        axx[1].plot(xvar[1],(pwmats[kk].T[:,yind]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
        axx[2].plot(xvar[2],array([(pwmats[kk].T[ii,ii]-Ubase[kk])for ii in range(svr)]),color=coldic[kk],label=kk,linewidth=lw)
    for ii in range(3):
        # Twin axes: fresh-water anomaly (ax1, cyan) and heat anomaly (ax2, green).
        ax1=axx[ii].twinx()
        for ll in ['']:
            ax1.plot(xvar[ii],(yvar_fw[ii])*1e3,color='c',linewidth=lw)
        ax2=axx[ii].twinx()
        ax2.plot(xvar[ii],cp*rhow*(yvar_Q[ii])/1e6,color='limegreen',linewidth=lw)
        axx[ii].set_xlabel(xlab[ii])
        ax1.set_ylim(-10,10)
        ax2.set_ylim(-40,40)
        axx[ii].set_title(titvec[ii],fontweight='bold')
        if ii!=2:
            # Only the rightmost panel carries tick labels on the twin axes.
            ax1.set_yticklabels('')
            ax2.set_yticklabels('')
        axx[ii].set_xlim(xvar[ii][0],xvar[ii][-1])
    axx[0].set_ylim(-1.5,1.5)
    axx[0].set_yticks(arange(-1,1.1,0.5))
    # Offset the heat axis outward so both twin axes stay readable.
    ax2.spines["right"].set_position(("axes", 1.3))
    axx[0].set_ylabel('Transport anomaly [Sv]')
    ax1.set_ylabel('Fresh water flux anomaly [mSv]',color='c')
    ax2.set_ylabel('Heat flux anomaly [TW]',color='limegreen')
    ax1.tick_params(axis='y', colors='c')
    ax2.tick_params(axis='y', colors='limegreen')
    leg=axx[0].legend(loc=(0.5,-0.5),ncol=5,fontsize=13)
    for line in leg.get_lines():
        line.set_linewidth(4.0)
    # Secondary top axis on panel (c) showing the matching PWN salinity.
    axi2=axx[2].twiny()
    axi2.set_xticks(arange(32.8,33.8,0.2))
    axi2.set_xlim(xvar[0][0],xvar[0][-1])
    axi2.set_xlabel('PWN salinity')
    axx[2].axvline(34.4-0.5,color='k',zorder=0)
    # axx[0].set_title('a) Vary PWN salinities\n\n',fontweight='bold')
    # axx[1].set_title('b) Vary PWS salinities\n\n',fontweight='bold')
    # axx[2].set_title('c) Vary both PW salinities',fontweight='bold')
    savefig(figdir_paper+'/PWS_dep.png',bbox_inches='tight')
    savefig(figdir_paper+'/PWS_dep.pdf',bbox_inches='tight')
lineplot_PW_salinity()
37/(56+37+5)
#######################################################################################
############## What happens if we add more FW? (Like 100mSv) ###########################
#######################################################################################
Ubase['FW']
Ubase['SI']
# Sensitivity experiment: impose extra surface fresh water (0-100 mSv in
# 10 mSv steps, U_FW in Sv) and re-solve with the FW/SI terms pinned.
fwvar={}
for U_FW in arange(0,0.11,0.01):
    # Constraint matrix: mass, salt, heat at the unperturbed S and T.
    AM=array([[1,1,1,1,1,1,1,0],\
    [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
    [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
    xinit=xbase.copy()
    # Add the imposed fresh-water transport to element 5 (the FW column).
    xinit[5]=xinit[5]+U_FW
    dv=-AM.dot(xinit)
    # Let transports adjust (scaled by state/5) but pin elements 5-6 (FW, SI)
    # with a near-zero error so they keep the imposed values.
    Evec=xinit/5
    Evec[5:7]=1e-10
    E=diag(Evec)
    Winv=diag([1,1/Snorm,1/Tnorm])
    # Weighted least-squares correction via the SVD pseudo-inverse.
    Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
    Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
    Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
    xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
    xsol_Ad=E.dot(xsol_prime)
    fwvar[U_FW]=xinit+xsol_Ad
U_fwvar=get_U_from_x(fwvar[0.02])
a_fw,b_fw=get_a_b_fracs(U_fwvar,S)
U['FW']+U['SI']
Ubase['FW']+Ubase['SI']+0.05
U_fwvar['FW']+U_fwvar['SI']
U_fwvar['Q']*cp*rhow/1e6
U_fwvar
#######################################################################################
############## What happens if we add more FW and make PWS fresher? ###########################
#######################################################################################
# Combined experiment: both PW salinities 0.5 fresher AND 20 mSv of added
# fresh water, solved once (no loop).
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS']-0.5,S['AWS'],S['DWS'],S['PWN']-0.5,S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
xinit=xbase.copy()
# 20 mSv of extra fresh water in element 5 (the FW column).
xinit[5]=xinit[5]+0.02
dv=-AM.dot(xinit)
# Pin FW and SI (elements 5-6); let the remaining unknowns adjust.
Evec=xinit/5
Evec[5:7]=1e-10
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
# Weighted least-squares correction via the SVD pseudo-inverse.
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
x_both=xinit+xsol_Ad
U_both=get_U_from_x(x_both)
# Salinity dict matching the perturbed matrix, for the fraction calculation.
S_PW=S.copy()
S_PW['PWS']=S['PWS']-0.5
S_PW['PWN']=S['PWN']-0.5
a_both,b_both=get_a_b_fracs(U_both,S_PW)
#######################################################################################
############## Now look at consequences for FW dist ###########################
#######################################################################################
# Fresh-water partition fractions over the PW-salinity grid, as a function of
# epsilon (per the axis labels below, the fraction of PWN routed to DWS).
# NOTE(review): this relies on an `epsilon` defined earlier in the file; it is
# re-assigned a few lines below — confirm the two definitions agree.
a_pwmat=zeros((len(epsilon),shape(pwmats['Q'])[1],shape(pwmats['Q'])[0]))
b_pwmat=a_pwmat.copy()
for ii,ee in enumerate(1-epsilon):
    # a: estuarine-limb fraction (PWS branch); b: overturning-limb (DWS branch).
    a_pwmat[ii,:,:]=(ee*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['PWS'].T*((S['PWS']+PWS_Smat)/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
    b_pwmat[ii,:,:]=((1-ee)*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['DWS'].T*(S['DWS']/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
# Remainder not captured by either limb.
c_pwmat=1-a_pwmat-b_pwmat
PWN_Smat[10,10]
PWS_Smat[10,10]
PWN_Smat[5,5]
PWS_Smat[5,5]
epsilon=arange(0,1.1,0.1)
fwcol='#43a2ca'
ash='d'
def plot_adep_pw():
    """Fresh-water partition vs. how much PWN enters each limb.

    Panel (a): estuarine limb (delta*U_FW against (1-epsilon)*U_PWN);
    panel (b): overturning limb (gamma*U_FW against epsilon*U_PWN).
    Curves: base case (black), Polar Waters 0.5 fresher (purple), +20 mSv
    fresh water (blue).  Circles/diamonds mark the epsilon endpoints; stars
    and dashed lines mark the reference values (56 and 37 mSv).
    """
    f,axx=subplots(1,2,figsize=(11,3.2),sharex=True)
    f.subplots_adjust(wspace=0.3)
    for ii,var in enumerate([a_pwmat,b_pwmat]):
        if ii==0:
            # Estuarine panel: x is the PWN fraction entering PWS.
            xvar=(1-epsilon)
            xvar2=1
            xvar3=0
        else:
            # Overturning panel: x is the PWN fraction entering DWS.
            xvar=epsilon
            xvar2=0
            xvar3=1
        # Indices [10,10] = base-state grid point; [5,5] = both PWs 0.5 fresher.
        axx[ii].plot(xvar*Ubase['PWN'],var[:,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,linewidth=3,color='k',label='Base case',zorder=5)
        axx[ii].plot(xvar*U_pw['PWN'],var[:,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,color='purple',zorder=4,label='Polar Waters fresher by 0.5',linewidth=3)
        # Endpoint markers: circle at one epsilon extreme, diamond at the other.
        axx[ii].plot(xvar2*Ubase['PWN'],var[0,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,'o',color='k',label='',zorder=5)
        axx[ii].plot(xvar2*U_pw['PWN'],var[0,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,'o',color='purple',zorder=4,label='')
        axx[ii].plot(xvar3*Ubase['PWN'],var[-1,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,ash,color='k',label='',zorder=5)
        axx[ii].plot(xvar3*U_pw['PWN'],var[-1,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,ash,color='purple',zorder=4,label='')
        axx[ii].set_ylim(-30,140)
    # Added-fresh-water experiment (single curve per panel).
    axx[0].plot((1-epsilon)*U_fwvar['PWN'],a_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol,label='Add 20 mSv of Fresh Water')
    axx[1].plot(epsilon*U_fwvar['PWN'],b_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol)
    axx[0].plot(U_fwvar['PWN'],a_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
    axx[1].plot(0,b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
    axx[0].plot(0,a_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
    axx[1].plot(U_fwvar['PWN'],b_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
    # Stars: reference points for the base (black) and fresher-PW (purple) cases.
    axx[0].plot(0.5,56,'*',color='k',label='',markersize=10)
    axx[0].plot(1.1,56,'*',color='purple',label='',markersize=10)
    axx[1].plot(1.3,37,'*',color='k',label='',markersize=10)
    axx[1].plot(1,37,'*',color='purple',label='',markersize=10)
    # axx[1].plot(U_fwvar['PWN'],b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'s',color='k',label='')
    # axx[0].plot(1-epsilon,a_both,linewidth=3,color='g',label='Both')
    # axx[1].plot(1-epsilon,b_both,linewidth=3,color='g')
    axx[0].legend(loc=(0.05,-0.5),ncol=3,fontsize=12)
    axx[0].set_title('a) Estuarine limb',fontsize=14)
    axx[1].set_title('b) Overturning limb',fontsize=14)
    axx[0].set_ylabel('$\mathbf{\delta}\ U_{FW}$\nFW transport in $\mathbf{PWS}$ [mSv]')
    axx[1].set_ylabel('$\mathbf{\gamma}\ U_{FW}$\nFW transport in $\mathbf{DWS}$ [mSv]')
    axx[0].set_xlabel('$\mathbf{(1-\epsilon)} \ U_{PWN}$\nPWN transport in $\mathbf{PWS}$ [Sv]')
    axx[1].set_xlabel('$\mathbf{\epsilon} \ U_{PWN}$\nPWN transport in $\mathbf{DWS}$ [Sv]')
    for axi in axx[0],axx[1]:
        axi.axhline(0,color='k')
        axi.set_xlim(-0.05,2.2)
    # Dashed reference lines at the 56 / 37 mSv values marked by the stars.
    axx[0].axhline(56,color='k',linestyle='--')
    axx[1].axhline(37,color='k',linestyle='--')
    savefig(figdir_paper+'/FWfrac_obs_pwdep.png',bbox_inches='tight')
    savefig(figdir_paper+'/FWfrac_obs_pwdep.pdf',bbox_inches='tight')
plot_adep_pw()
def get_PWN_from_FW(x2,y1,y2,y3):
    """Invert the straight line through (0, y1) and (x2, y2).

    Returns the x-value at which that line attains y3 (linear
    interpolation, or extrapolation when y3 lies outside [y1, y2]).
    """
    rise_to_target = y3 - y1
    rise_full = y2 - y1
    return rise_to_target * x2 / rise_full
x3_base_PWS=get_PWN_from_FW(Ubase['PWN'],(Ubase['FW']+Ubase['SI'])*a_pwmat[-1,10,10]*1e3,(Ubase['FW']+Ubase['SI'])*a_pwmat[0,10,10]*1e3,50)
x3_base_PWS
Ubase['PWN']
1-x3_base_PWS/Ubase['PWN']
x3_fresh_PWS=get_PWN_from_FW(U_pw['PWN'],(U_pw['FW']+U_pw['SI'])*a_pwmat[-1,5,5]*1e3,(U_pw['FW']+U_pw['SI'])*a_pwmat[0,5,5]*1e3,50)
x3_fresh_PWS
U_pw['PWN']
def get_AWS_from_PWN(Uvar,Svar,eps):
    """Split the AWS transport implied by salt conservation in each limb.

    ``eps`` is the fraction of PWN routed to the overturning limb.  Returns
    ``(alpha_U, beta_U)``: the AWS transports that balance the estuarine
    (PWS + (1-eps)*PWN) and overturning (DWS + eps*PWN) salt budgets.
    """
    u_pwn = Uvar['PWN']
    s_pwn = Svar['PWN']
    s_aws = Svar['AWS']
    estuarine_salt = Uvar['PWS'] * Svar['PWS'] + (1 - eps) * u_pwn * s_pwn
    overturning_salt = Uvar['DWS'] * Svar['DWS'] + eps * u_pwn * s_pwn
    return -estuarine_salt / s_aws, -overturning_salt / s_aws
get_AWS_from_PWN(Ubase,S,0.65)
get_AWS_from_PWN(U_pw,S_PW,0.65)
############################graveyard
# def plot_in_each(axi):
# axi.plot(S['PWN'],S['PWS'],'ko',markersize=10)
# axi.plot(S['PWN']+PWN_Svec,S['PWN']+PWN_Svec,'r-',linewidth=3)
#
# def plot_PW_Sdep(Svec,Tvec,simats):
# f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# if (kk=='FW') | (kk=='SI'):
# climi=20
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [mSv]')
# cbar=colorbar(contit,ax=axi,format='%1.0f')
# plot_in_each(axi)
# elif kk=='Q':
# climi=30
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,cp*rhow*(pwmats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [TW]')
# cbar=colorbar(contit,ax=axi,format='%2.0f')
# plot_in_each(axi)
# else:
# climi=1.5
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [Sv]')
# cbar=colorbar(contit,ax=axi,format='%0.2f')
# plot_in_each(axi)
# for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
# label.set_visible(False)
# axi.set_ylim(S['PWS']+PWS_Svec[0],S['PWS']+PWS_Svec[-1])
# f.text(0.5, 0, 'PWN salinity', ha='center',fontsize=14)
# f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# savefig(figdir_paper+'_extra_2004/PW_Sdep.png',bbox_inches='tight')
# savefig(figdir_paper+'_extra_2004/PW_Sdep.pdf',bbox_inches='tight')
#
#
# plot_PW_Sdep(PWN_Svec,PWS_Svec,pwmats)
# def plot_PW_Sdep_lines():
# f,axx=subplots(2,4,figsize=(15,6),sharex=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[-2,:],(pwmats[kk].T[-2,:]),label='vary PWN salinity')
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[:,-3],(pwmats[kk].T[:,-3]),label='vary PWS salinity')
# axi.plot(((S['PWN'])-(S['PWS'])),(Ubase[kk]),'ko',label='base case')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[5,5]),'ro',label='both 0.5 fresher')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[0,0]),'go',label='both 1 fresher')
# axi.set_title(kk)
# axi.legend(loc=(1,0.7))
# f.text(0.5, 0, 'PWN salinity - PWS salinity', ha='center',fontsize=14)
# # f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# # savefig(figdir_paper+'/PW_Sdep.png',bbox_inches='tight')
# # savefig(figdir_paper+'/PW_Sdep.pdf',bbox_inches='tight')
#
# plot_PW_Sdep_lines()
# Ubase.keys()
|
9,882 | 11952e60ab95bc1896fd899a5ced126dcafec63a | from django.shortcuts import render
from django.contrib import messages
from django.views.generic import View
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect
from django.db.models import Count
from .forms import UrlForm
from .models import Link
import random
import string
def short_url_gen(stringLength=5):
    """Return a random alphanumeric slug of ``stringLength`` characters."""
    alphabet = string.ascii_letters + string.digits
    chars = [random.choice(alphabet) for _ in range(stringLength)]
    return ''.join(chars)
@require_GET
def Follow(request,shorturl):
    """Resolve a short code, count the visit, and redirect to the target URL.

    Raises Http404 (via get_object_or_404) for unknown codes.
    """
    link = get_object_or_404(Link,shorturl=shorturl)
    # NOTE(review): read-modify-write is racy under concurrent hits; an
    # F('vi') + 1 update would be atomic — confirm whether that matters here.
    link.vi += 1
    print(link.vi)  # NOTE(review): debug print left in; consider logging.
    link.save()
    return HttpResponseRedirect(link.link)
def FormView(request):
    """Render the shortener landing page.

    Context: the URL form class, the current user's links (None when
    anonymous), and the top five links ordered by the 'vi' annotation.
    """
    # NOTE(review): Count('vi') annotates a count of the 'vi' field rather
    # than ordering by the visit counter itself; ordering by '-vi' directly
    # may be the intent — verify against the Link model.
    toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
    if request.user.is_authenticated:
        yl = Link.objects.filter(user = request.user)
    else:
        yl = None
    context = {
        'form' :UrlForm,
        'links':yl,
        't':toplink
    }
    return render(request, 'shortu.html', context)
@require_GET
def info(request,shorturl):
    """Show the details page for one short link (404 when unknown)."""
    link = get_object_or_404(Link,shorturl=shorturl)
    return render(request,'info.html',{'link':link})
@require_POST
def Submit(request):
    """Create a short link from the submitted form.

    If the user supplied a custom slug and it is free, use it.  Otherwise
    fall back to random slugs, lengthening by one character after each
    collision (lengths 1 through 10).  Renders the link info page on
    success, or the home page when the form is invalid or no free slug
    was found.
    """
    form = UrlForm(request.POST)
    if form.is_valid():
        link = form.cleaned_data['url']
        costom = form.cleaned_data['costom']
        if costom:
            if Link.objects.filter(shorturl=costom).exists():
                # Custom slug taken: fall through to random generation.
                # TODO: surface a message to the user instead of silently
                # ignoring their choice (original messages call was broken).
                pass
            else:
                newlink = Link.objects.create(link=link, user=request.user, shorturl=costom)
                return render(request, 'info.html', {'link': newlink})
        for length in range(1, 11):
            newshort = short_url_gen(length)
            # BUG FIX: the original checked `costom` here, so collisions with
            # an existing random slug were never detected and duplicate
            # shorturl rows could be created.
            if Link.objects.filter(shorturl=newshort).exists():
                continue
            newlink = Link.objects.create(link=link, shorturl=newshort, user=request.user)
            return render(request, 'info.html', {'link': newlink})
    return render(request, 'home.html')
9,883 | a8a2d672369f61c6412229380cc6097d152ba126 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 17:13:46 2017
@author: pmonnot
"""
import blpapi
import datetime
# Create a Session
# NOTE: Python 2 syntax (print statements) — this script predates Python 3.
session = blpapi.Session()
# NOTE(review): failures below only print a message and fall through; the
# script then continues and will fail later — consider exiting instead.
if not session.start():
    print "Failed to start session."
if not session.openService("//blp/refdata"):
    print "Failed to open //blp/refdata"
refDataService = session.getService("//blp/refdata")
# Historical daily bars for one security over January 2017.
request = refDataService.createRequest("HistoricalDataRequest")
request.append("securities", "AAPL US Equity")
#FIELDS - if simply one field use: #request.append("fields", "PX_LAST")
#If you wish to loop the fields
field_list = ["PX_OPEN","PX_HIGH","PX_LAST","PX_VOLUME"]
for field in field_list:
    request.append("fields", field)
request.set("startDate", "20170101")
request.set("endDate", "20170201")
# Price-adjustment flags (strings, per the request schema).
request.set("adjustmentFollowDPDF", "False")
request.set("adjustmentAbnormal", "True")
request.set("adjustmentNormal", "True")
request.set("adjustmentSplit", "True")
request.set("periodicitySelection", "DAILY")
request.set("nonTradingDayFillOption", "NON_TRADING_WEEKDAYS") #also takes ALL_CALENDAR_DAYS and ACTIVE_DAYS_ONLY
request.set("nonTradingDayFillMethod", "PREVIOUS_VALUE")
print "Sending Request:", request
session.sendRequest(request)
# Drain the event queue until the final RESPONSE arrives; PARTIAL_RESPONSE
# events carry intermediate batches of the same data and are printed too.
endReached = False
while endReached == False:
    ev = session.nextEvent()
    if ev.eventType() == blpapi.Event.RESPONSE or ev.eventType() == blpapi.Event.PARTIAL_RESPONSE:
        for msg in ev:
            numPoints = msg.getElement("securityData").getElement("fieldData").numValues()
            for i in range(0,numPoints):
                Point = msg.getElement('securityData').getElement('fieldData').getValueAsElement(i)
                print Point.getElement('date').getValue(),'\t',Point.getElement('PX_LAST').getValue(),'\t'
    if ev.eventType() == blpapi.Event.RESPONSE:
        endReached = True
9,884 | a6670d0d09f02b674bc31b770f42d4d8a01a4a4e | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG 2.x bootstrap: locate and load the compiled _SoapySDR extension that
# sits next to this module (imp-based search on Python >= 2.6, plain import
# otherwise).  Machine-generated — comments only; do not alter the logic.
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SoapySDR', [dirname(__file__)])
        except ImportError:
            # No adjacent binary: fall back to a normal import from sys.path.
            import _SoapySDR
            return _SoapySDR
        if fp is not None:
            try:
                _mod = imp.load_module('_SoapySDR', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _SoapySDR = swig_import_helper()
    del swig_import_helper
else:
    import _SoapySDR
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
# SWIG proxy attribute plumbing: attribute get/set on wrapped classes is
# routed through the generated __swig_setmethods__/__swig_getmethods__ tables.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # 'thisown'/'this' manage ownership of the underlying C++ object.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        # static mode forbids attributes not declared by the wrapped type.
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: unknown attributes are allowed and stored in __dict__.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Include the underlying SwigPyObject repr when available.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
# SWIG proxy for the abstract C++ iterator used by the container wrappers
# below.  __init__ always raises; instances come from a container's
# .iterator() method.  Machine-generated — comments only.
class SwigPyIterator(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _SoapySDR.delete_SwigPyIterator
    __del__ = lambda self : None;
    def value(self): return _SoapySDR.SwigPyIterator_value(self)
    def incr(self, n=1): return _SoapySDR.SwigPyIterator_incr(self, n)
    def decr(self, n=1): return _SoapySDR.SwigPyIterator_decr(self, n)
    def distance(self, *args): return _SoapySDR.SwigPyIterator_distance(self, *args)
    def equal(self, *args): return _SoapySDR.SwigPyIterator_equal(self, *args)
    def copy(self): return _SoapySDR.SwigPyIterator_copy(self)
    def next(self): return _SoapySDR.SwigPyIterator_next(self)
    def __next__(self): return _SoapySDR.SwigPyIterator___next__(self)
    def previous(self): return _SoapySDR.SwigPyIterator_previous(self)
    def advance(self, *args): return _SoapySDR.SwigPyIterator_advance(self, *args)
    def __eq__(self, *args): return _SoapySDR.SwigPyIterator___eq__(self, *args)
    def __ne__(self, *args): return _SoapySDR.SwigPyIterator___ne__(self, *args)
    def __iadd__(self, *args): return _SoapySDR.SwigPyIterator___iadd__(self, *args)
    def __isub__(self, *args): return _SoapySDR.SwigPyIterator___isub__(self, *args)
    def __add__(self, *args): return _SoapySDR.SwigPyIterator___add__(self, *args)
    def __sub__(self, *args): return _SoapySDR.SwigPyIterator___sub__(self, *args)
    def __iter__(self): return self
SwigPyIterator_swigregister = _SoapySDR.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
# Kwargs <-> string conversion helpers.  Each def is immediately shadowed by a
# direct binding to the C function (standard SWIG-generated idiom).
def KwargsFromString(*args):
    return _SoapySDR.KwargsFromString(*args)
KwargsFromString = _SoapySDR.KwargsFromString
def KwargsToString(*args):
    return _SoapySDR.KwargsToString(*args)
KwargsToString = _SoapySDR.KwargsToString
# SWIG proxy for SoapySDR's Range type (minimum/maximum/step accessors).
# Machine-generated — comments only.
class Range(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Range, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Range, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _SoapySDR.new_Range(*args)
        try: self.this.append(this)
        except: self.this = this
    def minimum(self): return _SoapySDR.Range_minimum(self)
    def maximum(self): return _SoapySDR.Range_maximum(self)
    def step(self): return _SoapySDR.Range_step(self)
    def __str__(self):
        # "min, max", plus ", step" only when a nonzero step is defined.
        fields = [self.minimum(), self.maximum()]
        if self.step() != 0.0: fields.append(self.step())
        return ', '.join(['%g'%f for f in fields])
    __swig_destroy__ = _SoapySDR.delete_Range
    __del__ = lambda self : None;
Range_swigregister = _SoapySDR.Range_swigregister
Range_swigregister(Range)
class ArgInfo(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ArgInfo, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ArgInfo, name)
__repr__ = _swig_repr
def __init__(self):
this = _SoapySDR.new_ArgInfo()
try: self.this.append(this)
except: self.this = this
__swig_setmethods__["key"] = _SoapySDR.ArgInfo_key_set
__swig_getmethods__["key"] = _SoapySDR.ArgInfo_key_get
if _newclass:key = _swig_property(_SoapySDR.ArgInfo_key_get, _SoapySDR.ArgInfo_key_set)
__swig_setmethods__["value"] = _SoapySDR.ArgInfo_value_set
__swig_getmethods__["value"] = _SoapySDR.ArgInfo_value_get
if _newclass:value = _swig_property(_SoapySDR.ArgInfo_value_get, _SoapySDR.ArgInfo_value_set)
__swig_setmethods__["name"] = _SoapySDR.ArgInfo_name_set
__swig_getmethods__["name"] = _SoapySDR.ArgInfo_name_get
if _newclass:name = _swig_property(_SoapySDR.ArgInfo_name_get, _SoapySDR.ArgInfo_name_set)
__swig_setmethods__["description"] = _SoapySDR.ArgInfo_description_set
__swig_getmethods__["description"] = _SoapySDR.ArgInfo_description_get
if _newclass:description = _swig_property(_SoapySDR.ArgInfo_description_get, _SoapySDR.ArgInfo_description_set)
__swig_setmethods__["units"] = _SoapySDR.ArgInfo_units_set
__swig_getmethods__["units"] = _SoapySDR.ArgInfo_units_get
if _newclass:units = _swig_property(_SoapySDR.ArgInfo_units_get, _SoapySDR.ArgInfo_units_set)
BOOL = _SoapySDR.ArgInfo_BOOL
INT = _SoapySDR.ArgInfo_INT
FLOAT = _SoapySDR.ArgInfo_FLOAT
STRING = _SoapySDR.ArgInfo_STRING
__swig_setmethods__["type"] = _SoapySDR.ArgInfo_type_set
__swig_getmethods__["type"] = _SoapySDR.ArgInfo_type_get
if _newclass:type = _swig_property(_SoapySDR.ArgInfo_type_get, _SoapySDR.ArgInfo_type_set)
__swig_setmethods__["range"] = _SoapySDR.ArgInfo_range_set
__swig_getmethods__["range"] = _SoapySDR.ArgInfo_range_get
if _newclass:range = _swig_property(_SoapySDR.ArgInfo_range_get, _SoapySDR.ArgInfo_range_set)
__swig_setmethods__["options"] = _SoapySDR.ArgInfo_options_set
__swig_getmethods__["options"] = _SoapySDR.ArgInfo_options_get
if _newclass:options = _swig_property(_SoapySDR.ArgInfo_options_get, _SoapySDR.ArgInfo_options_set)
__swig_setmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_set
__swig_getmethods__["optionNames"] = _SoapySDR.ArgInfo_optionNames_get
if _newclass:optionNames = _swig_property(_SoapySDR.ArgInfo_optionNames_get, _SoapySDR.ArgInfo_optionNames_set)
__swig_destroy__ = _SoapySDR.delete_ArgInfo
__del__ = lambda self : None;
ArgInfo_swigregister = _SoapySDR.ArgInfo_swigregister
ArgInfo_swigregister(ArgInfo)
class SoapySDRKwargs(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargs, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargs, name)
__repr__ = _swig_repr
def iterator(self): return _SoapySDR.SoapySDRKwargs_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _SoapySDR.SoapySDRKwargs___nonzero__(self)
def __bool__(self): return _SoapySDR.SoapySDRKwargs___bool__(self)
def __len__(self): return _SoapySDR.SoapySDRKwargs___len__(self)
def __iter__(self): return self.key_iterator()
def iterkeys(self): return self.key_iterator()
def itervalues(self): return self.value_iterator()
def iteritems(self): return self.iterator()
def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargs___getitem__(self, *args)
def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargs___delitem__(self, *args)
def has_key(self, *args): return _SoapySDR.SoapySDRKwargs_has_key(self, *args)
def keys(self): return _SoapySDR.SoapySDRKwargs_keys(self)
def values(self): return _SoapySDR.SoapySDRKwargs_values(self)
def items(self): return _SoapySDR.SoapySDRKwargs_items(self)
def __contains__(self, *args): return _SoapySDR.SoapySDRKwargs___contains__(self, *args)
def key_iterator(self): return _SoapySDR.SoapySDRKwargs_key_iterator(self)
def value_iterator(self): return _SoapySDR.SoapySDRKwargs_value_iterator(self)
def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargs___setitem__(self, *args)
def asdict(self): return _SoapySDR.SoapySDRKwargs_asdict(self)
def __init__(self, *args):
this = _SoapySDR.new_SoapySDRKwargs(*args)
try: self.this.append(this)
except: self.this = this
def empty(self): return _SoapySDR.SoapySDRKwargs_empty(self)
def size(self): return _SoapySDR.SoapySDRKwargs_size(self)
def clear(self): return _SoapySDR.SoapySDRKwargs_clear(self)
def swap(self, *args): return _SoapySDR.SoapySDRKwargs_swap(self, *args)
def get_allocator(self): return _SoapySDR.SoapySDRKwargs_get_allocator(self)
def begin(self): return _SoapySDR.SoapySDRKwargs_begin(self)
def end(self): return _SoapySDR.SoapySDRKwargs_end(self)
def rbegin(self): return _SoapySDR.SoapySDRKwargs_rbegin(self)
def rend(self): return _SoapySDR.SoapySDRKwargs_rend(self)
def count(self, *args): return _SoapySDR.SoapySDRKwargs_count(self, *args)
def erase(self, *args): return _SoapySDR.SoapySDRKwargs_erase(self, *args)
def find(self, *args): return _SoapySDR.SoapySDRKwargs_find(self, *args)
def lower_bound(self, *args): return _SoapySDR.SoapySDRKwargs_lower_bound(self, *args)
def upper_bound(self, *args): return _SoapySDR.SoapySDRKwargs_upper_bound(self, *args)
def __str__(self):
out = list()
for k, v in self.iteritems():
out.append("%s=%s"%(k, v))
return '{'+(', '.join(out))+'}'
__swig_destroy__ = _SoapySDR.delete_SoapySDRKwargs
__del__ = lambda self : None;
SoapySDRKwargs_swigregister = _SoapySDR.SoapySDRKwargs_swigregister
SoapySDRKwargs_swigregister(SoapySDRKwargs)
class SoapySDRKwargsList(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRKwargsList, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SoapySDRKwargsList, name)
__repr__ = _swig_repr
def iterator(self): return _SoapySDR.SoapySDRKwargsList_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _SoapySDR.SoapySDRKwargsList___nonzero__(self)
def __bool__(self): return _SoapySDR.SoapySDRKwargsList___bool__(self)
def __len__(self): return _SoapySDR.SoapySDRKwargsList___len__(self)
def pop(self): return _SoapySDR.SoapySDRKwargsList_pop(self)
def __getslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___getslice__(self, *args)
def __setslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___setslice__(self, *args)
def __delslice__(self, *args): return _SoapySDR.SoapySDRKwargsList___delslice__(self, *args)
def __delitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___delitem__(self, *args)
def __getitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___getitem__(self, *args)
def __setitem__(self, *args): return _SoapySDR.SoapySDRKwargsList___setitem__(self, *args)
def append(self, *args): return _SoapySDR.SoapySDRKwargsList_append(self, *args)
def empty(self): return _SoapySDR.SoapySDRKwargsList_empty(self)
def size(self): return _SoapySDR.SoapySDRKwargsList_size(self)
def clear(self): return _SoapySDR.SoapySDRKwargsList_clear(self)
def swap(self, *args): return _SoapySDR.SoapySDRKwargsList_swap(self, *args)
def get_allocator(self): return _SoapySDR.SoapySDRKwargsList_get_allocator(self)
def begin(self): return _SoapySDR.SoapySDRKwargsList_begin(self)
def end(self): return _SoapySDR.SoapySDRKwargsList_end(self)
def rbegin(self): return _SoapySDR.SoapySDRKwargsList_rbegin(self)
def rend(self): return _SoapySDR.SoapySDRKwargsList_rend(self)
def pop_back(self): return _SoapySDR.SoapySDRKwargsList_pop_back(self)
def erase(self, *args): return _SoapySDR.SoapySDRKwargsList_erase(self, *args)
def __init__(self, *args):
this = _SoapySDR.new_SoapySDRKwargsList(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _SoapySDR.SoapySDRKwargsList_push_back(self, *args)
def front(self): return _SoapySDR.SoapySDRKwargsList_front(self)
def back(self): return _SoapySDR.SoapySDRKwargsList_back(self)
def assign(self, *args): return _SoapySDR.SoapySDRKwargsList_assign(self, *args)
def resize(self, *args): return _SoapySDR.SoapySDRKwargsList_resize(self, *args)
def insert(self, *args): return _SoapySDR.SoapySDRKwargsList_insert(self, *args)
def reserve(self, *args): return _SoapySDR.SoapySDRKwargsList_reserve(self, *args)
def capacity(self): return _SoapySDR.SoapySDRKwargsList_capacity(self)
__swig_destroy__ = _SoapySDR.delete_SoapySDRKwargsList
__del__ = lambda self : None;
SoapySDRKwargsList_swigregister = _SoapySDR.SoapySDRKwargsList_swigregister
SoapySDRKwargsList_swigregister(SoapySDRKwargsList)
class SoapySDRArgInfoList(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRArgInfoList, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SoapySDRArgInfoList, name)
__repr__ = _swig_repr
def iterator(self): return _SoapySDR.SoapySDRArgInfoList_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _SoapySDR.SoapySDRArgInfoList___nonzero__(self)
def __bool__(self): return _SoapySDR.SoapySDRArgInfoList___bool__(self)
def __len__(self): return _SoapySDR.SoapySDRArgInfoList___len__(self)
def pop(self): return _SoapySDR.SoapySDRArgInfoList_pop(self)
def __getslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getslice__(self, *args)
def __setslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setslice__(self, *args)
def __delslice__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delslice__(self, *args)
def __delitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___delitem__(self, *args)
def __getitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___getitem__(self, *args)
def __setitem__(self, *args): return _SoapySDR.SoapySDRArgInfoList___setitem__(self, *args)
def append(self, *args): return _SoapySDR.SoapySDRArgInfoList_append(self, *args)
def empty(self): return _SoapySDR.SoapySDRArgInfoList_empty(self)
def size(self): return _SoapySDR.SoapySDRArgInfoList_size(self)
def clear(self): return _SoapySDR.SoapySDRArgInfoList_clear(self)
def swap(self, *args): return _SoapySDR.SoapySDRArgInfoList_swap(self, *args)
def get_allocator(self): return _SoapySDR.SoapySDRArgInfoList_get_allocator(self)
def begin(self): return _SoapySDR.SoapySDRArgInfoList_begin(self)
def end(self): return _SoapySDR.SoapySDRArgInfoList_end(self)
def rbegin(self): return _SoapySDR.SoapySDRArgInfoList_rbegin(self)
def rend(self): return _SoapySDR.SoapySDRArgInfoList_rend(self)
def pop_back(self): return _SoapySDR.SoapySDRArgInfoList_pop_back(self)
def erase(self, *args): return _SoapySDR.SoapySDRArgInfoList_erase(self, *args)
def __init__(self, *args):
this = _SoapySDR.new_SoapySDRArgInfoList(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _SoapySDR.SoapySDRArgInfoList_push_back(self, *args)
def front(self): return _SoapySDR.SoapySDRArgInfoList_front(self)
def back(self): return _SoapySDR.SoapySDRArgInfoList_back(self)
def assign(self, *args): return _SoapySDR.SoapySDRArgInfoList_assign(self, *args)
def resize(self, *args): return _SoapySDR.SoapySDRArgInfoList_resize(self, *args)
def insert(self, *args): return _SoapySDR.SoapySDRArgInfoList_insert(self, *args)
def reserve(self, *args): return _SoapySDR.SoapySDRArgInfoList_reserve(self, *args)
def capacity(self): return _SoapySDR.SoapySDRArgInfoList_capacity(self)
__swig_destroy__ = _SoapySDR.delete_SoapySDRArgInfoList
__del__ = lambda self : None;
SoapySDRArgInfoList_swigregister = _SoapySDR.SoapySDRArgInfoList_swigregister
SoapySDRArgInfoList_swigregister(SoapySDRArgInfoList)
# Auto-generated SWIG proxy for a C++ std::vector of strings.
class SoapySDRStringList(_object):
    """SWIG-generated list-like proxy; every method delegates to _SoapySDR."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRStringList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRStringList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRStringList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRStringList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRStringList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRStringList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRStringList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRStringList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRStringList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRStringList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRStringList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRStringList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRStringList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRStringList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRStringList_empty(self)
    def size(self): return _SoapySDR.SoapySDRStringList_size(self)
    def clear(self): return _SoapySDR.SoapySDRStringList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRStringList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRStringList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRStringList_begin(self)
    def end(self): return _SoapySDR.SoapySDRStringList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRStringList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRStringList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRStringList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRStringList_erase(self, *args)
    def __init__(self, *args):
        this = _SoapySDR.new_SoapySDRStringList(*args)
        # SWIG idiom: attach the new C++ object to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRStringList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRStringList_front(self)
    def back(self): return _SoapySDR.SoapySDRStringList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRStringList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRStringList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRStringList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRStringList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRStringList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRStringList
    __del__ = lambda self : None;
# Register the proxy class with the C extension.
SoapySDRStringList_swigregister = _SoapySDR.SoapySDRStringList_swigregister
SoapySDRStringList_swigregister(SoapySDRStringList)
# Auto-generated SWIG proxy for a C++ std::vector of SoapySDR range objects.
class SoapySDRRangeList(_object):
    """SWIG-generated list-like proxy; every method delegates to _SoapySDR."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRRangeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRRangeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRRangeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRRangeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRRangeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRRangeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRRangeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRRangeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRRangeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRRangeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRRangeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRRangeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRRangeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRRangeList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRRangeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRRangeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRRangeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRRangeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRRangeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRRangeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRRangeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRRangeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRRangeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRRangeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRRangeList_erase(self, *args)
    def __init__(self, *args):
        this = _SoapySDR.new_SoapySDRRangeList(*args)
        # SWIG idiom: attach the new C++ object to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRRangeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRRangeList_front(self)
    def back(self): return _SoapySDR.SoapySDRRangeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRRangeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRRangeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRRangeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRRangeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRRangeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRRangeList
    __del__ = lambda self : None;
# Register the proxy class with the C extension.
SoapySDRRangeList_swigregister = _SoapySDR.SoapySDRRangeList_swigregister
SoapySDRRangeList_swigregister(SoapySDRRangeList)
# Auto-generated SWIG proxy for a C++ std::vector of size_t values.
class SoapySDRSizeList(_object):
    """SWIG-generated list-like proxy; every method delegates to _SoapySDR."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRSizeList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRSizeList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRSizeList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRSizeList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRSizeList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRSizeList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRSizeList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRSizeList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRSizeList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRSizeList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRSizeList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRSizeList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRSizeList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRSizeList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRSizeList_empty(self)
    def size(self): return _SoapySDR.SoapySDRSizeList_size(self)
    def clear(self): return _SoapySDR.SoapySDRSizeList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRSizeList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRSizeList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRSizeList_begin(self)
    def end(self): return _SoapySDR.SoapySDRSizeList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRSizeList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRSizeList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRSizeList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRSizeList_erase(self, *args)
    def __init__(self, *args):
        this = _SoapySDR.new_SoapySDRSizeList(*args)
        # SWIG idiom: attach the new C++ object to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRSizeList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRSizeList_front(self)
    def back(self): return _SoapySDR.SoapySDRSizeList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRSizeList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRSizeList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRSizeList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRSizeList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRSizeList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRSizeList
    __del__ = lambda self : None;
# Register the proxy class with the C extension.
SoapySDRSizeList_swigregister = _SoapySDR.SoapySDRSizeList_swigregister
SoapySDRSizeList_swigregister(SoapySDRSizeList)
# Auto-generated SWIG proxy for a C++ std::vector of doubles.
class SoapySDRDoubleList(_object):
    """SWIG-generated list-like proxy; every method delegates to _SoapySDR."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SoapySDRDoubleList, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SoapySDRDoubleList, name)
    __repr__ = _swig_repr
    def iterator(self): return _SoapySDR.SoapySDRDoubleList_iterator(self)
    def __iter__(self): return self.iterator()
    def __nonzero__(self): return _SoapySDR.SoapySDRDoubleList___nonzero__(self)
    def __bool__(self): return _SoapySDR.SoapySDRDoubleList___bool__(self)
    def __len__(self): return _SoapySDR.SoapySDRDoubleList___len__(self)
    def pop(self): return _SoapySDR.SoapySDRDoubleList_pop(self)
    def __getslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___getslice__(self, *args)
    def __setslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___setslice__(self, *args)
    def __delslice__(self, *args): return _SoapySDR.SoapySDRDoubleList___delslice__(self, *args)
    def __delitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___delitem__(self, *args)
    def __getitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___getitem__(self, *args)
    def __setitem__(self, *args): return _SoapySDR.SoapySDRDoubleList___setitem__(self, *args)
    def append(self, *args): return _SoapySDR.SoapySDRDoubleList_append(self, *args)
    def empty(self): return _SoapySDR.SoapySDRDoubleList_empty(self)
    def size(self): return _SoapySDR.SoapySDRDoubleList_size(self)
    def clear(self): return _SoapySDR.SoapySDRDoubleList_clear(self)
    def swap(self, *args): return _SoapySDR.SoapySDRDoubleList_swap(self, *args)
    def get_allocator(self): return _SoapySDR.SoapySDRDoubleList_get_allocator(self)
    def begin(self): return _SoapySDR.SoapySDRDoubleList_begin(self)
    def end(self): return _SoapySDR.SoapySDRDoubleList_end(self)
    def rbegin(self): return _SoapySDR.SoapySDRDoubleList_rbegin(self)
    def rend(self): return _SoapySDR.SoapySDRDoubleList_rend(self)
    def pop_back(self): return _SoapySDR.SoapySDRDoubleList_pop_back(self)
    def erase(self, *args): return _SoapySDR.SoapySDRDoubleList_erase(self, *args)
    def __init__(self, *args):
        this = _SoapySDR.new_SoapySDRDoubleList(*args)
        # SWIG idiom: attach the new C++ object to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    def push_back(self, *args): return _SoapySDR.SoapySDRDoubleList_push_back(self, *args)
    def front(self): return _SoapySDR.SoapySDRDoubleList_front(self)
    def back(self): return _SoapySDR.SoapySDRDoubleList_back(self)
    def assign(self, *args): return _SoapySDR.SoapySDRDoubleList_assign(self, *args)
    def resize(self, *args): return _SoapySDR.SoapySDRDoubleList_resize(self, *args)
    def insert(self, *args): return _SoapySDR.SoapySDRDoubleList_insert(self, *args)
    def reserve(self, *args): return _SoapySDR.SoapySDRDoubleList_reserve(self, *args)
    def capacity(self): return _SoapySDR.SoapySDRDoubleList_capacity(self)
    __swig_destroy__ = _SoapySDR.delete_SoapySDRDoubleList
    __del__ = lambda self : None;
# Register the proxy class with the C extension.
SoapySDRDoubleList_swigregister = _SoapySDR.SoapySDRDoubleList_swigregister
SoapySDRDoubleList_swigregister(SoapySDRDoubleList)
# SWIG proxy for the StreamResult struct returned by the stream call wrappers.
class StreamResult(_object):
    """Aggregate result of a stream operation.

    Exposes the fields set by the C++ layer:
    ret (element count or error code), flags, timeNs, chanMask.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, StreamResult, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, StreamResult, name)
    __repr__ = _swig_repr
    def __init__(self):
        this = _SoapySDR.new_StreamResult()
        # SWIG idiom: attach the new C++ object to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    # Field accessors are routed through the C extension's get/set functions;
    # on new-style classes they are additionally exposed as properties.
    __swig_setmethods__["ret"] = _SoapySDR.StreamResult_ret_set
    __swig_getmethods__["ret"] = _SoapySDR.StreamResult_ret_get
    if _newclass:ret = _swig_property(_SoapySDR.StreamResult_ret_get, _SoapySDR.StreamResult_ret_set)
    __swig_setmethods__["flags"] = _SoapySDR.StreamResult_flags_set
    __swig_getmethods__["flags"] = _SoapySDR.StreamResult_flags_get
    if _newclass:flags = _swig_property(_SoapySDR.StreamResult_flags_get, _SoapySDR.StreamResult_flags_set)
    __swig_setmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_set
    __swig_getmethods__["timeNs"] = _SoapySDR.StreamResult_timeNs_get
    if _newclass:timeNs = _swig_property(_SoapySDR.StreamResult_timeNs_get, _SoapySDR.StreamResult_timeNs_set)
    __swig_setmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_set
    __swig_getmethods__["chanMask"] = _SoapySDR.StreamResult_chanMask_get
    if _newclass:chanMask = _swig_property(_SoapySDR.StreamResult_chanMask_get, _SoapySDR.StreamResult_chanMask_set)
    def __str__(self):
        # Human-readable summary; chanMask is deliberately omitted here.
        return "ret=%s, flags=%s, timeNs=%s"%(self.ret, self.flags, self.timeNs)
    __swig_destroy__ = _SoapySDR.delete_StreamResult
    __del__ = lambda self : None;
# Register the proxy class with the C extension.
StreamResult_swigregister = _SoapySDR.StreamResult_swigregister
StreamResult_swigregister(StreamResult)
# --- Stream direction and stream-flag constants, re-exported from the C extension ---
SOAPY_SDR_TX = _SoapySDR.SOAPY_SDR_TX
SOAPY_SDR_RX = _SoapySDR.SOAPY_SDR_RX
SOAPY_SDR_END_BURST = _SoapySDR.SOAPY_SDR_END_BURST
SOAPY_SDR_HAS_TIME = _SoapySDR.SOAPY_SDR_HAS_TIME
SOAPY_SDR_END_ABRUPT = _SoapySDR.SOAPY_SDR_END_ABRUPT
SOAPY_SDR_ONE_PACKET = _SoapySDR.SOAPY_SDR_ONE_PACKET
SOAPY_SDR_MORE_FRAGMENTS = _SoapySDR.SOAPY_SDR_MORE_FRAGMENTS
SOAPY_SDR_WAIT_TRIGGER = _SoapySDR.SOAPY_SDR_WAIT_TRIGGER
def SoapySDR_errToStr(*args):
  return _SoapySDR.SoapySDR_errToStr(*args)
# SWIG pattern: the Python wrapper is immediately replaced by the C function.
SoapySDR_errToStr = _SoapySDR.SoapySDR_errToStr
# --- Error-code constants (negative return values from stream calls) ---
SOAPY_SDR_TIMEOUT = _SoapySDR.SOAPY_SDR_TIMEOUT
SOAPY_SDR_STREAM_ERROR = _SoapySDR.SOAPY_SDR_STREAM_ERROR
SOAPY_SDR_CORRUPTION = _SoapySDR.SOAPY_SDR_CORRUPTION
SOAPY_SDR_OVERFLOW = _SoapySDR.SOAPY_SDR_OVERFLOW
SOAPY_SDR_NOT_SUPPORTED = _SoapySDR.SOAPY_SDR_NOT_SUPPORTED
SOAPY_SDR_TIME_ERROR = _SoapySDR.SOAPY_SDR_TIME_ERROR
SOAPY_SDR_UNDERFLOW = _SoapySDR.SOAPY_SDR_UNDERFLOW
# --- Version information ---
SOAPY_SDR_API_VERSION = _SoapySDR.SOAPY_SDR_API_VERSION
SOAPY_SDR_ABI_VERSION = _SoapySDR.SOAPY_SDR_ABI_VERSION
def SoapySDR_getAPIVersion():
  return _SoapySDR.SoapySDR_getAPIVersion()
SoapySDR_getAPIVersion = _SoapySDR.SoapySDR_getAPIVersion
def SoapySDR_getABIVersion():
  return _SoapySDR.SoapySDR_getABIVersion()
SoapySDR_getABIVersion = _SoapySDR.SoapySDR_getABIVersion
def SoapySDR_getLibVersion():
  return _SoapySDR.SoapySDR_getLibVersion()
SoapySDR_getLibVersion = _SoapySDR.SoapySDR_getLibVersion
# --- Stream sample-format identifiers (complex and real, various widths) ---
SOAPY_SDR_CF64 = _SoapySDR.SOAPY_SDR_CF64
SOAPY_SDR_CF32 = _SoapySDR.SOAPY_SDR_CF32
SOAPY_SDR_CS32 = _SoapySDR.SOAPY_SDR_CS32
SOAPY_SDR_CU32 = _SoapySDR.SOAPY_SDR_CU32
SOAPY_SDR_CS16 = _SoapySDR.SOAPY_SDR_CS16
SOAPY_SDR_CU16 = _SoapySDR.SOAPY_SDR_CU16
SOAPY_SDR_CS12 = _SoapySDR.SOAPY_SDR_CS12
SOAPY_SDR_CU12 = _SoapySDR.SOAPY_SDR_CU12
SOAPY_SDR_CS8 = _SoapySDR.SOAPY_SDR_CS8
SOAPY_SDR_CU8 = _SoapySDR.SOAPY_SDR_CU8
SOAPY_SDR_CS4 = _SoapySDR.SOAPY_SDR_CS4
SOAPY_SDR_CU4 = _SoapySDR.SOAPY_SDR_CU4
SOAPY_SDR_F64 = _SoapySDR.SOAPY_SDR_F64
SOAPY_SDR_F32 = _SoapySDR.SOAPY_SDR_F32
SOAPY_SDR_S32 = _SoapySDR.SOAPY_SDR_S32
SOAPY_SDR_U32 = _SoapySDR.SOAPY_SDR_U32
SOAPY_SDR_S16 = _SoapySDR.SOAPY_SDR_S16
SOAPY_SDR_U16 = _SoapySDR.SOAPY_SDR_U16
SOAPY_SDR_S8 = _SoapySDR.SOAPY_SDR_S8
SOAPY_SDR_U8 = _SoapySDR.SOAPY_SDR_U8
def SoapySDR_formatToSize(*args):
  return _SoapySDR.SoapySDR_formatToSize(*args)
# SWIG pattern: the Python wrapper is immediately replaced by the C function.
SoapySDR_formatToSize = _SoapySDR.SoapySDR_formatToSize
# --- Logging level constants ---
SOAPY_SDR_FATAL = _SoapySDR.SOAPY_SDR_FATAL
SOAPY_SDR_CRITICAL = _SoapySDR.SOAPY_SDR_CRITICAL
SOAPY_SDR_ERROR = _SoapySDR.SOAPY_SDR_ERROR
SOAPY_SDR_WARNING = _SoapySDR.SOAPY_SDR_WARNING
SOAPY_SDR_NOTICE = _SoapySDR.SOAPY_SDR_NOTICE
SOAPY_SDR_INFO = _SoapySDR.SOAPY_SDR_INFO
SOAPY_SDR_DEBUG = _SoapySDR.SOAPY_SDR_DEBUG
SOAPY_SDR_TRACE = _SoapySDR.SOAPY_SDR_TRACE
SOAPY_SDR_SSI = _SoapySDR.SOAPY_SDR_SSI
def SoapySDR_log(*args):
  return _SoapySDR.SoapySDR_log(*args)
SoapySDR_log = _SoapySDR.SoapySDR_log
def SoapySDR_setLogLevel(*args):
  return _SoapySDR.SoapySDR_setLogLevel(*args)
SoapySDR_setLogLevel = _SoapySDR.SoapySDR_setLogLevel
# --- Unprefixed convenience wrappers (C++ namespace SoapySDR:: functions) ---
# Each def is immediately replaced by the C extension's function object,
# following the standard SWIG flattening pattern.
def errToStr(*args):
  return _SoapySDR.errToStr(*args)
errToStr = _SoapySDR.errToStr
def getAPIVersion():
  return _SoapySDR.getAPIVersion()
getAPIVersion = _SoapySDR.getAPIVersion
def getABIVersion():
  return _SoapySDR.getABIVersion()
getABIVersion = _SoapySDR.getABIVersion
def getLibVersion():
  return _SoapySDR.getLibVersion()
getLibVersion = _SoapySDR.getLibVersion
def getRootPath():
  return _SoapySDR.getRootPath()
getRootPath = _SoapySDR.getRootPath
def listSearchPaths():
  return _SoapySDR.listSearchPaths()
listSearchPaths = _SoapySDR.listSearchPaths
def listModules(*args):
  return _SoapySDR.listModules(*args)
listModules = _SoapySDR.listModules
def loadModule(*args):
  return _SoapySDR.loadModule(*args)
loadModule = _SoapySDR.loadModule
def getLoaderResult(*args):
  return _SoapySDR.getLoaderResult(*args)
getLoaderResult = _SoapySDR.getLoaderResult
def unloadModule(*args):
  return _SoapySDR.unloadModule(*args)
unloadModule = _SoapySDR.unloadModule
def loadModules():
  return _SoapySDR.loadModules()
loadModules = _SoapySDR.loadModules
def formatToSize(*args):
  return _SoapySDR.formatToSize(*args)
formatToSize = _SoapySDR.formatToSize
def ticksToTimeNs(*args):
  return _SoapySDR.ticksToTimeNs(*args)
ticksToTimeNs = _SoapySDR.ticksToTimeNs
def timeNsToTicks(*args):
  return _SoapySDR.timeNsToTicks(*args)
timeNsToTicks = _SoapySDR.timeNsToTicks
def log(*args):
  return _SoapySDR.log(*args)
log = _SoapySDR.log
def setLogLevel(*args):
  return _SoapySDR.setLogLevel(*args)
setLogLevel = _SoapySDR.setLogLevel
# SWIG proxy for the SoapySDR Device class (the main driver handle).
class Device(_object):
    """SWIG proxy for a SoapySDR device.

    Instances are created via the static make()/Device_make() factory
    (the direct constructor is disabled below) and released via unmake(),
    which the custom __del__ at the bottom of this class invokes.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Device, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Device, name)
    # Direct construction is forbidden; use the make() factory instead.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _SoapySDR.delete_Device
    # NOTE: this lambda __del__ is shadowed by the def __del__ further down,
    # which calls Device.unmake(self).
    __del__ = lambda self : None;
    # --- Static factory functions exposed on new-style classes ---
    __swig_getmethods__["enumerate"] = lambda x: _SoapySDR.Device_enumerate
    if _newclass:enumerate = staticmethod(_SoapySDR.Device_enumerate)
    __swig_getmethods__["make"] = lambda x: _SoapySDR.Device_make
    if _newclass:make = staticmethod(_SoapySDR.Device_make)
    __swig_getmethods__["unmake"] = lambda x: _SoapySDR.Device_unmake
    if _newclass:unmake = staticmethod(_SoapySDR.Device_unmake)
    # --- Generated one-line delegates to the C extension ---
    def getDriverKey(self): return _SoapySDR.Device_getDriverKey(self)
    def getHardwareKey(self): return _SoapySDR.Device_getHardwareKey(self)
    def getHardwareInfo(self): return _SoapySDR.Device_getHardwareInfo(self)
    def setFrontendMapping(self, *args): return _SoapySDR.Device_setFrontendMapping(self, *args)
    def getFrontendMapping(self, *args): return _SoapySDR.Device_getFrontendMapping(self, *args)
    def getNumChannels(self, *args): return _SoapySDR.Device_getNumChannels(self, *args)
    def getChannelInfo(self, *args): return _SoapySDR.Device_getChannelInfo(self, *args)
    def getFullDuplex(self, *args): return _SoapySDR.Device_getFullDuplex(self, *args)
    def getStreamFormats(self, *args): return _SoapySDR.Device_getStreamFormats(self, *args)
    def getNativeStreamFormat(self, *args): return _SoapySDR.Device_getNativeStreamFormat(self, *args)
    def getStreamArgsInfo(self, *args): return _SoapySDR.Device_getStreamArgsInfo(self, *args)
    def setupStream(self, *args): return _SoapySDR.Device_setupStream(self, *args)
    def closeStream(self, *args): return _SoapySDR.Device_closeStream(self, *args)
    def getStreamMTU(self, *args): return _SoapySDR.Device_getStreamMTU(self, *args)
    def activateStream(self, *args): return _SoapySDR.Device_activateStream(self, *args)
    def deactivateStream(self, *args): return _SoapySDR.Device_deactivateStream(self, *args)
    def readStream(self, *args): return _SoapySDR.Device_readStream(self, *args)
    def writeStream(self, *args): return _SoapySDR.Device_writeStream(self, *args)
    def readStreamStatus(self, *args): return _SoapySDR.Device_readStreamStatus(self, *args)
    def getNumDirectAccessBuffers(self, *args): return _SoapySDR.Device_getNumDirectAccessBuffers(self, *args)
    def getDirectAccessBufferAddrs(self, *args): return _SoapySDR.Device_getDirectAccessBufferAddrs(self, *args)
    def acquireReadBuffer(self, *args): return _SoapySDR.Device_acquireReadBuffer(self, *args)
    def releaseReadBuffer(self, *args): return _SoapySDR.Device_releaseReadBuffer(self, *args)
    def acquireWriteBuffer(self, *args): return _SoapySDR.Device_acquireWriteBuffer(self, *args)
    def releaseWriteBuffer(self, *args): return _SoapySDR.Device_releaseWriteBuffer(self, *args)
    def listAntennas(self, *args): return _SoapySDR.Device_listAntennas(self, *args)
    def setAntenna(self, *args): return _SoapySDR.Device_setAntenna(self, *args)
    def getAntenna(self, *args): return _SoapySDR.Device_getAntenna(self, *args)
    def hasDCOffsetMode(self, *args): return _SoapySDR.Device_hasDCOffsetMode(self, *args)
    def setDCOffsetMode(self, *args): return _SoapySDR.Device_setDCOffsetMode(self, *args)
    def getDCOffsetMode(self, *args): return _SoapySDR.Device_getDCOffsetMode(self, *args)
    def hasDCOffset(self, *args): return _SoapySDR.Device_hasDCOffset(self, *args)
    def setDCOffset(self, *args): return _SoapySDR.Device_setDCOffset(self, *args)
    def getDCOffset(self, *args): return _SoapySDR.Device_getDCOffset(self, *args)
    def hasIQBalance(self, *args): return _SoapySDR.Device_hasIQBalance(self, *args)
    def setIQBalance(self, *args): return _SoapySDR.Device_setIQBalance(self, *args)
    def getIQBalance(self, *args): return _SoapySDR.Device_getIQBalance(self, *args)
    def hasFrequencyCorrection(self, *args): return _SoapySDR.Device_hasFrequencyCorrection(self, *args)
    def setFrequencyCorrection(self, *args): return _SoapySDR.Device_setFrequencyCorrection(self, *args)
    def getFrequencyCorrection(self, *args): return _SoapySDR.Device_getFrequencyCorrection(self, *args)
    def listGains(self, *args): return _SoapySDR.Device_listGains(self, *args)
    def hasGainMode(self, *args): return _SoapySDR.Device_hasGainMode(self, *args)
    def setGainMode(self, *args): return _SoapySDR.Device_setGainMode(self, *args)
    def getGainMode(self, *args): return _SoapySDR.Device_getGainMode(self, *args)
    def setGain(self, *args): return _SoapySDR.Device_setGain(self, *args)
    def getGain(self, *args): return _SoapySDR.Device_getGain(self, *args)
    def getGainRange(self, *args): return _SoapySDR.Device_getGainRange(self, *args)
    def setFrequency(self, *args): return _SoapySDR.Device_setFrequency(self, *args)
    def getFrequency(self, *args): return _SoapySDR.Device_getFrequency(self, *args)
    def listFrequencies(self, *args): return _SoapySDR.Device_listFrequencies(self, *args)
    def getFrequencyRange(self, *args): return _SoapySDR.Device_getFrequencyRange(self, *args)
    def getFrequencyArgsInfo(self, *args): return _SoapySDR.Device_getFrequencyArgsInfo(self, *args)
    def setSampleRate(self, *args): return _SoapySDR.Device_setSampleRate(self, *args)
    def getSampleRate(self, *args): return _SoapySDR.Device_getSampleRate(self, *args)
    def listSampleRates(self, *args): return _SoapySDR.Device_listSampleRates(self, *args)
    def getSampleRateRange(self, *args): return _SoapySDR.Device_getSampleRateRange(self, *args)
    def setBandwidth(self, *args): return _SoapySDR.Device_setBandwidth(self, *args)
    def getBandwidth(self, *args): return _SoapySDR.Device_getBandwidth(self, *args)
    def listBandwidths(self, *args): return _SoapySDR.Device_listBandwidths(self, *args)
    def getBandwidthRange(self, *args): return _SoapySDR.Device_getBandwidthRange(self, *args)
    def setMasterClockRate(self, *args): return _SoapySDR.Device_setMasterClockRate(self, *args)
    def getMasterClockRate(self): return _SoapySDR.Device_getMasterClockRate(self)
    def getMasterClockRates(self): return _SoapySDR.Device_getMasterClockRates(self)
    def listClockSources(self): return _SoapySDR.Device_listClockSources(self)
    def setClockSource(self, *args): return _SoapySDR.Device_setClockSource(self, *args)
    def getClockSource(self): return _SoapySDR.Device_getClockSource(self)
    def listTimeSources(self): return _SoapySDR.Device_listTimeSources(self)
    def setTimeSource(self, *args): return _SoapySDR.Device_setTimeSource(self, *args)
    def getTimeSource(self): return _SoapySDR.Device_getTimeSource(self)
    def hasHardwareTime(self, what=""): return _SoapySDR.Device_hasHardwareTime(self, what)
    def getHardwareTime(self, what=""): return _SoapySDR.Device_getHardwareTime(self, what)
    def setHardwareTime(self, *args): return _SoapySDR.Device_setHardwareTime(self, *args)
    def setCommandTime(self, *args): return _SoapySDR.Device_setCommandTime(self, *args)
    def listSensors(self, *args): return _SoapySDR.Device_listSensors(self, *args)
    def getSensorInfo(self, *args): return _SoapySDR.Device_getSensorInfo(self, *args)
    def readSensor(self, *args): return _SoapySDR.Device_readSensor(self, *args)
    def listRegisterInterfaces(self): return _SoapySDR.Device_listRegisterInterfaces(self)
    def writeRegister(self, *args): return _SoapySDR.Device_writeRegister(self, *args)
    def readRegister(self, *args): return _SoapySDR.Device_readRegister(self, *args)
    def writeRegisters(self, *args): return _SoapySDR.Device_writeRegisters(self, *args)
    def readRegisters(self, *args): return _SoapySDR.Device_readRegisters(self, *args)
    def getSettingInfo(self, *args): return _SoapySDR.Device_getSettingInfo(self, *args)
    def writeSetting(self, *args): return _SoapySDR.Device_writeSetting(self, *args)
    def readSetting(self, *args): return _SoapySDR.Device_readSetting(self, *args)
    def listGPIOBanks(self): return _SoapySDR.Device_listGPIOBanks(self)
    def writeGPIO(self, *args): return _SoapySDR.Device_writeGPIO(self, *args)
    def readGPIO(self, *args): return _SoapySDR.Device_readGPIO(self, *args)
    def writeGPIODir(self, *args): return _SoapySDR.Device_writeGPIODir(self, *args)
    def readGPIODir(self, *args): return _SoapySDR.Device_readGPIODir(self, *args)
    def writeI2C(self, *args): return _SoapySDR.Device_writeI2C(self, *args)
    def readI2C(self, *args): return _SoapySDR.Device_readI2C(self, *args)
    def transactSPI(self, *args): return _SoapySDR.Device_transactSPI(self, *args)
    def listUARTs(self): return _SoapySDR.Device_listUARTs(self)
    def writeUART(self, *args): return _SoapySDR.Device_writeUART(self, *args)
    def readUART(self, *args): return _SoapySDR.Device_readUART(self, *args)
    # Raw stream entry points taking pointer lists; wrapped by the
    # hand-written readStream/writeStream/readStreamStatus overrides below.
    def readStream__(self, *args): return _SoapySDR.Device_readStream__(self, *args)
    def writeStream__(self, *args): return _SoapySDR.Device_writeStream__(self, *args)
    def readStreamStatus__(self, *args): return _SoapySDR.Device_readStreamStatus__(self, *args)
    #call unmake from custom deleter
    def __del__(self):
        Device.unmake(self)
    def __str__(self):
        return "%s:%s"%(self.getDriverKey(), self.getHardwareKey())
    def readStream(self, stream, buffs, numElems, flags = 0, timeoutUs = 100000):
        # Convert buffer objects to raw pointers before calling the C layer.
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.readStream__(stream, ptrs, numElems, flags, timeoutUs)
    def writeStream(self, stream, buffs, numElems, flags = 0, timeNs = 0, timeoutUs = 100000):
        # Convert buffer objects to raw pointers before calling the C layer.
        ptrs = [extractBuffPointer(b) for b in buffs]
        return self.writeStream__(stream, ptrs, numElems, flags, timeNs, timeoutUs)
    def readStreamStatus(self, stream, timeoutUs = 100000):
        return self.readStreamStatus__(stream, timeoutUs)
# Register the Device proxy with the C extension, then expose the static
# factory functions at module level (SWIG flattening pattern: each def is
# immediately replaced by the C function object).
Device_swigregister = _SoapySDR.Device_swigregister
Device_swigregister(Device)
def Device_enumerate(*args):
  return _SoapySDR.Device_enumerate(*args)
Device_enumerate = _SoapySDR.Device_enumerate
def Device_make(*args):
  return _SoapySDR.Device_make(*args)
Device_make = _SoapySDR.Device_make
def Device_unmake(*args):
  return _SoapySDR.Device_unmake(*args)
Device_unmake = _SoapySDR.Device_unmake
# Export all SOAPY_SDR_* constants via the module's public API list.
__all__ = list()
for key in sorted(globals().keys()):
    if key.startswith('SOAPY_SDR_'):
        __all__.append(key)
# Keep a reference to the SWIG proxy class, then shadow it with a subclass
# whose __new__ forwards to the make() factory, so `Device(args)` works
# even though the proxy's own __init__ raises AttributeError.
_Device = Device
class Device(Device):
    def __new__(cls, *args, **kwargs):
        return cls.make(*args, **kwargs)
def extractBuffPointer(buff):
    """Return the raw data address (an integer) backing *buff*.

    Accepts numpy arrays (anything exposing ``__array_interface__``) or
    objects convertible to an integer address.  Raises Exception for
    unrecognized buffer types.
    """
    # numpy arrays expose their base address directly via the array interface.
    if hasattr(buff, '__array_interface__'): return buff.__array_interface__['data'][0]
    if hasattr(buff, '__long__'):
        # BUGFIX: the builtin 'long' does not exist on Python 3, so the
        # original `return long(buff)` raised NameError there.  Fall back
        # to int(), which handles arbitrary-precision values on Python 3.
        try:
            return long(buff)
        except NameError:
            return int(buff)
    if hasattr(buff, '__int__'): return int(buff)
    raise Exception("Unrecognized data format: " + str(type(buff)))
# This file is compatible with both classic and new-style classes.
import scrapy
import time
import os.path
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from tempfile import mkstemp
from shutil import move
from os import fdopen, remove
from datetime import datetime
import logging
# Timestamped log-file name so each scrape run gets its own log.
output_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')
log_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)
# Scrapy spider that drives a real Chrome browser (via Selenium) to crawl
# product data from tekniknet.se.
class ProductSpider(scrapy.Spider):
    # Spider identity and crawl scope used by the Scrapy framework.
    name = "tekniknet_new"
    allowed_domains = ['www.tekniknet.se']
    start_urls = ['https://www.tekniknet.se/#']
    def __init__(self):
        """Launch the Chrome WebDriver used for all page interaction."""
        # self.driver = webdriver.Chrome("./chromedriver.exe")
        options = webdriver.ChromeOptions()
        # Headless mode left disabled; the window is maximized instead so
        # page elements render at full size.
        # options.add_argument("--headless")
        options.add_argument("--start-maximized")
        # NOTE(review): 'chrome_options' is deprecated in newer Selenium
        # releases in favor of 'options=' — confirm the pinned version.
        self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
# Quiet down all the unnecessary logging.
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection').addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
# article_number_list = []
csv_image_url = []
# file_exist = False
old_product_url = []
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'email')))
username = self.driver.find_element_by_id('email')
username.send_keys("info@themobilestore.se")
username = self.driver.find_element_by_id('password')
username.send_keys("order88")
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
#Create temp file
fh, abs_path = mkstemp()
with fdopen(fh,'w') as new_file:
with open("tekniknet.csv") as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
#Remove original file
remove("tekniknet.csv")
#Move new file
move(abs_path, "tekniknet.csv")
with open('tekniknet.csv', 'r') as ins:
for line in ins:
old_product_url.append(line.split(',')[-1])
file = open("tekniknet.csv", "a", errors ='replace')
# file.write('OLD/NEW' + ',' + 'article number' + ',' + 'category1' + ',' + 'category2' + ',' + 'category3' + ',' + 'heading' + ',' + 'description' + ',' + 'current price' + ',' + 'previous price' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'EAN code' + ',' + 'stock' + ',' + 'product url' + '\n')
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info('*************************************************')
self.loggger.info(link1)
for i in range(0, len(list1)-4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')
for child3_1 in wrapper2_1_1:
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath('.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = csv_categories2 + category2.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(".//a")
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link2_2)
self.loggger.info('error')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
# for m in range(0, 5): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.
# for m in range(0, len(list2)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) + ' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath(".//a")
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock3 non-exist')
csv_stock = ''
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
if len(csv_image_url) == 1:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 2:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 3:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 4:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 5:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 6:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 7:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 8:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 9:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 10:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 11:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 12:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 13:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 14:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 15:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 16:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 17:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) >= 18:
file.write('NEW' + ',' + csv_article_number + ',' + list3_categories[k].split('/')[1] + ',' + list3_categories[k].split('/')[2] + ',' + list3_categories[k].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + csv_image_url[17] + ',' + ' ' + ',' + csv_stock + ',' + list3[k] + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
# for m in range(0, 20): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.
# for m in range(0, len(list4)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
self.loggger.info('********************** ' + str(k) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading4 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock4 non-exist')
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price4 non-exist')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description4 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number4 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image4 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
if len(csv_image_url) == 1:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 2:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 3:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 4:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 5:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 6:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 7:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 8:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 9:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 10:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 11:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 12:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 13:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 14:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 15:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 16:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 17:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) >= 18:
file.write('NEW' + ',' + csv_article_number + ',' + list4_categories[m].split('/')[1] + ',' + list4_categories[m].split('/')[2] + ',' + list4_categories[m].split('/')[3] + ',' + csv_heading + ',' + csv_desc + ',' + csv_price_new + ',' + csv_price_old + ',' + csv_image_url[0] + ',' + csv_image_url[1] + ',' + csv_image_url[2] + ',' + csv_image_url[3] + ',' + csv_image_url[4] + ',' + csv_image_url[5] + ',' + csv_image_url[6] + ',' + csv_image_url[7] + ',' + csv_image_url[8] + ',' + csv_image_url[9] + ',' + csv_image_url[10] + ',' + csv_image_url[11] + ',' + csv_image_url[12] + ',' + csv_image_url[13] + ',' + csv_image_url[14] + ',' + csv_image_url[15] + ',' + csv_image_url[16] + ',' + csv_image_url[17] + ',' + ' ' + ',' + csv_stock + ',' + list4[m] + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error4')
file.close()
self.driver.close()
|
9,886 | a8506420b1bc558fa953f0cec3f8c16beaf44909 | import cv2
import os
"""
Video scene stitching: sample one frame per second from a video and stitch
the sampled frames into a single panorama image.
"""
# Root output directory for extracted frames and stitched results.
stich_path="stichImage\\"
def read_video(filename):
    '''
    Extract one frame per second from the video and save each as a JPEG.

    :param filename: path to the video file
    :return: None; frames are written to ``stich_path/<video name>/`` and
             ``stitcher_image`` is then invoked on that directory
    '''
    cap = cv2.VideoCapture(filename)
    rate = cap.get(cv2.CAP_PROP_FPS)  # frames per second of the source
    # BUG FIX: derive the output name up front. Previously it was computed
    # inside the per-second branch, so a clip shorter than one second left
    # `shotname` undefined and the final stitcher_image() call raised
    # NameError.
    shotname = os.path.splitext(os.path.basename(filename))[0]
    out_dir = os.path.join(stich_path, shotname)
    count = 0
    imageCount = 0
    success, frame = cap.read()
    while success:
        success, frame = cap.read()
        count += 1
        if count >= rate:  # one full second of frames has elapsed
            os.makedirs(out_dir, exist_ok=True)
            # frame=cv2.resize(frame,(960,544))
            # imencode + tofile also handles non-ASCII paths on Windows.
            cv2.imencode(".jpg", frame)[1].tofile(
                os.path.join(out_dir, str(imageCount) + '.jpg'))
            imageCount += 1
            count = 0
    cap.release()  # BUG FIX: release the capture handle when done
    stitcher_image(shotname)
def stitcher_image(shotname):
    """
    Stitch the extracted frames together with OpenCV's Stitcher.

    ****Requires OpenCV 3.3.0****
    Stitcher does not run correctly on OpenCV versions below 3.3.0; see
    https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
    :param shotname: name of the sub-directory under ``stich_path`` that
        holds the frames to stitch
    """
    # NOTE(review): os.listdir order is arbitrary, so frames may not be fed
    # to the stitcher in chronological order — confirm whether that matters.
    imgs=[]
    for file in os.listdir(stich_path+shotname):
        imgs.append(cv2.imread(stich_path+shotname+'\\'+file))
    stitcher = cv2.createStitcher(False)
    # stitch() returns (status, panorama); index 1 is the stitched image.
    result = stitcher.stitch(imgs)
    cv2.imwrite(stich_path+shotname+'\\'+"stich_result.jpg", result[1])
def read_file_list(path):
    """Run ``read_video`` on every entry of *path*, if it is a directory."""
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        read_video(path + '\\' + entry)
# read_video('E:\\2.mp4')
|
9,887 | 58ddf496245741498177a67b7ce692b97bbd476a | /usr/share/pyshared/screenlets/plugins/SizeConverter.py |
9,888 | 4db93bdab2d73e7226dcad61827f5faea8513767 | # These are instance types to make available to all AWS EC2 systems, except the .
# PostgreSQL server, until the auto tuning playbook can tune for systems that
# small.
# Small burstable instance types appended to every component's choice list.
AWSGlobalInstanceChoices = [
    't2.nano', 't2.micro',
    't3.nano', 't3.micro',
    't3a.nano', 't3a.micro',
]
class SpecValidator:
    """Validation constraints for a single deployment-spec attribute.

    Parameters
    ----------
    type : str, optional
        Kind of validation to apply (e.g. 'string', 'choice', 'integer').
    default
        Value used when the attribute is not supplied.
    choices : list, optional
        Allowed values when ``type`` is 'choice'.
    min, max : int, optional
        Inclusive bounds when ``type`` is 'integer'.
    """

    def __init__(self, type=None, default=None, choices=None, min=None,
                 max=None):
        self.type = type
        self.default = default
        # BUG FIX: the old signature used `choices=[]`, a mutable default
        # shared by every instance created without explicit choices. Each
        # instance now gets its own fresh list.
        self.choices = choices if choices is not None else []
        self.min = min
        self.max = max
# Default validation specs for machines deployed on AWS EC2, keyed by
# component (OS images, DBT-2 client/driver, HammerDB and PEM servers).
DefaultAWSSpec = {
    'available_os': {
        'CentOS7': {
            'image': SpecValidator(
                type='string',
                default="CentOS Linux 7 x86_64 HVM EBS*"
            ),
            'ssh_user': SpecValidator(
                type='choice',
                choices=['centos'],
                default='centos'
            )
        },
        'RedHat7': {
            'image': SpecValidator(
                type='string',
                default="RHEL-7.8-x86_64*"
            ),
            'ssh_user': SpecValidator(
                type='choice',
                choices=['ec2-user'],
                default='ec2-user'
            )
        },
        'RedHat8': {
            'image': SpecValidator(
                type='string',
                default="RHEL-8.2-x86_64*"
            ),
            'ssh_user': SpecValidator(
                type='choice',
                choices=['ec2-user'],
                default='ec2-user'
            )
        },
        'RockyLinux8': {
            'image': SpecValidator(
                type='string',
                default="Rocky-8-ec2-8.5-20211114.2.x86_64"
            ),
            'ssh_user': SpecValidator(
                type='choice',
                choices=['rocky'],
                default='rocky'
            )
        }
    },
    # Whether DBT-2 benchmarking machines should be provisioned at all.
    'dbt2': SpecValidator(
        type='choice',
        choices=[True, False],
        default=False
    ),
    'dbt2_client': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
            ] + AWSGlobalInstanceChoices,
            default='m5n.xlarge'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
                default='gp2'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            ),
            'iops': SpecValidator(
                type='integer',
                min=100,
                max=64000,
                default=250
            )
        },
    },
    'dbt2_driver': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
            ] + AWSGlobalInstanceChoices,
            default='m5n.xlarge'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
                default='gp2'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            ),
            'iops': SpecValidator(
                type='integer',
                min=100,
                max=64000,
                default=250
            )
        },
    },
    'hammerdb_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'
            ] + AWSGlobalInstanceChoices,
            default='m5n.xlarge'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
                default='gp2'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            ),
            'iops': SpecValidator(
                type='integer',
                min=100,
                max=64000,
                default=250
            )
        },
    },
    'pem_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'c5.large', 'c5.xlarge', 'c5.2xlarge', 'c5.4xlarge',
                'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge', 'c5.24xlarge',
                'c5.metal'
            ] + AWSGlobalInstanceChoices,
            default='c5.xlarge'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],
                default='gp2'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=100
            ),
            'iops': SpecValidator(
                type='integer',
                min=100,
                max=64000,
                default=250
            )
        }
    }
}
# Default validation specs for machines deployed on Microsoft Azure.
# OS entries use Azure's publisher/offer/sku image triple.
DefaultAzureSpec = {
    'available_os': {
        'CentOS7': {
            'publisher': SpecValidator(type='string', default="OpenLogic"),
            'offer': SpecValidator(type='string', default="CentOS"),
            'sku': SpecValidator(type='string', default="7.7"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat7': {
            'publisher': SpecValidator(type='string', default="RedHat"),
            'offer': SpecValidator(type='string', default="RHEL"),
            'sku': SpecValidator(type='string', default="7.8"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat8': {
            'publisher': SpecValidator(type='string', default="RedHat"),
            'offer': SpecValidator(type='string', default="RHEL"),
            'sku': SpecValidator(type='string', default="8.2"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RockyLinux8': {
            'publisher': SpecValidator(type='string', default="Perforce"),
            'offer': SpecValidator(type='string', default="rockylinux8"),
            'sku': SpecValidator(type='string', default="8"),
            'ssh_user': SpecValidator(type='string', default='rocky')
        }
    },
    # Whether DBT-2 benchmarking machines should be provisioned at all.
    'dbt2': SpecValidator(
        type='choice',
        choices=[True, False],
        default=False
    ),
    'dbt2_driver': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        }
    },
    'dbt2_client': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        }
    },
    'pem_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',
                'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',
                'Standard_A8m_v2'
            ],
            default='Standard_A2_v2'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        }
    },
    'hammerdb_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'Standard_D4ds_v4', 'Standard_D8ds_v4'
            ],
            default='Standard_D4ds_v4'
        ),
        'volume': {
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='Standard_LRS'
            )
        },
        'additional_volumes': {
            'count': SpecValidator(
                type='integer',
                min=0,
                max=5,
                default=2
            ),
            'storage_account_type': SpecValidator(
                type='choice',
                choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',
                         'UltraSSD_LRS'],
                default='StandardSSD_LRS'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=100
            )
        }
    }
}
# Default validation specs for machines deployed on Google Cloud.
DefaultGcloudSpec = {
    'available_os': {
        'CentOS7': {
            'image': SpecValidator(type='string', default="centos-7"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat7': {
            'image': SpecValidator(type='string', default="rhel-7"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RedHat8': {
            'image': SpecValidator(type='string', default="rhel-8"),
            'ssh_user': SpecValidator(type='string', default='edbadm')
        },
        'RockyLinux8': {
            'image': SpecValidator(type='string', default="rocky-linux-8"),
            'ssh_user': SpecValidator(type='string', default='rocky')
        }
    },
    # Whether DBT-2 benchmarking machines should be provisioned at all.
    'dbt2': SpecValidator(
        type='choice',
        choices=[True, False],
        default=False
    ),
    'dbt2_client': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
            ],
            default='c2-standard-4'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['pd-standard', 'pd-ssd'],
                default='pd-standard'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            )
        }
    },
    'dbt2_driver': {
        'count': SpecValidator(
            type='integer',
            min=0,
            max=64,
            default=0
        ),
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
            ],
            default='c2-standard-4'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['pd-standard', 'pd-ssd'],
                default='pd-standard'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            )
        }
    },
    'hammerdb_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'c2-standard-4', 'c2-standard-8', 'c2-standard-16'
            ],
            default='c2-standard-4'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['pd-standard', 'pd-ssd'],
                default='pd-standard'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=16000,
                default=50
            )
        },
        'additional_volumes': {
            'count': SpecValidator(
                type='integer',
                min=0,
                max=5,
                default=2
            ),
            'type': SpecValidator(
                type='choice',
                choices=['pd-standard', 'pd-ssd'],
                default='pd-ssd'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=65536,
                default=100
            )
        }
    },
    'pem_server': {
        'instance_type': SpecValidator(
            type='choice',
            choices=[
                'e2-standard-2', 'e2-standard-4', 'e2-standard-8',
                'e2-standard-16', 'e2-standard-32', 'e2-highmem-2',
                'e2-highmem-4', 'e2-highmem-8', 'e2-highmem-16'
            ],
            default='e2-standard-4'
        ),
        'volume': {
            'type': SpecValidator(
                type='choice',
                choices=['pd-standard', 'pd-ssd'],
                default='pd-standard'
            ),
            'size': SpecValidator(
                type='integer',
                min=10,
                max=65536,
                default=100
            )
        }
    }
}
|
9,889 | 841e859feff2151667d70e7bf1829129d1f92cf7 | from flask import Flask, Blueprint, render_template, request, redirect
from repositories import manufacturer_repository, product_repository
from models.manufacturer import Manufacturer
from models.product import Product
# Blueprint grouping all /manufacturers routes; registered by the app factory.
manufacturers_blueprint = Blueprint("manufacturers", __name__)
@manufacturers_blueprint.route("/manufacturers", methods=["GET"])
def manufacturers():
    """List every manufacturer."""
    all_manufacturers = manufacturer_repository.select_all()
    return render_template(
        "manufacturers/index.html", manufacturers=all_manufacturers)
@manufacturers_blueprint.route("/manufacturers/<id>", methods=["GET"])
def show(id):
    """Display a single manufacturer looked up by its id."""
    found = manufacturer_repository.select(id)
    return render_template("manufacturers/show.html", manufacturer=found)
@manufacturers_blueprint.route("/manufacturers/add", methods=["GET"])
def add_manufacturer():
    """Render the blank 'new manufacturer' form."""
    template = "manufacturers/add.html"
    return render_template(template)
@manufacturers_blueprint.route("/manufacturers", methods=["POST"])
def create_manufacturer():
    """Create a manufacturer from the submitted form, then show the list."""
    form = request.form
    new_manufacturer = Manufacturer(
        form["name"], form["address"], form["deactivated"])
    manufacturer_repository.save(new_manufacturer)
    return redirect("/manufacturers")
@manufacturers_blueprint.route("/manufacturers/<id>/edit", methods=["GET"])
def edit_manufacturer(id):
    """Render the edit form pre-filled with this manufacturer's data."""
    existing = manufacturer_repository.select(id)
    return render_template("manufacturers/edit.html", manufacturer=existing)
@manufacturers_blueprint.route("/manufacturers/<id>", methods=["POST"])
def update_manufacturer(id):
    """Apply the submitted form values to the manufacturer with this id."""
    form = request.form
    updated = Manufacturer(
        form["name"], form["address"], form["deactivated"], id)
    manufacturer_repository.update(updated)
    return redirect("/manufacturers")
@manufacturers_blueprint.route("/manufacturers/<id>/delete", methods=["POST"])
def delete_manufacturer(id):
    """Remove the manufacturer, then return to the list view."""
    manufacturer_repository.delete(id)
    return redirect('/manufacturers')
|
9,890 | a028661f9bcaa6dfe5389cb57f31b07d7e981487 | from time import sleep
import sys
def cmdline():
    """Run a tiny interactive command loop (help/quit/echo/pbar/joke).

    Reads one line per iteration from stdin and dispatches on the first
    word; loops until the user types 'quit'.
    """
    available_commands = ['help', 'quit', 'echo', 'pbar', 'joke']
    keepgoing = True
    while keepgoing:
        typed = input("Type something. (Type 'help' for options)")
        words = typed.split(" ")
        command = words[0].lower()
        arguments = words[1:]
        if command == '':
            continue
        if command not in available_commands:
            print(f"-> {command} is an invalid command. Available commands:", available_commands)
            continue
        if command == 'help':
            print('-> Try out the following commands', available_commands)
        if command == 'echo':
            print(f'-> {" ".join(arguments)}')
        if command == 'pbar':
            # Draw a 20-step inline progress bar by rewriting the same line.
            for i in range(21):
                sys.stdout.write('\r')
                sys.stdout.write("[%-20s] %d%%" % ('=' * i, 5 * i))
                sys.stdout.flush()
                sleep(0.25)
            print(' done!')
        if command == 'joke':
            # NOTE: `requests` is third-party; imported lazily so the rest of
            # the tool works without it.
            import requests
            joke = requests.get("https://official-joke-api.appspot.com/random_joke").json()
            print(f"-> {joke['setup']}")
            input("-> (press enter)")
            print(f"-> {joke['punchline']}")
        if command == 'quit':
            # BUG FIX: the farewell used to sit in an `else:` attached to this
            # `if`, so EVERY command except 'quit' printed "exiting..". It now
            # prints only when actually quitting.
            keepgoing = False
            print("exiting..")
# Allow running this module directly as a script.
if __name__ == "__main__":
    cmdline()
|
9,891 | 09ce2aeccfd1f3f4f130fd79001db47485cc95c2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from json import json
# Unit tests (nose-style) for a project-local `json` serializer. Note the
# module imports `json` the *callable* from a local module also named `json`;
# the standard library's json module has no such callable. Each test checks
# the compact string rendering of one kind of input.
def test_json_basestring():
    assert_equals(json("Hello World"), '"Hello World"')
def test_json_integer():
    assert_equals(json(9), "9")
def test_json_float():
    assert_equals(json(1.234), "1.234")
def test_json_array():
    data = [1, 2, 3]
    assert_equals(json(data), '[1,2,3]')
def test_json_array02():
    data = ['bla', 1, 1.2]
    assert_equals(json(data), '["bla",1,1.2]')
def test_json_dict():
    data = { 'foo': 'bar' }
    assert_equals(json(data), '{"foo":"bar"}')
def test_json_dict_list():
    data = { 'foo': [1, 2, 3] }
    assert_equals(json(data), '{"foo":[1,2,3]}')
def test_json_dict_int_key():
    # Non-string keys must be rendered as JSON strings.
    data = {1:[1, 2, 3] }
    assert_equals(json(data), '{"1":[1,2,3]}')
def test_json_dictindict():
    data = { 'foo': {'fizz' : 'buzz'} }
    assert_equals(json(data), '{"foo":{"fizz":"buzz"}}')
def test_json_2_dict():
    # Keys are expected in sorted order.
    data = { 'foo': 'fizz', 'bar' : 'buzz'}
    assert_equals(json(data), '{"bar":"buzz","foo":"fizz"}')
def test_json_2_dict_2():
    data = { 'foo': 'fizz', 'bar' : 'buzz', 'a': [1, 2, 3]}
    assert_equals(json(data), '{"a":[1,2,3],"bar":"buzz","foo":"fizz"}')
def test_empty_list():
    data = []
    assert_equals(json(data), "[]")
def test_empty_dict():
    data = {}
    assert_equals(json(data), "{}")
def test_list_with_empty_dict():
    data = [{}]
    assert_equals(json(data), "[{}]")
def test_rangie2():
    data = {"": 0}
    assert_equals(json(data), '{"":0}')
def test_none():
    assert_equals(json(None), "null")
def test_object():
    # Unserializable values must raise TypeError.
    def closure():
        json(object())
    assert_raises(TypeError, closure)
def test_bool():
    assert_equals(json(True), 'true')
def test_object_in_array():
    def closure():
        json([object()])
    assert_raises(TypeError, closure)
def test_object_in_dict():
    def closure():
        json({'a': object()})
    assert_raises(TypeError, closure)
def test_object_class():
    def closure():
        json(object)
    assert_raises(TypeError, closure)
def test_escape():
    # A double quote inside a string must be backslash-escaped.
    assert_equals(json('"') , '"\\""')
|
9,892 | ad079876476f6f291ad52aece8d0d5afdd5a8bcf | import os
from distutils.core import Extension
# Directory of this file (relative to the CWD), used to locate the C sources.
REPROJECT_ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
    """Build the C extension definition for the ``_overlap`` module."""
    sources = [
        os.path.join(REPROJECT_ROOT, "_overlap.c"),
        os.path.join(REPROJECT_ROOT, "overlapArea.c"),
        os.path.join(REPROJECT_ROOT, "reproject_slice_c.c"),
    ]
    # 'numpy' is a placeholder resolved to the real NumPy include directory
    # by the build helpers.
    include_dirs = ['numpy', REPROJECT_ROOT]
    extension = Extension(
        name="reproject.spherical_intersect._overlap",
        sources=sources,
        include_dirs=include_dirs,
        libraries=[],
        language="c",
        extra_compile_args=['-O2'])
    return [extension]
def get_package_data():
    """Map the spherical_intersect package to the C headers it ships."""
    headers = ['overlapArea.h', 'reproject_slice_c.h', 'mNaN.h']
    return {'reproject.spherical_intersect': headers}
|
9,893 | f29bc0263f8bb1d59ab2442347727d9d3233ec77 | import tkinter as tk
import random
from tkinter import messagebox as mb
# Board size: 4x4 = 16 cells (15 numbered tiles plus one blank).
n = 16
class Application(tk.Frame):
    """A 4x4 sliding "15 puzzle" board built on tkinter.

    ``self.numbers`` holds the board as a flat list of 16 ints (0 = blank);
    ``self.playButtons`` holds the matching tk.Button widgets (0 marks the
    blank slot).
    """

    # Class-level placeholder; replaced per-instance by makePlayButtons().
    playButtons = [0] * n

    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.grid(sticky='NEWS')
        self.createWidgets()

    def show_win(self):
        """Announce the win, then deal a fresh shuffled board."""
        msg = "YOU WIN!"
        mb.showinfo("Information", msg)
        self.makePlayButtons()

    def move(self, num):
        """Return a click callback that slides tile ``num`` into the blank.

        The callback swaps the tile with the empty cell when the two are
        orthogonally adjacent, moves the widget, and checks for a win.
        """
        def move2(self=self, num=num):
            index = self.numbers.index(num)
            r, c = divmod(index, 4)
            # REFACTOR: the original spelled out four near-identical
            # up/down/right/left branches; a single neighbour loop replaces
            # them. At most one neighbour can be the blank, so behavior is
            # unchanged.
            for dr, dc in ((-1, 0), (1, 0), (0, 1), (0, -1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr <= 3 and 0 <= nc <= 3:
                    j = 4 * nr + nc
                    if self.numbers[j] == 0:
                        self.numbers[j], self.numbers[index] = \
                            self.numbers[index], self.numbers[j]
                        # Row 0 holds the Exit/New buttons, hence the +1.
                        self.playButtons[index].grid(row=nr + 1, column=nc)
                        self.playButtons[j], self.playButtons[index] = \
                            self.playButtons[index], self.playButtons[j]
                        break
            # Solved when tiles read 1..15 with the blank (0) last.
            if self.numbers == [i % 16 for i in range(1, 17)]:
                self.show_win()
        return move2

    def makePlayButtons(self):
        """(Re)create the 15 tile buttons over a freshly shuffled layout."""
        # NOTE(review): a uniform shuffle yields an unsolvable layout about
        # half the time — confirm whether that is intended.
        for but in self.playButtons:
            if but != 0:
                but.destroy()
        self.numbers = [i for i in range(n)]
        random.shuffle(self.numbers)
        self.playButtons = [0] * n
        for i in range(n):
            if self.numbers[i] == 0:
                continue  # the blank cell gets no button
            self.playButtons[i] = tk.Button(
                self, text=str(self.numbers[i]),
                command=self.move(self.numbers[i]))
            self.playButtons[i].grid(row=i // 4 + 1, column=i % 4,
                                     sticky='SENW')

    def createWidgets(self):
        """Lay out the control buttons, the board, and the grid weights."""
        self.quitButton = tk.Button(self, text='Exit', command=self.quit)
        self.newButton = tk.Button(self, text='New',
                                   command=self.makePlayButtons)
        self.makePlayButtons()
        self.quitButton.grid(row=0, column=0)
        self.newButton.grid(row=0, column=2)
        self.master.columnconfigure(0, weight=1)
        self.master.rowconfigure(0, weight=1)
        for r in range(1, 5):
            self.rowconfigure(r, weight=1)
        for c in range(4):
            self.columnconfigure(c, weight=1)
# Build the UI and enter the tk event loop (blocks until the window closes).
app = Application()
app.master.title('15 puzzle')
app.mainloop()
9,894 | adb6e33dc665f88c82fcc399688a8dbd67b1e3e3 | """
Author:
C.M. Gosmeyer
Date:
Mar 2018
References:
"Introduction to Statistical Problem Solving in Geography",
J.C. McGrew, Jr., A.J. Lembo, Jr., C.B. Monroe
To Do:
Should tables interpolate?
y = y1 + ((x - x1) / (x2 - x1)) * (y2 - y1)
"""
import numpy as np
import pandas as pd
import os
# Absolute path to the directory containing this module's table CSV files.
# os.path.dirname replaces the old "split on '/'" approach, which is
# equivalent on POSIX but also works with other path separators.
p = os.path.dirname(os.path.abspath(__file__))
class LoadTable(object):
    """Base class that reads a statistical lookup table from a CSV file."""

    def __init__(self, filename):
        # Path of the CSV file; consumed lazily by load_table().
        self.filename = filename

    def load_table(self):
        """Return the table at ``self.filename`` as a pandas DataFrame."""
        return pd.read_csv(self.filename)
class LoadNormalTable(LoadTable):
    """ A standard normal (Z) table object, loaded from normal_table.csv.
    """
    def __init__(self):
        LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))
        temp_table = self.load_table()
        # Rows are indexed by the Z-score's integer+first-decimal part (the
        # "z" column); columns hold the second decimal.
        self.normal_table = temp_table.set_index("z")
    def find_z(self, prob, tails=1):
        """ Given probability, return nearest Z-score from normal table.
        Parameters
        ----------
        prob : float
            The probability, i.e., the area under section of probability
            distribution curve.
        tails : int
            1 or 2. The prob will be divided by this number (all
            calculations assume one tail). Do not change to 2 if your
            `prob` value already is divided in half.
        Returns
        -------
        z_score : float
            The Z-score or standard score.
        """
        prob /= float(tails)
        normal_table = self.normal_table
        # Find closest probability in table: first the nearest value per
        # column, then the overall nearest among those.
        nearest_probs = []
        for col in list(normal_table):
            nearest_probs.append(find_nearest(normal_table[col], prob))
        nearest_probs = np.asarray(nearest_probs)
        final_prob = find_nearest(nearest_probs, prob)
        # Locate the column (second decimal) and row (leading digits) that
        # hold the chosen probability.
        for col in list(normal_table):
            if final_prob in list(normal_table[col]):
                z1 = col
        for i in normal_table.index:
            if final_prob == normal_table[z1][i]:
                z0 = i
        # Build Z-score from row value + column value.
        z_score = float(z0) + float(z1)
        return z_score
    def find_prob(self, z, tails=1):
        """ Given Z-score, return nearest probability from table.
        Parameters
        ----------
        z : float
            The Z-score or standard score.
        tails : int
            1 or 2.
        Returns
        -------
        prob : float
            The probability, i.e., the area under section of probability
            distribution curve.
        """
        normal_table = self.normal_table
        if z > 4:
            # Beyond the table's range the one-tail area is effectively 0.5.
            prob = 0.5
        else:
            # Split z into row key (first decimal) and column key (second
            # decimal, as a string matching the CSV header).
            z0 = round(z, 1)
            z1 = str(round(z, 2) - z0)
            prob = round(normal_table[z1][z0], 6)
        prob *= tails
        return prob
class LoadStudentsTTable(LoadTable):
    """ A Student's t table object (one- or two-tailed CSV).
    """
    def __init__(self, tails):
        """
        Parameters
        ----------
        tails : int
            1 or 2. Selects which pre-computed CSV is loaded.
        """
        if tails == 1:
            LoadTable.__init__(self, os.path.join(p, 'students_t_table_one_tail.csv'))
        else:
            LoadTable.__init__(self, os.path.join(p, 'students_t_table_two_tail.csv'))
        temp_table = self.load_table()
        # Rows are indexed by degrees of freedom; columns by significance
        # level (1 - confidence), as strings.
        self.t_table = temp_table.set_index("df")
    def find_t(self, df, confidence=0.95):
        """ Finds the T-value of distribution. The table goes to df-1000,
        after which all is effectively infinity and returns same value.
        By default the confidence level is 95%.
        Parameters
        ----------
        df : int
            Degrees of freedom (size of sample).
        confidence : float
            The confidence level (area under distribution curve within
            interval).
        Returns
        -------
        t_score : float
            The test statistic.
        """
        t_table = self.t_table
        # Snap to the nearest tabulated significance level and df row.
        nearest_confidence = round(find_nearest(list(t_table), 1.0-confidence), 4)
        nearest_df = round(find_nearest(t_table.index, df), 0)
        t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)
        return t_score
    def find_confidence(self, t, df):
        """ Finds confidence level (area) of ONE tail of distribution.
        Parameters
        ----------
        t : float
            The test statistic.
        df : int
            Degrees of freedom (size of sample).
        """
        t_table = self.t_table
        nearest_df = round(find_nearest(t_table.index, df), 0)
        nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
        for col in list(t_table):
            if nearest_t == round(t_table[col][nearest_df], 6):
                # Subtract from one to get confidence, divide by two to get
                # single section on positive side of distribution.
                confidence = (1.0 - float(col)) / 2.0
        return confidence
class LoadChi2Table(LoadTable):
    """ A chi-square table object, loaded from chi_square_table.csv.
    """
    def __init__(self):
        """
        """
        LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
        temp_table = self.load_table()
        # Rows are indexed by degrees of freedom; columns by significance
        # level (1 - confidence), as strings.
        self.chi2_table = temp_table.set_index("df")
    def find_chi2(self, df, confidence=0.95):
        """ Finds the chi-square value of distribution. The table goes to
        df-1000, after which all is effectively infinity and returns same
        value. By default the confidence level is 95%.
        Parameters
        ----------
        df : int
            Degrees of freedom (size of sample).
        confidence : float
            The confidence level (area under distribution curve within
            interval).
        Returns
        -------
        chi2 : float
            The test statistic.
        """
        chi2_table = self.chi2_table
        # Snap to the nearest tabulated significance level and df row.
        nearest_confidence = round(find_nearest(list(chi2_table), 1.0-confidence), 4)
        nearest_df = round(find_nearest(chi2_table.index, df), 0)
        chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
        return chi2
    def find_confidence(self, chi2, df):
        """ Finds confidence level (area) of right-hand-side of distribution.
        Parameters
        ----------
        chi2 : float
            The test statistic.
        df : int
            Degrees of freedom (size of sample).
        """
        chi2_table = self.chi2_table
        nearest_df = round(find_nearest(chi2_table.index, df), 0)
        nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
        for col in list(chi2_table):
            if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
                # Subtract from one to get confidence.
                confidence = (1.0 - float(col))
        return confidence
def find_nearest(array, value):
    """Return the element of *array* closest to *value*.

    Parameters
    ----------
    array : array-like of numbers
    value : number

    Returns
    -------
    float
        The entry of ``array`` minimizing ``abs(entry - value)``; ties
        resolve to the first (lowest-index) occurrence.
    """
    array = np.asarray(array, dtype=float)
    # np.argmin replaces the old pandas Series.idxmin round-trip: identical
    # first-minimum tie-breaking without building an intermediate Series.
    idx = int(np.abs(array - float(value)).argmin())
    return array[idx]
|
9,895 | 796a13de72c2879956c5f9c9c9bdef7253760c9d | from matplotlib import pyplot as plt
# Median developer salaries (USD) by age, ages 25-35.
dev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [4000, 45000, 50000, 55000, 60000,
         56000, 62316, 64928, 67317, 68748, 73752]
# NOTE(review): the first 'All Devs' value (4000) looks like a typo for
# 45000 — confirm against the source data.
plt.plot(dev_x, dev_y, label='All Devs')
# dev_x and dev_y are respectively the x-axis and y-axis series
# Median Python developer salaries over the same ages
py_dev_y = [45372, 48876, 53850, 57287, 63016,
            65998, 70003, 70000, 71496, 75370, 83640]
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
# Shows the title above the figure
plt.legend()
# Shows the legend (series labels) on the chart
plt.show()
|
9,896 | ba5171d3de87ec01770a7174d9783d5058b0fced | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
# Prevent numpy exponential (scientific) notation on print; default is False.
np.set_printoptions(suppress=True)
# Accumulator frame: one row per video frame, with elapsed time and
# orientation columns filled in per-file by vel_det().
y_cord_df = pd.DataFrame(data=None, columns=['Time', 'Orien'])
# 108000 frames = 30 minutes at 60 fps; convert frame index to minutes.
list_no = np.arange(0.0, 108000.0, 1.0)
y_cord_df['Time'] = (list_no*(1/60))/60
rolling_avg_duration= 10  # in seconds
def vel_det(file, legend_label, line_color):
    """Load one DeepLabCut tracking file, classify frames by head height,
    and plot/save the head y-coordinate trace.

    Parameters
    ----------
    file : str
        Path to a DLC output HDF5 file (3-level columns:
        scorer / bodypart / coord).
    legend_label : str
        Legend/title label; also used as the saved ``.jpg`` file name.
    line_color : str
        Matplotlib color for the plotted line.

    Side effects: appends columns to the module-level ``y_cord_df``, prints
    intermediate data, saves ``<legend_label>.jpg`` and shows the figure.
    """
    fps=60  # NOTE(review): assumes 60 frames/s recordings -- confirm
    data_df = pd.read_hdf(path_or_buf=file)
    # DLC columns are a 3-level MultiIndex: (scorer, bodypart, coord).
    bodyparts = data_df.columns.get_level_values(1)
    coords = data_df.columns.get_level_values(2)
    bodyparts2plot = bodyparts
    scorer = data_df.columns.get_level_values(0)[0]
    # Frame indices 0..n_frames-1, sized from the first bodypart's x series.
    Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))
    # Flatten the MultiIndex to single-level "<bodypart>_<coord>" names
    # (e.g. 'head_y'), which the code below relies on.
    column_title = bodyparts + "_" + coords
    data_df.columns = column_title
    # calculate the time elapsed per frame and append column
    data_df['Time Elapsed'] = Time / fps
    # print(data_df)
    # what's being plotted
    # plt.plot(data_df['Time Elapsed'], data_df['velocity_roll'], color=line_color, marker='o', markersize=0.4, linewidth=0.3, label=legend_label) # scatter plot with faint lines
    # plt.plot(data_df['Time Elapsed']/60, data_df['velocity_roll'], color=line_color, linewidth=1, label=legend_label)
    # plot formatting
    # plt.xlabel('time (seconds)')
    # plt.ylabel('velocity (pixels/second)')
    # plt.legend(loc=2)
    # plt.title('total distance traveled vs. time: ' + path)
    animal = []
    # NOTE(review): assigning a string to animal[:] stores its individual
    # characters; only animal[:2] is used, and only in commented-out code.
    animal[:] = ' '.join(file.split()[2:5])
    # plt.title('Total Distance vs. Time for: ' + ' '.join(file.split()[:2]) + " "+ ''.join(animal[:2]))
    # plt.title(str(rolling_avg_duration)+' second Rolling Velocity Pretreat 3mkgNaltrexone+5mgkg U50')
    # Recomputed identically to the assignment above (redundant but harmless).
    data_df['Time Elapsed'] = Time / fps
    # Record this file's head trace in the module-level accumulator.
    y_cord_df[file] = data_df['head_y']
    y_cord_df[file+'_orient'] = np.NaN
    i = 0
    # rear_values = data_df['head_y'].values<=300
    # Frames with head_y <= 300 px are labelled "rear" (image y grows
    # downward, so a small y means the head is high).
    rear_values = data_df['head_y'].values <= 300
    print(rear_values)
    data_df['Orientation']=rear_values
    data_df['GR'] = 'groom'
    data_df.loc[rear_values == True, 'GR'] = 'rear'
    # for time in Time:
    #     if data_df['head_y'].iloc[time] >= 234:
    #         data_df[file + '_orient'] = 'rear'
    #         i=1+i
    #     # using 1 for rear
    #     else:
    #         # 0 for groom/walk
    #         data_df[file + '_orient'] = 'groom'
    #         i=1+i
    # print(data_df)
    # for values in data_df['head_y']:
    #     if values >= 234:
    #         y_cord_df.insert(loc=data_df.loc[], column=file + '_orient', value=1, allow_duplicates=True)
    #     else:
    #         # 0 for groom/walk
    #         y_cord_df.insert(loc=i, column=file+'_orient', value=0, allow_duplicates=True)
    #     i = i+1
    #     print('iter'+str(i))
    # print(data_df['Orientation'])
    # Boolean mask: keep only frames where the head is low (> 400 px).
    filt_df = data_df['head_y'] > 400
    print(data_df[filt_df])
    plt.figure(figsize=(6, 9.5))
    # plt.plot(data_df['Time Elapsed']/60, data_df["GR"], color=line_color, linewidth=1, label=legend_label)
    # plt.plot(data_df['Time Elapsed']/60, data_df['head_y']*-1, color=line_color, linewidth=1, label=legend_label)
    # x = head_y (pixels); y = frame index / 3600, i.e. minutes at 60 fps.
    plt.plot(data_df[filt_df].head_y,data_df[filt_df].index/3600, color=line_color, linewidth=1, label=legend_label)
    # plt.axhline(y=-300)
    leg = plt.legend()
    font = {'family': 'Arial',
            'size': 12}
    plt.rc('font', **font)
    plt.rc('lines', linewidth = 1)
    # Thicken legend lines for readability (reuses/shadows the earlier i).
    for i in leg.legendHandles:
        i.set_linewidth(3)
    plt.xlabel('y coordinate(pixels)', fontsize=12)
    plt.ylabel('time(minutes)', fontsize=12)
    plt.title(legend_label)
    plt.savefig(legend_label+'.jpg', format='jpg')
    plt.show()
if __name__ == '__main__':
    """Saline Data"""
    # Saline (vehicle) sessions are disabled in this revision.
    """Naltrexone Data"""
    # 3 mg/kg naltrexone + 5 mg/kg U50 sessions are disabled in this revision.
    """U50 Data"""
    # Analyse and plot each 5 mg/kg U50 side-view recording:
    # (h5 file, legend label, line color).
    u50_sessions = [
        ('U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F0 5mgkg U50', 'steelblue'),
        ('U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F1 5mgkg U50', 'deepskyblue'),
        ('U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F2 5mgkg U50', 'powderblue'),
        ('U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M1 5mgkg U50', 'blue'),
        ('U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M2 5mgkg U50', 'blue'),
        ('U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M3 5mgkg U50', 'lightblue'),
        ('U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M4 5mgkg U50', 'turquoise'),
    ]
    for session_file, session_label, session_color in u50_sessions:
        vel_det(file=session_file, legend_label=session_label,
                line_color=session_color)
    """NORBNI U50 Data"""
    # 10 mg/kg NORBNI + 5 mg/kg U50 sessions are disabled in this revision.
    """NORBNI Saline"""
    # 10 mg/kg NORBNI + saline sessions, and the averaged/SEM overlay plots,
    # are disabled in this revision.
import itertools

# Read n points (one "x y" pair per line), then print the sum over all
# unordered point pairs of twice the Euclidean distance, divided by n.
n = int(input())
points = [list(map(int, input().split(" "))) for _ in range(n)]
total = 0
for (x1, y1), (x2, y2) in itertools.combinations(points, 2):
    total += 2 * ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
print(total / n)
#!/usr/bin/env python
# encoding: utf-8
'''
1D and 2D CNNs extract features, an LSTM processes them further, and the
extracted features are finally concatenated; the CNN and LSTM stages are
interleaved within each branch.
'''
# Import the Keras framework.
import keras
# Layer building blocks.
from keras.models import Sequential
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, Flatten, Dense, Dropout,LSTM,Reshape
from keras import Model
# Network visualization (plot_model requires pydot/graphviz).
from keras.utils import plot_model
def merge_model(model_1, model_2):
    '''
    Fuse two independent Keras models into one.

    Both models keep their own input tensors; their outputs are concatenated
    along axis 1, so the result takes two inputs and produces one merged
    feature tensor.

    :param model_1: first Keras model (input/output tensors reused as-is)
    :param model_2: second Keras model
    :return: a ``Model`` with inputs ``[model_1.input, model_2.input]`` and
        the concatenated output
    '''
    # model_1.load_weights('model_1_weight.h5')  # optionally load per-branch weights here
    # model_2.load_weights('model_2_weight.h5')  # e.g. pre-trained weights (transfer learning)
    inp1 = model_1.input  # input tensor of the first model
    inp2 = model_2.input  # input tensor of the second model
    r1 = model_1.output
    r2 = model_2.output
    # Concatenate the two branch outputs along the feature axis.
    x = keras.layers.Concatenate(axis=1)([r1, r2])
    model = Model(inputs=[inp1, inp2], outputs=x)
    return model
def addLayers_model(model):
    '''
    Extend a (merged) model with a trainable classification head.

    After fusing features with Keras ``Concatenate``, ``Sequential.add`` can
    no longer be used, so the extra layers are attached via the functional
    API instead.  All existing layers are frozen so that only the new head
    (Dense(512) -> Dropout(0.5) -> Dense(10, softmax)) is trained.

    :param model: model to extend
    :return: a new ``Model`` sharing the original inputs, ending in a
        10-way softmax classifier
    '''
    origin_model = model
    for layer in origin_model.layers:
        layer.trainable = False  # freeze the original layers; do not retrain them
    inp = origin_model.input
    x = origin_model.output
    den = Dense(512, name="fine_dense")(x)
    l = Dropout(0.5)(den)
    result = Dense(10, activation="softmax")(l)
    # BUG FIX: the functional-API keyword is `inputs`, not `input`
    # (consistent with merge_model above; modern Keras raises a TypeError
    # on the misspelled keyword).
    model = Model(inputs=inp, outputs=result)
    return model
# Input shapes: 1024 samples x 1 channel for the 1-D branch,
# 32x32 single-channel images for the 2-D branch.
input_shape_1D = (1024, 1)
input_shape_2D = (32, 32, 1)
# Build the model.
# Per-branch structure: conv (relu) - pool - conv - pool - LSTM - Flatten,
# then merge, fully-connected layer, Dropout, softmax classifier.
# ====================1、 1D branch ==============================
model1 = Sequential()
# Conv1D: 8 @ 1*1024 -- 8 filters (kernels), kernel size 3.
model1.add(Conv1D(filters=8,
                  kernel_size=(3),
                  input_shape=input_shape_1D,
                  padding='same',
                  activation='relu'))
# MaxPooling1D: 8 @ 1*512.
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
# Conv1D: 16 @ 1*512 -- 16 filters, kernel size 3.
model1.add(Conv1D(filters=16,
                  kernel_size=(3),
                  input_shape=(1, 512),
                  padding='same',
                  activation='relu'))
# MaxPooling1D: 16 @ 1*256.
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
# Conv1D: 16 @ 1*256 。16个过滤器,大小设置为3
model1.add(Conv1D(filters=16,
                  kernel_size=(3),
                  input_shape=(1, 512),
                  padding='same',
                  activation='relu'))
# MaxPooling1D:16 @ 1*128。
model1.add(MaxPooling1D(pool_size=(2), padding='same'))
'''
model1.add(LSTM(32,return_sequences=True))
model1.add(Flatten())  # flatten the branch output to 1-D
# =============================================================
# ============ ======== 2、 2D branch ============================
model2 = Sequential()
# Conv2D: 8 @ 32*32 -- 8 filters (kernels), kernel size 3x3.
model2.add(Conv2D(filters=8,
                  kernel_size=(3, 3),
                  input_shape=input_shape_2D,
                  padding='same',
                  activation='relu'))
# MaxPooling2D: 8 @ 16*16.
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Conv2D: 16 @ 16*16 -- 16 filters, 3x3 kernels.
model2.add(Conv2D(filters=16,
                  kernel_size=(3, 3),
                  input_shape=(16, 16, 1),
                  padding='same',
                  activation='relu'))
# MaxPooling2D: 16 @ 8*8.
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
# Conv2D:16 @ 8*8。16个过滤器,卷积核大小设置为3*3
model2.add(Conv2D(filters=16,
                  kernel_size=(3, 3),
                  input_shape=(8, 8, 1),
                  padding='same',
                  activation='relu'))
# MaxPooling2D:16 @ 4*4。
model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
'''
# NOTE(review): with the third conv block commented out, the active output
# here is (None, 8, 8, 16); the (None,4,4,16) note applied to the longer
# network variant.
print("model2两层卷积后的输出形状:",model2.output_shape) # (None,4,4,16)
model2.add(Reshape((64,16))) # (None, 64, 16): 8*8*16 values regrouped as 64 timesteps of 16 features
model2.add(LSTM(32,return_sequences=True))
model2.add(Flatten())
# =============================================================
# ==================== 3、merge layer ===============================
# Feature-fusion part.
model = merge_model(model1, model2)
model.summary()
# =============================================================
print("model.outputs:",model.output.shape)
# ============= 4、 dense head, dropout, classifier ====================
model = addLayers_model(model)
print(model.summary())
plot_model(model, to_file='model/1D2DLSTM_cross.png')
# =============================================================
# ==================== 5、training configuration ==========================
# Adam optimizer; lr: an initial learning rate of 0.1 with ReduceLROnPlateau
# decay can be configured via the callbacks passed to model.fit.
# adam = keras.optimizers.Adam(lr=0.1)
adam = keras.optimizers.Adam()
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
# =============================================================
# Save the (untrained) model architecture.
model.save('model/1D2DLSTM_cross.h5')
9,899 | c55b768466309d2e655c9222e0674a6bc2a958b3 | import json
import os
from subprocess import PIPE, Popen as popen
from unittest import TestCase
from substra.commands import Config
# Expected fixtures for the `substra list <asset>` assertions in TestList
# below.  Each constant mirrors the exact JSON the CLI should print after
# the e2e multi-org setup has run (hashes/addresses are environment-specific).

# Expected `substra list objective` output.
objective = [[{
    'descriptionStorageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/description/',
    'key': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
    'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
                'name': 'macro-average recall',
                'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'},
    'name': 'Skin Lesion Classification Challenge',
    'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
    'testDataKeys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1']}, {
    'descriptionStorageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/description/',
    'key': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
    'metrics': {'hash': '0bc732c26bafdc41321c2bffd35b6835aa35f7371a4eb02994642c2c3a688f60',
                'name': 'macro-average recall',
                'storageAddress': 'http://owkin.substrabac:8000/objective/6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c/metrics/'},
    'name': 'Simplified skin lesion classification',
    'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
    'testDataKeys': ['2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e',
                     '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1']}]]

# Expected `substra list data_manager` output.
data_manager = [[{'objectiveKeys': [],
                  'description': {'hash': '7a90514f88c70002608a9868681dd1589ea598e78d00a8cd7783c3ea0f9ceb09',
                                  'storageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/description/'},
                  'key': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'name': 'ISIC 2018',
                  'nbData': 2,
                  'openerStorageAddress': 'http://chunantes.substrabac:8001/data_manager/ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994/opener/',
                  'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
                  'size': 553113, 'type': 'Images'}, {
                  'objectiveKeys': ['6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
                                    'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f'],
                  'description': {'hash': '258bef187a166b3fef5cb86e68c8f7e154c283a148cd5bc344fec7e698821ad3',
                                  'storageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/description/'},
                  'key': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0',
                  'name': 'Simplified ISIC 2018', 'nbData': 6,
                  'openerStorageAddress': 'http://owkin.substrabac:8000/data_manager/b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0/opener/',
                  'owner': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55', 'permissions': 'all',
                  'size': 1415097, 'type': 'Images'}]]

# Expected `substra list data_sample` output.
data = [{'pkhash': 'e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1/0024900.zip'},
        {'pkhash': '4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/4b5152871b181d10ee774c10458c064c70710f4ba35938f10c0b7aa51f7dc010/0024701.zip'},
        {'pkhash': '93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/93e4b1e040b08cfa8a68b13f9dddb95a6672e8a377378545b2b1254691cfc060/0024317.zip'},
        {'pkhash': 'eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/eed4c6ea09babe7ca6428377fff6e54102ef5cdb0cae593732ddbe3f224217cb/0024316.zip'},
        {'pkhash': '2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/2d0f943aa81a9cb3fe84b162559ce6aff068ccb04e0cb284733b8f9d7e06517e/0024315.zip'},
        {'pkhash': '533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1', 'validated': True,
         'file': 'http://owkin.substrabac:8000/media/data/533ee6e7b9d8b247e7e853b24547f57e6ef351852bac0418f13a0666173448f1/0024318.zip'}]

# Expected `substra list algo` output.
algo = [[{'objectiveKey': '6b8d16ac3eae240743428591943fa8e66b34d4a7e0f4eb8e560485c7617c222c',
          'description': {'hash': '3b1281cbdd6ebfec650d0a9f932a64e45a27262848065d7cecf11fd7191b4b1f',
                          'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/description/'},
          'key': '7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0',
          'name': 'Logistic regression for balanced problem',
          'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
          'storageAddress': 'http://chunantes.substrabac:8001/algo/7742aea2001ceb40e9ce8a37fa27237d5b2d1f574e06d48677af945cfdf42ec0/file/'},
         {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
          'description': {'hash': 'b9463411a01ea00869bdffce6e59a5c100a4e635c0a9386266cad3c77eb28e9e',
                          'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/description/'},
          'key': '0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f', 'name': 'Neural Network',
          'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
          'storageAddress': 'http://chunantes.substrabac:8001/algo/0acc5180e09b6a6ac250f4e3c172e2893f617aa1c22ef1f379019d20fe44142f/file/'},
         {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
          'description': {'hash': '124a0425b746d7072282d167b53cb6aab3a31bf1946dae89135c15b0126ebec3',
                          'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/description/'},
          'key': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f', 'name': 'Logistic regression',
          'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
          'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
         {'objectiveKey': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
          'description': {'hash': '4acea40c4b51996c88ef279c5c9aa41ab77b97d38c5ca167e978a98b2e402675',
                          'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/description/'},
          'key': 'f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284', 'name': 'Random Forest',
          'owner': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f', 'permissions': 'all',
          'storageAddress': 'http://chunantes.substrabac:8001/algo/f2d9fd38e25cd975c49f3ce7e6739846585e89635a86689b5db42ab2c0c57284/file/'}]]

# Expected `substra list model` output (same record as traintuple, nested one
# list deeper).
model = [[{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
                    'name': 'Logistic regression',
                    'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
           'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
                         'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
                                     'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
           'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
           'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
                        'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
           'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
           'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
           'permissions': 'all', 'startModel': None, 'status': 'done',
           'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
                        'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
                        'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'}, 'trainData': {
               'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
                        '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
               'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994', 'perf': 1,
               'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]]

# Expected `substra list traintuple` output.
traintuple = [{'algo': {'hash': '6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f',
                        'name': 'Logistic regression',
                        'storageAddress': 'http://chunantes.substrabac:8001/algo/6dcbfcf29146acd19c6a2997b2e81d0cd4e88072eea9c90bbac33f0e8573993f/file/'},
               'objective': {'hash': 'd5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f',
                             'metrics': {'hash': '750f622262854341bd44f55c1018949e9c119606ef5068bd7d137040a482a756',
                                         'storageAddress': 'http://chunantes.substrabac:8001/objective/d5002e1cd50bd5de5341df8a7b7d11b6437154b3b08f531c9b8f93889855c66f/metrics/'}},
               'creator': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f',
               'endModel': {'hash': 'fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb',
                            'storageAddress': 'http://chunantes.substrabac:8001/model/fe900588d43263c0ce1709116fe07c68d299acbbb6cfb241b0e8795bc8a1fbcb/file/'},
               'key': '3e1a9e122765b2976f393322ab9d1c59fb113b35e2531900e06c9ae0f41e8afb',
               'log': 'Train - CPU:100.23 % - Mem:0.14 GB - GPU:0.00 % - GPU Mem:0.00 GB; Test - CPU:0.00 % - Mem:0.00 GB - GPU:0.00 % - GPU Mem:0.00 GB; ',
               'permissions': 'all', 'startModel': None, 'status': 'done',
               'testData': {'keys': ['e11aeec290749e4c50c91305e10463eced8dbf3808971ec0c6ea0e36cb7ab3e1'],
                            'openerHash': 'b4d2deeb9a59944d608e612abc8595c49186fa24075c4eb6f5e6050e4f9affa0', 'perf': 1,
                            'worker': 'ca77d9070da2732f3dc1fcdb9397cfcf2fad2dcdde4e355dfe34658ad8b9ce55'},
               'trainData': {'keys': ['62fb3263208d62c7235a046ee1d80e25512fe782254b730a9e566276b8c0ef3a',
                                      '42303efa663015e729159833a12ffb510ff92a6e386b8152f90f6fb14ddc94c9'],
                             'openerHash': 'ccbaa3372bc74bce39ce3b138f558b3a7558958ef2f244576e18ed75b0cea994',
                             'perf': 1, 'worker': '91df1c847f714ae3ac9d83ef000c583a2c5e63719bdfe23958ca47a8ffe9a82f'}}]
# Run this test only after an e2e multi orgs
class TestList(TestCase):
    """End-to-end checks that `substra list <asset>` prints the expected
    fixtures defined above.

    NOTE(review): requires the substra CLI on PATH and a running multi-org
    backend; the fixture hashes are environment-specific.
    """

    # CLI config written by setUp and removed by tearDown.
    CONFIG_PATH = '/tmp/.substra_e2e'

    def setUp(self):
        # Point the CLI at the owkin backend, using credentials from the
        # environment (empty strings when unset).
        Config({
            '<url>': 'http://owkin.substrabac:8000',
            '<version>': '0.0',
            '<user>': os.environ.get('BACK_AUTH_USER', ''),
            '<password>': os.environ.get('BACK_AUTH_PASSWORD', ''),
            '--config': self.CONFIG_PATH
        }).run()

    def tearDown(self):
        try:
            os.remove(self.CONFIG_PATH)
        except OSError:
            # Config file was never created (e.g. setUp failed): nothing to
            # clean up.  Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

    def _list(self, asset):
        """Run `substra list <asset>` and return its JSON-decoded stdout."""
        output = popen(['substra', 'list', asset, '--config=' + self.CONFIG_PATH],
                       stdout=PIPE).communicate()[0]
        return json.loads(output.decode('utf-8'))

    # assertEqual (rather than assertTrue(a == b)) reports a diff on failure.
    def test_list_objective(self):
        self.assertEqual(self._list('objective'), objective)

    def test_list_data_manager(self):
        self.assertEqual(self._list('data_manager'), data_manager)

    def test_list_data(self):
        self.assertEqual(self._list('data_sample'), data)

    def test_list_algo(self):
        self.assertEqual(self._list('algo'), algo)

    def test_list_model(self):
        self.assertEqual(self._list('model'), model)

    def test_list_traintuple(self):
        self.assertEqual(self._list('traintuple'), traintuple)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.